repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
TraBS | TraBS-main/scripts/main_train.py |
from pathlib import Path
from datetime import datetime
import torch
from pytorch_lightning.trainer import Trainer
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
import numpy as np
import torchio as tio
from breaststudies.data import BreastDataModule, BreastDataModuleLR, BreastDataModule2D, BreastUKADataset
from breaststudies.models import UNet, nnUNet, SwinUNETR
from breaststudies.augmentation import Resample2,RandomResample, ZNormalization, SpatialTransform2, RandomCropOrPad, Brightness, RandomDisableChannel, RescaleIntensity
if __name__ == "__main__":
    # Train a breast ('breast') or fibroglandular-tissue ('tissue') segmentation
    # model with k-fold cross-validation. Each run gets a timestamped directory
    # under ./runs with one sub-directory per split.
    current_time = datetime.now().strftime("%Y_%m_%d_%H%M%S")
    path_run_dir = Path.cwd() / 'runs' / str(current_time)
    torch.set_float32_matmul_precision('high')
    path_run_dir.mkdir(parents=True, exist_ok=True)

    # ------------- Settings ---------------------
    target = 'tissue' # 'breast' or 'tissue'
    batch_size = 1
    # Patch (ROI) sizes per target, given as (D, H, W).
    roi_sizes = {
        'tissue': (32, 256, 256),
        'breast': (32, 512, 256),
    }
    roi_size = roi_sizes[target]

    # ---------------------------------- Preprocessing ----------------------------------
    # Transformations applied once per series (currently all disabled).
    series_trans = tio.Compose([
        # tio.ToCanonical(),
        # Resample2((0.64, 0.64, 3)), # exact (0.64453125, 0.64453125, 3)
    ])
    # Per-item transformations for validation: intensity normalization on the
    # foreground (x>0), then crop/pad to the ROI.
    # NOTE(review): roi_size[::-1] presumably reorders (D, H, W) to torchio's
    # (W, H, D) convention — confirm against the dataset loader.
    item_trans = tio.Compose([
        ZNormalization(percentiles=(0.5, 99.5), per_channel=True, masking_method=lambda x:x>0),
        tio.CropOrPad(roi_size[::-1], padding_mode='minimum'),
    ])

    # --------------------------------- Augmentation ---------------------------------------
    p = 0.2  # probability that each individual augmentation below is applied
    item_trans_train = tio.Compose([
        ZNormalization(percentiles=(0.5, 99.5), per_channel=True, masking_method=lambda x:x>0),
        # ZNormalization(per_channel=True, masking_method=lambda x:x>0),
        # RescaleIntensity((-1,1), percentiles=(0.5, 99.5), per_channel=True, masking_method=lambda x:x>0),
        # RescaleIntensity((-1,1), per_channel=True, masking_method=lambda x:x>0),
        tio.RandomGhosting(num_ghosts=(4, 10), axes=(0,), intensity=(0.25, 1), restore=0.02, p=p),
        tio.RandomFlip(axes=(0,1,2), flip_probability=0.5, p=1), #WARNING flip_probability = prob that specific axis is used , p = prob that trans is applied , 0 = left,right, 1 = buttom/top
        SpatialTransform2(scales=(0.7,1.4, 0.7,1.4, 1.0,1.0), isotropic=(0,1), degrees=(0,0,0), translation=(0, 0, 0), default_pad_value = 0, image_interpolation='linear', p=p),
        tio.RandomNoise(mean=0, std=(0, 0.25), p=p),
        tio.RandomBlur((1.0,1.0, 0), p=p),
        tio.RandomBiasField(coefficients=0.1, p=p),
        Brightness((0.75, 1.25), per_channel=True, p=p),
        tio.RandomGamma(log_gamma=(-0.4, 0.4), p=p),
        RandomCropOrPad(roi_size[::-1], padding_mode='minimum'),
        RandomDisableChannel((0,1), p=p)
    ])

    # ------------ Optional ------------------
    # ds_kwargs = {
    #     'manipulate_label_func': BreastUKADataset.manipulate_label_func
    # } if target == 'tissue' else {}

    # ----------------------- Load Data ----------------------------------
    dm = BreastDataModuleLR(
        path_root = Path('/home/gustav/Documents/datasets/BreastDataset'),
        batch_size=batch_size,
        target=target,
        series_trans=series_trans,
        item_trans=item_trans,
        params_ds_train={'item_trans':item_trans_train},  # training set gets the augmentation pipeline instead
        # source_files={'source':['Dyn_0.nii', 'T2_resampled.nii', 'Sub.nii' ]}, # Overwrites default setting associated with 'target' setting
        target_shape=roi_size[::-1], # The bounding box of the breast mask is enlarged (if smaller) to target_shape to prevent padding with zeros. Only used for target=='tissue'.
        # num_workers=0,
        # **ds_kwargs
    )
    # Load fixed,balanced split
    # dm._item_pointers_split = dm.load_split(Path.cwd() / 'runs/splits/BreastDatasetLR'/dm.ds_kwargs['target'],
    #                                         split_file=f'data_split_{dm.Dataset.__name__}.yaml' )
    dm.setup('fit') # Run GroupKFold if item_pointers aren't initialized yet
    dm.save(path_run_dir) # Save setup configs

    #---------------------- Cross-Fold --------------------
    for split in range(0, dm.n_splits):
        path_split_dir = path_run_dir/('split_'+str(split))
        path_split_dir.mkdir(parents=True, exist_ok=True)
        dm.setup_split('fit', split=split) # Create train/val datasets for specific split

        # --------------------------- Initialize Model ----------------------
        in_ch = len(dm.ds_train.kwargs['source_files']['source'])  # number of input image channels
        out_ch = len(dm.ds_train.labels) # WARNING: manipulate_label_func might affect this

        # -------- Choose model --------
        # model = BasicUNet(in_ch=in_ch, out_ch=out_ch, roi_size=roi_size)
        # model = nnUNet(in_ch=in_ch, out_ch=out_ch, roi_size=roi_size )
        model = SwinUNETR(in_ch=in_ch, out_ch=out_ch, roi_size=roi_size,
            use_spacing = False, # Use spacing as an additional input information
            # use_checkpoint=True
        )

        # --------- Load pretraining or previously trained checkpoints ------------
        # model.load_pretrained(Path.cwd()/f'runs/2023_04_04_182914_SwinUNETR_pretrain/split_0/last.ckpt')

        # -------------- Training Initialization ---------------
        to_monitor = "val/loss" # WARNING: If log() is not called this parameter is ignored!
        min_max = "min"

        early_stopping = EarlyStopping(
            monitor=to_monitor,
            min_delta=0.0, # minimum change in the monitored quantity to qualify as an improvement
            patience=100, # number of checks with no improvement
            mode=min_max
        )
        checkpointing = ModelCheckpoint(
            dirpath=str(path_split_dir), # dirpath
            monitor=to_monitor,
            save_last=True,
            save_top_k=1,
            mode=min_max,
        )
        trainer = Trainer(
            accelerator='gpu' if torch.cuda.is_available() else 'cpu',
            devices=None,
            precision=16,
            # amp_backend='apex',
            # amp_level='O2',
            gradient_clip_val=0.5,
            default_root_dir=str(path_split_dir),
            callbacks=[checkpointing,early_stopping ], #
            enable_checkpointing=True,
            check_val_every_n_epoch=1,
            log_every_n_steps=1, # 50
            auto_lr_find=False,
            # limit_train_batches=1.0,
            # limit_val_batches=0, # 0 = disable validation
            # min_epochs=50,
            max_epochs=1001,
            num_sanity_val_steps=2,
        )

        # ---------------- Execute Training ----------------
        trainer.fit(model, datamodule=dm)

        # ------------- Save path to best model -------------
        # Records the best checkpoint filename inside the Lightning log dir so
        # that load_best_checkpoint() can resolve it later.
        model.save_best_checkpoint(trainer.logger.log_dir, checkpointing.best_model_path)
        # del trainer
| 6,986 | 42.12963 | 191 | py |
TraBS | TraBS-main/scripts/main_predict.py | from pathlib import Path
from datetime import datetime
from shutil import copyfile
import logging
import numpy as np
import torch
import torch.nn.functional as F
import SimpleITK as sitk
import torchio as tio
from breaststudies.data import BreastDatasetCreator
from breaststudies.models import UNet, nnUNet, SwinUNETR
from breaststudies.augmentation import Resample2, ZNormalization, AddBlankChannel
from breaststudies.postprocessing import keep_connected, close_holes, keep_inside
from breaststudies.utils.prediction import series_pred

#--------------------------------------------------------------------------------------------
# This script loads the model of each fold and predicts the segmentation masks as an ensemble
# (per-voxel majority vote over the fold models), applies postprocessing and writes the
# resulting NIfTI masks next to the input data.
#--------------------------------------------------------------------------------------------
for dataset_name in ['duke',]: # 'uka','duke', 'breast-diagnosis'
    # dataset_name = 'duke' # 'uka', 'duke', 'breast-diagnosis'
    cohort = 'subset' # 'subset', 'entire'
    for target_name in ['tissue']: # 'tissue' , 'breast'
        model_version = 0
        min_volume = 10**3 # Minimum breast segmentation volume in mm^3
        test_time_flipping = True
        Model = nnUNet # nnUNet SwinUNETR
        # Run directories containing the per-fold checkpoints for each target.
        path_run_dir = {'tissue': Path.cwd() / 'runs/2023_02_05_232342_nnUNet_tissue',
                        'breast': Path.cwd()/'runs/2023_02_12_154048_nnUNet_breast'}
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        reference_file = {'uka': 'Dyn_0.nii', 'duke':'T1.nii.gz', 'breast-diagnosis':'T2.nii.gz' } # needed to extract Spacing, Origin, Direction
        breast_mask_file = 'mask_breast.nii.gz' # needed for crop and possible preprocessing

        series_trans = tio.Compose([
            # ToOrientation('LPS'),
            # Resample2((0.64, 0.64, 3))
        ])
        # NOTE(review): AddBlankChannel inserts a blank channel for non-UKA data,
        # presumably so the channel count matches the trained model — confirm
        # against the training configuration.
        item_trans = tio.Compose([
            ZNormalization(percentiles=(0.5, 99.5), per_channel=True, masking_method=lambda x:x>0),
            AddBlankChannel(1 if dataset_name=='duke' else 0) if dataset_name != "uka" else tio.Lambda(lambda x:x)
        ])
        ds = BreastDatasetCreator(
            dataset_name,
            cohort,
            lateral='unilateral',
            target=target_name,
            target_files={'mask':breast_mask_file} if target_name == 'tissue' else {},
            out_format='tio',
            item_trans=item_trans,
            series_trans=series_trans,
        )
        # Set output dir
        path_root_out = ds.path_root

        # ------------------ Create Logger ----------------
        logger = logging.getLogger()
        logging.basicConfig(level=logging.INFO)
        path_log_dir = Path().cwd()/'results'/dataset_name/cohort/'log'/'predict'
        path_log_dir.mkdir(parents=True, exist_ok=True)
        logger.addHandler(logging.FileHandler(path_log_dir/'logging.log', 'w'))

        # --------------------- Load Model(s) --------------
        # One model per cross-validation split directory; they form the ensemble.
        models = []
        for path_split in path_run_dir[target_name].iterdir():
            if not (path_split.is_dir() and path_split.name.startswith('split')):
                continue
            model = Model.load_best_checkpoint(path_split, version=model_version)
            model.to(device)
            model.eval()
            models.append(model)

        # --------------- Iterate Cases --------------------
        # Note: Combine all items (eg. slices, sides,...) of a series/case and combine items to one prediction mask before
        # proceed with next case
        series_pointers = ds.get_series_pointers()
        for n_series, (series_id, item_pointers) in enumerate(series_pointers.items()):
            # Read Meta
            case_dir = item_pointers[0][0]
            logger.info(f"Case {n_series+1}: {series_id}")
            try:
                # Iterate over all models
                pred_models = []
                for model_i , model in enumerate(models):
                    logger.info(f"Model {model_i} predicting")
                    # Predict all items (e.g. left and right side of breast) and return combined mask
                    pred_mask, _ = series_pred(item_pointers, ds.load_item, model, test_time_flipping, device)
                    # Add prediction to models
                    pred_models.append(pred_mask)
                # Apply majority voting between models
                pred_models = torch.stack(pred_models).type(torch.uint8)
                pred_mask = pred_models.mode(dim=0).values
                # Torch to Numpy
                pred_mask = pred_mask[0,0].cpu().numpy().astype(np.uint8)
                # Get meta data
                path_case = ds.path_root/case_dir
                ref_img = sitk.ReadImage(str(path_case/reference_file[dataset_name] ))
                # Apply Postprocessing
                if target_name == 'breast':
                    label_values = ds.get_labels(exc_label_names='Background')
                    label_fcts = ds.get_label_fcts(exc_label_names='Background')
                    d,h,w = pred_mask.shape
                    # Post-process the left and right half of the volume independently.
                    for side in [(slice(None), slice(None), slice(w//2)), (slice(None), slice(None), slice(w//2, None))]:
                        pred_mask[side] = keep_connected(pred_mask[side], voxel_size=ref_img.GetSpacing(), min_volume=min_volume,
                                                         keep_only_largest=label_values, label_fcts=label_fcts)
                        pred_mask[side] = close_holes(pred_mask[side], label_fcts=label_fcts, label_values=label_values)
                elif target_name == 'tissue':
                    mask_breast = sitk.GetArrayFromImage(sitk.ReadImage(str(path_case/breast_mask_file )))
                    pred_mask = keep_inside(pred_mask, mask_breast) # FGT can't exist outside breast-mask
                # Create Nifti
                pred_mask_nii = sitk.GetImageFromArray(pred_mask)
                pred_mask_nii.CopyInformation(ref_img) # copies the Origin, Spacing, and Direction
                # Write file
                path_out = path_root_out/case_dir
                path_out.mkdir(parents=True, exist_ok=True)
                sitk.WriteImage(pred_mask_nii, str(path_out/f'mask_{target_name}_nn.nii.gz'))
            except Exception as e:
                # Best-effort batch processing: log the failing case and continue
                # with the next one instead of aborting the whole run.
                logger.warning(f"Error: {e}")
| 6,468 | 41.559211 | 146 | py |
TraBS | TraBS-main/scripts/main_predict_kfold.py | from pathlib import Path
from shutil import copyfile
import logging
import sys
import numpy as np
import torch
import torchio as tio
import SimpleITK as sitk
from monai.metrics import compute_meandice
from breaststudies.augmentation.augmentations import Resample2, ZNormalization, ToOrientation, RandomDisableChannel, RescaleIntensity
from breaststudies.data import BreastDataModuleLR, BreastDataModule
from breaststudies.models import UNet, nnUNet, SwinUNETR
from breaststudies.utils import one_hot
from breaststudies.postprocessing import keep_connected, close_holes, keep_inside
from breaststudies.utils.prediction import series_pred

#--------------------------------------------------------------------------------------------
# This script loads the model of each fold and predicts the segmentation masks in the corresponding test fold.
# Additionally it computes Dice scores, per-label confusion matrices and runtime statistics per fold.
#--------------------------------------------------------------------------------------------
dataset_name = 'uka' # 'uka',
cohort = 'subset' # 'subset',
target_name = 'tissue' # 'tissue' , 'breast'
model_version = 0
min_volume = 10**3 # Minimum breast segmentation volume in mm^3
test_time_flipping = True
Model = SwinUNETR # UNet, nnUNet, SwinUNETR
# Run directories containing the per-fold checkpoints for each target.
path_run_dir = {'tissue': Path.cwd() / 'runs/2023_04_05_192603_SwinUNETR',
                'breast': Path.cwd()/'runs/2023_02_12_154048_nnUNet_breast'}
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
np.set_printoptions(precision=3, suppress=True)
breast_mask_file = 'mask_breast.nii.gz'
path_out_root = Path().cwd()/'results'/dataset_name/cohort/'predictions'/target_name/(path_run_dir[target_name].name)
path_out_root.mkdir(parents=True, exist_ok=True)

# ------------ Load DataModule ----------------
series_trans = tio.Compose([ # Every spatial transformation must be have a reverse function, otherwise predicted masks will not fit to original images
    # tio.ToCanonical(),
    # ToOrientation('LPS'),
    # Resample2((0.64, 0.64, 3))
    # RandomDisableChannel(channels=(0,), p=1)
])
# Make sure you overwrite, otherwise the train transformations will be loaded.
item_trans = ZNormalization(percentiles=(0.5, 99.5), per_channel=True, masking_method=lambda x:x>0)
dm = BreastDataModuleLR.load(path_run_dir[target_name], out_format='tio', target=target_name, item_trans=item_trans, series_trans=series_trans)

#---------------------- Cross-validation --------------------
path_out_pred = path_out_root/'predictions'
for split in range(0, 5):
    # ---------------- Logger ------------------
    # One log file per split; also mirrored to stdout.
    path_log_dir = path_out_root/'log'
    path_log_dir.mkdir(parents=True, exist_ok=True)
    path_log_file = path_log_dir/f'logging_split_{split}.log'
    logger = logging.getLogger(__name__)
    s_handler = logging.StreamHandler(sys.stdout)
    f_handler = logging.FileHandler(path_log_file, 'w')
    logging.basicConfig(level=logging.INFO,
                        format='%(message)s',
                        handlers=[s_handler, f_handler])

    # -------- Setup Dataset ------------
    dm.setup_split('test', split=split)
    ds = dm.ds_test

    # ------------- Load Model ----------
    # model = Model.load_best_checkpoint(path_run_dir[target_name]/f'split_{split}', version=model_version)
    model = Model.load_from_checkpoint(path_run_dir[target_name]/f'split_{split}/last.ckpt', version=model_version)
    model.to(device)
    model.eval()

    # --------------- Iterate over all Series within fold --------------------------
    series_pointers = ds.get_series_pointers()
    dices = []
    # Per-label presence bookkeeping (predicted vs. ground truth) used for the
    # confusion matrices at the end of each fold.
    y_pred = { lab_name:[] for lab_name in ds.labels.keys()}
    y_true = { lab_name:[] for lab_name in ds.labels.keys()}
    delta_times = []
    for n_series, (series_id, item_pointers) in enumerate(series_pointers.items()):
        # Read Meta
        case_dir = series_id
        logger.info(f"Split {split} Case {n_series+1}: {case_dir}")
        # Create output dir
        path_out_dir = path_out_pred/case_dir
        path_out_dir.mkdir(exist_ok=True, parents=True)

        # Predict all items (e.g. left and right side of breast) and return combined mask
        pred_mask, item_pred_times = series_pred(item_pointers, ds.load_item, model, test_time_flipping, device)

        # Store computation time for series
        logger.info(f"Series Computation took {sum(item_pred_times):.3f}s")
        delta_times.append(item_pred_times)

        # Torch to Numpy
        pred_mask = pred_mask[0,0].cpu().numpy().astype(np.uint8)

        # Get target (ground-truth label map) for metric computation
        target_file_name = ds.default_target_files[target_name]['target']
        path_item = ds.path_root/item_pointers[0][0]
        target_nii = tio.LabelMap(path_item/target_file_name)
        # target_nii = tio.ToCanonical()(target_nii)
        target_nii = target_nii.as_sitk()
        target = torch.as_tensor(sitk.GetArrayFromImage(target_nii)[None,None]).long()

        # ----------- Apply Postprocessing --------------
        if target_name == 'breast':
            label_values = ds.get_labels(exc_label_names='Background')
            label_fcts = ds.get_label_fcts(exc_label_names='Background')
            d,h,w = pred_mask.shape
            # Post-process the left and right half of the volume independently.
            for side in [(slice(None), slice(None), slice(w//2)), (slice(None), slice(None), slice(w//2, None))]:
                pred_mask[side] = keep_connected(pred_mask[side], voxel_size=target_nii.GetSpacing(), min_volume=min_volume,
                                                 keep_only_largest=label_values, label_fcts=label_fcts)
                pred_mask[side] = close_holes(pred_mask[side], label_fcts=label_fcts, label_values=label_values)
        # elif target_name == 'tissue':
        #     mask_breast = sitk.GetArrayFromImage(sitk.ReadImage(str(path_item/breast_mask_file )))
        #     pred_mask = keep_inside(pred_mask, mask_breast) # FGT can't exist outside breast-mask

        # --------------------- Save prediction on disk ------------------------
        pred_mask_nii = sitk.GetImageFromArray(pred_mask)
        pred_mask_nii.CopyInformation(target_nii) # copies the Origin, Spacing, and Direction
        sitk.WriteImage(pred_mask_nii, str(path_out_dir/f'mask_{target_name}_nn.nii.gz'))

        # --------------------- (Optional) Performance Metrics ---------------------
        pred_mask = torch.as_tensor(pred_mask[None, None], dtype=torch.long)
        # Hot Fix: remap ground-truth labels 5->1 and 6->3 before one-hot encoding.
        # NOTE(review): presumably extra annotation labels of this dataset — confirm.
        target[target==5] = 1
        target[target==6] = 3
        target_onehot = one_hot(target[:,0], num_classes=len(ds.labels))# .type(source.dtype)
        pred_onehot = one_hot(pred_mask[:,0], num_classes=len(ds.labels)) # [Batch, Classes, D, H, W]
        dice_score = compute_meandice(pred_onehot, target_onehot, ignore_empty=False)[0]
        dice_score = dice_score.cpu().numpy().flatten()
        dices.append(dice_score)
        logger.info(f"Dice {dice_score}")
        for label_name, label_val in ds.labels.items():
            y_pred[label_name].append(label_val in pred_mask)
            y_true[label_name].append(label_val in target)
        logger.info("")

    # -------------------- (Optional) Performance Evaluation ------------------------------
    delta_times = np.asarray(delta_times) #[items, rois]
    delta_times_items = np.sum(delta_times, 1)
    logger.info(f"Mean Computation took of Items {np.mean(delta_times_items):.3f} ± {np.std(delta_times_items):.3f} s")
    logger.info(f"Mean Computation took of Series {np.mean(delta_times):.3f} ± {np.std(delta_times):.3f} s")
    dices = np.asarray(dices)
    if len(dices) == 0:
        continue
    # Per-label Dice statistics: mean ± std; median [2.5%, 97.5%] percentile; min/max.
    for label_name, label_val in ds.labels.items():
        dice = dices[:, label_val]
        logger.info("Dice {}: {:.3f} ± {:.3f}; {:.3f} [{:.3f}, {:.3f}] Min {:.3f}, Max {:.3f}".format(label_name, np.mean(dice),np.std(dice), *np.percentile(dice, q=[50, 2.5, 97.5]), np.min(dice), np.max(dice)) )
    # Detection-style confusion matrix per non-background label (presence of the
    # label anywhere in the case counts as a positive).
    for label_name, label_val in ds.labels.items():
        if label_val == 0:
            continue
        y_pred_lab = np.asarray(y_pred[label_name])
        y_true_lab = np.asarray(y_true[label_name])
        tp = np.sum( (y_true_lab==1) & (y_pred_lab==1) )
        fp = np.sum( (y_true_lab==0) & (y_pred_lab==1) )
        fn = np.sum( (y_true_lab==1) & (y_pred_lab==0) )
        tn = np.sum( (y_true_lab==0) & (y_pred_lab==0) )
        conf_matrix = [ [tp, fp], [fn, tn] ]
        logger.info(f"Label {label_name} {np.sum(y_true_lab)}")
        logger.info("Confusion matrix {}".format(conf_matrix))
        logger.info("Sensitivity {:.2f}".format(tp/(tp+fn+1e-9)))
        logger.info("1- Spec {:.2f}".format(1-tn/(tn+fp+1e-9)))
| 8,639 | 44.235602 | 213 | py |
TraBS | TraBS-main/breaststudies/models/basic_model.py |
from pathlib import Path
import json
import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from torchvision.utils import save_image
from pytorch_lightning.utilities.cloud_io import load as pl_load
from pytorch_lightning.utilities.migration import pl_legacy_patch
from pytorch_msssim import ssim
from monai.losses import DiceCELoss
from monai.inferers import sliding_window_inference
from monai.metrics import compute_meandice
from breaststudies.utils import one_hot, tensor_mask2image, tensor2image
class BasicModel(pl.LightningModule):
    """Base Lightning module shared by the segmentation / image models.

    Implements the common train/val/test step logic (loss with optional deep
    supervision, metric computation, scalar and image logging), optimizer
    configuration and checkpoint helpers. Subclasses override :meth:`forward`
    and must return ``(prediction, [vertical_predictions])``, where the list
    contains intermediate outputs used for deep supervision.
    """

    def __init__(
        self,
        in_ch,
        out_ch,
        roi_size,
        optimizer=torch.optim.AdamW,
        optimizer_kwargs={'lr':1e-4, 'weight_decay':1e-2},
        lr_scheduler= None,
        lr_scheduler_kwargs={},
        loss=DiceCELoss,
        loss_kwargs={'include_background':False, 'softmax':True, 'to_onehot_y':True, 'batch':False, 'smooth_nr':1e-5, 'smooth_dr':1e-5},
        target_type='segmentation', # [segmentation, image, vector]
        sample_every_n_steps = 1000,
        **kwargs
    ):
        """
        Args:
            in_ch: number of input channels.
            out_ch: number of output channels (classes for segmentation).
            roi_size: patch size used by :meth:`infer` (sliding-window inference).
            optimizer: optimizer class, instantiated in :meth:`configure_optimizers`.
            optimizer_kwargs: keyword arguments for the optimizer.
                NOTE: the default dicts are shared between calls and treated as read-only.
            lr_scheduler: optional learning-rate scheduler class.
            lr_scheduler_kwargs: keyword arguments for the scheduler.
            loss: loss class, instantiated here with ``loss_kwargs``.
            loss_kwargs: keyword arguments for the loss.
            target_type: 'segmentation' or 'image'; selects metric/logging branch.
            sample_every_n_steps: interval (in steps) at which sample images are written.
        """
        super().__init__()
        self.save_hyperparameters()
        self.in_ch = in_ch
        self.out_ch = out_ch
        self.roi_size = roi_size
        self.optimizer = optimizer
        self.optimizer_kwargs = optimizer_kwargs
        self.lr_scheduler = lr_scheduler
        self.lr_scheduler_kwargs = lr_scheduler_kwargs
        self.sample_every_n_steps = sample_every_n_steps
        self.kwargs = kwargs
        # Manual step counters per phase (train/val/test are tracked separately).
        self._step_train = -1
        self._step_val = -1
        self._step_test = -1
        self.loss_fct = loss(**loss_kwargs)
        self.target_type = target_type

    def forward(self, x_in, **kwargs):
        """Dummy forward — subclasses must override.

        Should return ``(pred, [pred_vertical, ...])``, i.e. the final output
        plus a list of intermediate outputs for deep supervision.
        """
        return x_in, [x_in]

    def predict(self, x_in, **kwargs):
        """Get final prediction (not vertical predictions, etc.)."""
        return self(x_in, **kwargs)[0]

    def infer(self, x_in, **kwargs):
        """Run sliding-window inference with Gaussian blending.

        NOTE: If x_in has shape > patch_shape, the input is separated into
        multiple overlapping patches (overlap 0.5) whose predictions are fused.
        """
        return sliding_window_inference(x_in, self.roi_size, 2, self.predict, 0.5, "gaussian", **kwargs)

    def _step(self, batch: dict, batch_idx: int, state: str, step: int):
        """Shared step logic for train/val/test.

        Computes the loss (including deep-supervision terms), logs metrics and
        periodically writes sample images. Returns the total loss tensor.
        """
        source, target = batch['source'], batch['target']
        batch_size = source.shape[0]
        interpolation_mode = 'nearest-exact' # if self.target_type == 'segmentation' else 'area'

        # Run Model
        pred, pred_vertical = self(source, spacing=batch.get('spacing', None))

        # Only relevant for image2image training: if model is float16, pred is also float16 but target (image) is float32
        if (target.dtype==torch.float32) and (pred.dtype == torch.float16):
            target = target.type(torch.float16)

        # ------------------------- Compute Loss ---------------------------
        logging_dict = {}
        logging_dict['loss'] = self.loss_fct(pred, target)
        # Deep supervision: add the loss of every intermediate output against a
        # target resized to the intermediate resolution.
        for i, pred_i in enumerate(pred_vertical):
            weight = 1 #torch.prod(torch.tensor(pred_i.shape))/torch.prod(torch.tensor(pred.shape))
            target_i = F.interpolate(target, size=pred_i.shape[2:], mode=interpolation_mode, align_corners=None)
            logging_dict['loss'] += self.loss_fct(pred_i, target_i)*weight

        # --------------------- Compute Metrics -------------------------------
        with torch.no_grad():
            if self.target_type == "segmentation":
                target_onehot = one_hot(target[:,0], num_classes=self.out_ch)
                pred_soft = F.softmax(pred, dim=1)
                pred_mask = torch.argmax(pred_soft, keepdim=True, dim=1).type(target.dtype)
                pred_onehot = one_hot(pred_mask[:,0], num_classes=self.out_ch)
                dice = compute_meandice(pred_onehot, target_onehot, include_background=True, ignore_empty=False)[0]
                soft_dice = compute_meandice(pred_soft, target_onehot, include_background=True, ignore_empty=False)[0]
                logging_dict['dice'] = torch.mean(dice)
                # BUGFIX: the 'softdice' scalar previously logged torch.mean(dice)
                # (the hard dice) instead of the soft dice computed above.
                logging_dict['softdice'] = torch.mean(soft_dice)
                logging_dict['ce'] = F.cross_entropy(pred, target[:,0].long())
                for label_n in range(self.out_ch):
                    # A one-hot class weight isolates the CE contribution of a single label.
                    hotlabel = torch.zeros(self.out_ch, device=pred.device)
                    hotlabel[label_n] = 1
                    logging_dict['dice_label_'+str(label_n)] = dice[label_n]
                    logging_dict['softdice_label_'+str(label_n)] = soft_dice[label_n]
                    logging_dict['ce_label_'+str(label_n)] = F.cross_entropy(pred, target[:,0].long(), reduction='sum', weight=hotlabel)
                    if state == "val" and (label_n>1):
                        # Presence/absence bookkeeping per batch (only labels > 1).
                        true_lab = (label_n in target)
                        pred_lab = (label_n in pred_mask)
                        logging_dict['tp_'+str(label_n)] = float(true_lab and pred_lab)
                        logging_dict['tn_'+str(label_n)] = float((not true_lab) and (not pred_lab))
                        logging_dict['fp_'+str(label_n)] = float((not true_lab) and pred_lab)
                        logging_dict['fn_'+str(label_n)] = float(true_lab and (not pred_lab))
                        logging_dict['acc_'+str(label_n)] = logging_dict['tp_'+str(label_n)]+logging_dict['tn_'+str(label_n)]
            elif self.target_type == "image":
                logging_dict['L1'] = F.l1_loss(pred, target)
                logging_dict['L2'] = F.mse_loss(pred, target)
                # The (x+1)/2 shift maps [-1, 1] outputs into SSIM's [0, 1] range.
                logging_dict['SSIM'] = ssim((pred+1)/2, (target+1)/2, data_range=1, nonnegative_ssim=True)

        # ----------------- Log Scalars ----------------------
        for metric_name, metric_val in logging_dict.items():
            self.log(f"{state}/{metric_name}", metric_val.cpu() if hasattr(metric_val, 'cpu') else metric_val, batch_size=batch_size, on_step=True, on_epoch=True)

        # ------------------ Log Image -----------------------
        if (step % self.sample_every_n_steps == 0):
            log_step = step // self.sample_every_n_steps
            path_out = Path(self.logger.log_dir)/'images'/state
            path_out.mkdir(parents=True, exist_ok=True)
            if self.target_type == "segmentation":
                images = tensor_mask2image(source, pred_onehot)
                save_image(images, path_out/f'sample_{log_step}.png', nrow=images.shape[0]//source.shape[1], normalize=True, scale_each=True)
            elif self.target_type == "image":
                images = torch.cat([tensor2image(img)[:32] for img in (source, target, pred)])
                save_image(images, path_out/f'sample_{log_step}.png', nrow=images.shape[0]//3, normalize=True, scale_each=True)

        return logging_dict['loss']

    def training_step(self, batch: dict, batch_idx: int):
        self._step_train += 1
        return self._step(batch, batch_idx, "train", self._step_train)

    def validation_step(self, batch: dict, batch_idx: int):
        self._step_val += 1
        return self._step(batch, batch_idx, "val", self._step_val)

    def test_step(self, batch: dict, batch_idx: int):
        self._step_test += 1
        return self._step(batch, batch_idx, "test", self._step_test)

    def configure_optimizers(self):
        """Instantiate the optimizer and, if configured, the LR scheduler."""
        optimizer = self.optimizer(self.parameters(), **self.optimizer_kwargs)
        if self.lr_scheduler is not None:
            lr_scheduler = self.lr_scheduler(optimizer, **self.lr_scheduler_kwargs)
            return [optimizer], [lr_scheduler]
        else:
            return optimizer

    @classmethod
    def save_best_checkpoint(cls, path_checkpoint_dir, best_model_path):
        """Record the best checkpoint's filename in ``best_checkpoint.json``.

        Intended to be called with the Lightning log dir
        (``<run>/lightning_logs/version_<n>``), which is where
        :meth:`get_best_checkpoint` reads the JSON from.
        """
        with open(Path(path_checkpoint_dir) / 'best_checkpoint.json', 'w') as f:
            json.dump({'best_model_epoch': Path(best_model_path).name}, f)

    @classmethod
    def get_best_checkpoint(cls, path_checkpoint_dir, version=0, **kwargs):
        """Resolve the path of the best checkpoint recorded for ``version``.

        The JSON is read from ``lightning_logs/version_<n>``; the checkpoint
        file itself is expected directly inside ``path_checkpoint_dir``.
        """
        path_version = 'lightning_logs/version_'+str(version)
        with open(Path(path_checkpoint_dir) / path_version/ 'best_checkpoint.json', 'r') as f:
            path_rel_best_checkpoint = Path(json.load(f)['best_model_epoch'])
        return Path(path_checkpoint_dir)/path_rel_best_checkpoint

    @classmethod
    def load_best_checkpoint(cls, path_checkpoint_dir, version=0, **kwargs):
        """Load the model from the best checkpoint of the given run/version."""
        path_best_checkpoint = cls.get_best_checkpoint(path_checkpoint_dir, version)
        return cls.load_from_checkpoint(path_best_checkpoint, **kwargs)

    def load_pretrained(self, checkpoint_path, map_location=None, **kwargs):
        """Load (a subset of) pretrained weights from a file or run directory.

        Args:
            checkpoint_path: checkpoint file, or a run directory, in which case
                the best checkpoint is resolved via :meth:`get_best_checkpoint`.
            map_location: optional device mapping; defaults to loading on CPU.
        """
        if checkpoint_path.is_dir():
            checkpoint_path = self.get_best_checkpoint(checkpoint_path, **kwargs)
        with pl_legacy_patch():
            if map_location is not None:
                checkpoint = pl_load(checkpoint_path, map_location=map_location)
            else:
                checkpoint = pl_load(checkpoint_path, map_location=lambda storage, loc: storage)
        return self.load_weights(checkpoint["state_dict"], **kwargs)

    def load_weights(self, pretrained_weights, **kwargs):
        """Update the model state dict with the matching pretrained weights.

        A ``filter`` callable may be passed via kwargs to restrict which keys
        are copied (default: every key that also exists in this model).
        Returns ``self`` to allow chaining.
        """
        filter = kwargs.get('filter', lambda key:key in pretrained_weights)
        init_weights = self.state_dict()
        pretrained_weights = {key: value for key, value in pretrained_weights.items() if filter(key)}
        init_weights.update(pretrained_weights)
        self.load_state_dict(init_weights, strict=True)
        return self
| 9,994 | 43.820628 | 169 | py |
TraBS | TraBS-main/breaststudies/models/monai_mods/swin_unetr.py | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Sequence, Tuple, Type, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from torch.nn import LayerNorm
from monai.networks.blocks import MLPBlock as Mlp
from monai.networks.blocks import UnetOutBlock, UnetrBasicBlock, UnetrUpBlock
from monai.networks.layers import DropPath, trunc_normal_
from monai.utils import ensure_tuple_rep, optional_import
from breaststudies.models.monai_mods.blocks import PatchEmbed
rearrange, _ = optional_import("einops", name="rearrange")
class SwinUNETR(nn.Module):
    """
    Swin UNETR based on: "Hatamizadeh et al.,
    Swin UNETR: Swin Transformers for Semantic Segmentation of Brain Tumors in MRI Images
    <https://arxiv.org/abs/2201.01266>"

    Modified from the MONAI reference implementation: per-stage patch sizes,
    per-stage feature sizes, optional deep supervision heads and an optional
    voxel-spacing embedding (``use_spacing``) have been added.
    """
    def __init__(
        self,
        img_size: Union[Sequence[int], int],
        in_channels: int,
        out_channels: int,
        patch_sizes: Sequence[Union[Sequence[int], int]] = (2, 2, 2, 2, 2),
        kernel_sizes: Sequence[Union[Sequence[int], int]] = (3, 3, 3, 3, 3, 3),
        kernel_size_emb: Union[Sequence[int], int] = 2,
        depths: Sequence[int] = (2, 2, 2, 2),
        num_heads: Sequence[int] = (3, 6, 12, 24),
        feature_sizes: Sequence[int] = (24, 24, 48, 96, 192, 384),
        norm_name: Union[Tuple, str] = "instance",
        drop_rate: float = 0.0,
        attn_drop_rate: float = 0.0,
        dropout_path_rate: float = 0.0,
        normalize: bool = True,
        use_checkpoint: bool = False,
        spatial_dims: int = 3,
        deep_supervision=False,
        use_spacing = False,
    ) -> None:
        """
        Args:
            img_size: dimension of input image.
            in_channels: dimension of input channels.
            out_channels: dimension of output channels.
            patch_sizes: patch size in each layer. Determines the down sampling factor in PatchEmbed & PatchMerging and up sampling in transposed convolutions
            kernel_sizes: Convolution kernel sizes in each layer. Affects only UNet part. Must be len(patch_sizes)+1
            kernel_size_emb: kernel size for patch embeddings (Patch Partition Block). Affects only SwinTransformer
            depths: number of SwinTransformerBlocks in each layer of the SwinTransformer. Must be len(patch_sizes)-1
            num_heads: number of attention heads in each layer SwinTransformerBlock. Must be len(patch_sizes)-1
            feature_sizes: Number of features/channels in each layer of the SwinTransformer and UNet. Must be len(patch_sizes)+1
            norm_name: feature normalization type and arguments.
            drop_rate: dropout rate.
            attn_drop_rate: attention dropout rate.
            dropout_path_rate: drop path rate.
            normalize: normalize output intermediate features in each stage.
            use_checkpoint: use gradient checkpointing for reduced memory usage.
            spatial_dims: number of spatial dims.
            deep_supervision: Predict output for deep layers - allows deep supervision
            use_spacing: Use spacing Information as additional input information
        Examples::
            # for 3D single channel input with size (96,96,96), 4-channel output and feature size of 48.
            >>> net = SwinUNETR(img_size=(96,96,96), in_channels=1, out_channels=4, feature_size=48)
            # for 3D 4-channel input with size (128,128,128), 3-channel output and (2,4,2,2) layers in each stage.
            >>> net = SwinUNETR(img_size=(128,128,128), in_channels=4, out_channels=3, depths=(2,4,2,2))
            # for 2D single channel input with size (96,96), 2-channel output and gradient checkpointing.
            >>> net = SwinUNETR(img_size=(96,96), in_channels=3, out_channels=2, use_checkpoint=True, spatial_dims=2)
        """
        super().__init__()
        self.img_size = ensure_tuple_rep(img_size, spatial_dims)
        window_size = ensure_tuple_rep(7, spatial_dims)
        if not (spatial_dims == 2 or spatial_dims == 3):
            raise ValueError("spatial dimension should be 2 or 3.")
        if not (0 <= drop_rate <= 1):
            raise ValueError("dropout rate should be between 0 and 1.")
        if not (0 <= attn_drop_rate <= 1):
            raise ValueError("attention dropout rate should be between 0 and 1.")
        if not (0 <= dropout_path_rate <= 1):
            raise ValueError("drop path rate should be between 0 and 1.")
        self.normalize = normalize
        self.use_spacing = use_spacing
        # deep_supervision=True enables one auxiliary head per decoder level
        # except the full-resolution one; an int selects how many levels get one.
        if isinstance(deep_supervision, bool):
            self.deep_supervision = len(feature_sizes)-2 if deep_supervision else 0
        else:
            self.deep_supervision = deep_supervision
        # SwinTransformer2 additionally embeds the voxel spacing (passed via forward kwargs)
        Transformer = SwinTransformer2 if use_spacing else SwinTransformer
        self.swinViT = Transformer(
            in_chans=in_channels,
            embed_dim=feature_sizes[1:],
            window_size=window_size,
            kernel_size=kernel_size_emb,
            patch_sizes=patch_sizes,
            depths=depths,
            num_heads=num_heads,
            mlp_ratio=4.0,
            qkv_bias=True,
            drop_rate=drop_rate,
            attn_drop_rate=attn_drop_rate,
            drop_path_rate=dropout_path_rate,
            norm_layer=nn.LayerNorm,
            use_checkpoint=use_checkpoint,
            spatial_dims=spatial_dims,
        )
        # full-resolution encoder block operating directly on the input image
        self.encoder_in = UnetrBasicBlock(
            spatial_dims=spatial_dims,
            in_channels=in_channels,
            out_channels=feature_sizes[0],
            kernel_size=kernel_sizes[0],
            stride=1,
            norm_name=norm_name,
            res_block=True,
        )
        # one residual conv block per transformer stage output (skip connections)
        self.encoders = nn.ModuleList([
            UnetrBasicBlock(
                spatial_dims=spatial_dims,
                in_channels=feature_size,
                out_channels=feature_size,
                kernel_size=kernel_size,
                stride=1,
                norm_name=norm_name,
                res_block=True,
            )
            for feature_size, kernel_size in zip(feature_sizes[1:], kernel_sizes[1:])
        ])
        # decoder i upsamples from feature_sizes[i+1] to feature_sizes[i]
        self.decoders = nn.ModuleList([
            UnetrUpBlock(
                spatial_dims=spatial_dims,
                in_channels=feature_sizes[i+1],
                out_channels=feature_sizes[i],
                kernel_size=kernel_sizes[i],
                upsample_kernel_size=patch_sizes[i], # Is also used as stride in Transposed-Conv! Must match patch_sizes
                norm_name=norm_name,
                res_block=True,
            )
            for i in range(len(kernel_sizes)-1)
        ])
        # auxiliary deep-supervision output heads (one per supervised decoder level)
        self.out_ver = nn.ModuleList([
            UnetOutBlock(spatial_dims=spatial_dims, in_channels=feature_sizes[n], out_channels=out_channels)
            for n in range(1, self.deep_supervision+1)
        ])
        self.out = UnetOutBlock(
            spatial_dims=spatial_dims, in_channels=feature_sizes[0], out_channels=out_channels
        )  # type: ignore
    def forward(self, x_in, return_hidden_states=False, **kwargs):
        """Run the network.

        Args:
            x_in: input image batch.
            return_hidden_states: also return the raw transformer stage outputs.
            **kwargs: forwarded to the transformer (e.g. ``spacing`` when
                ``use_spacing`` is enabled).

        Returns:
            ``(logits, out_ver)`` or ``(logits, out_ver, hidden_states_out)``;
            ``out_ver`` holds the deep-supervision logits, highest resolution first.
        """
        # hidden_states_out[i] is the transformer feature map at scale i
        hidden_states_out = self.swinViT(x_in, normalize=self.normalize, **kwargs)
        # UNet encoder path: full-resolution block + one block per transformer scale
        encs = [self.encoder_in(x_in)]
        for encoder, hidden in zip(self.encoders, hidden_states_out):
            encs.append(encoder(hidden))
        dec = encs.pop(-1)  # deepest feature map acts as the bottleneck
        out_ver = []
        # decoder path: upsample and fuse with the matching encoder skip
        for i, (decoder, enc) in enumerate(zip(reversed(self.decoders), reversed(encs))):
            dec = decoder(dec, enc)
            v = len(self.decoders)-i-1  # decoder level (0 = full resolution)
            if (v <= self.deep_supervision) and (v>0):
                out_ver.append(self.out_ver[v-1](dec))  # auxiliary head for this level
        logits = self.out(dec)
        out_ver = out_ver[::-1]  # reorder auxiliary outputs from high to low resolution
        if return_hidden_states:
            return logits, out_ver, hidden_states_out
        else:
            return logits, out_ver
def window_partition(x, window_size):
    """window partition operation based on: "Liu et al.,
    Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
    <https://arxiv.org/abs/2103.14030>"
    https://github.com/microsoft/Swin-Transformer

    Splits a channels-last feature map into non-overlapping local windows.

    Args:
        x: input tensor, shaped (b, d, h, w, c) for 3D or (b, h, w, c) for 2D.
        window_size: local window size per spatial dimension.

    Returns:
        Tensor of shape (num_windows * b, prod(window_size), c).
    """
    ndim = x.dim()
    if ndim == 5:
        batch, depth, height, width, channels = x.shape
        wd, wh, ww = window_size[0], window_size[1], window_size[2]
        # carve each spatial axis into (num_blocks, window_extent) pairs
        blocks = x.view(batch, depth // wd, wd, height // wh, wh, width // ww, ww, channels)
        # group the block indices together, then the in-window offsets
        permuted = blocks.permute(0, 1, 3, 5, 2, 4, 6, 7).contiguous()
        windows = permuted.view(-1, wd * wh * ww, channels)
    elif ndim == 4:
        batch, height, width, channels = x.shape
        wh, ww = window_size[0], window_size[1]
        blocks = x.view(batch, height // wh, wh, width // ww, ww, channels)
        windows = blocks.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, wh * ww, channels)
    return windows
def window_reverse(windows, window_size, dims):
    """window reverse operation based on: "Liu et al.,
    Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
    <https://arxiv.org/abs/2103.14030>"
    https://github.com/microsoft/Swin-Transformer

    Inverse of ``window_partition``: reassembles per-window tokens into the
    original channels-last feature map.

    Args:
        windows: windows tensor of shape (num_windows * b, prod(window_size), c).
        window_size: local window size.
        dims: dimension values (b, d, h, w) for 3D or (b, h, w) for 2D.
    """
    if len(dims) == 4:
        b, d, h, w = dims
        x = windows.view(
            b,
            d // window_size[0],
            h // window_size[1],
            w // window_size[2],
            window_size[0],
            window_size[1],
            window_size[2],
            -1,
        )
        x = x.permute(0, 1, 4, 2, 5, 3, 6, 7).contiguous().view(b, d, h, w, -1)
    elif len(dims) == 3:
        b, h, w = dims
        # FIX: the width axis must be divided by window_size[1]; the previous
        # code used window_size[0], which is wrong (and shape-incompatible)
        # for non-square windows.
        x = windows.view(b, h // window_size[0], w // window_size[1], window_size[0], window_size[1], -1)
        x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(b, h, w, -1)
    return x
def get_window_size(x_size, window_size, shift_size=None):
    """Computing window size based on: "Liu et al.,
    Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
    <https://arxiv.org/abs/2103.14030>"
    https://github.com/microsoft/Swin-Transformer

    Clamps the window (and shift) to the input extent: any axis whose input
    size is not larger than the window keeps the full extent and gets a zero
    shift, since shifting a single full-size window is meaningless.

    Args:
        x_size: input size.
        window_size: local window size.
        shift_size: window shifting size.
    """
    adjusted_window = []
    adjusted_shift = [] if shift_size is not None else None
    for axis, extent in enumerate(x_size):
        if extent <= window_size[axis]:
            # window covers the whole axis: use the axis extent, disable shifting
            adjusted_window.append(extent)
            if adjusted_shift is not None:
                adjusted_shift.append(0)
        else:
            adjusted_window.append(window_size[axis])
            if adjusted_shift is not None:
                adjusted_shift.append(shift_size[axis])
    if adjusted_shift is None:
        return tuple(adjusted_window)
    return tuple(adjusted_window), tuple(adjusted_shift)
class WindowAttention(nn.Module):
    """
    Window based multi-head self attention module with relative position bias based on: "Liu et al.,
    Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
    <https://arxiv.org/abs/2103.14030>"
    https://github.com/microsoft/Swin-Transformer
    """
    def __init__(
        self,
        dim: int,
        num_heads: int,
        window_size: Sequence[int],
        qkv_bias: bool = False,
        attn_drop: float = 0.0,
        proj_drop: float = 0.0,
    ) -> None:
        """
        Args:
            dim: number of feature channels.
            num_heads: number of attention heads.
            window_size: local window size.
            qkv_bias: add a learnable bias to query, key, value.
            attn_drop: attention dropout rate.
            proj_drop: dropout rate of output.
        """
        super().__init__()
        self.dim = dim
        self.window_size = window_size
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim**-0.5  # 1/sqrt(d_k) dot-product attention scaling
        # torch.meshgrid only accepts the `indexing` kwarg in newer torch versions;
        # __kwdefaults__ is None on older versions, which is used as a feature probe.
        mesh_args = torch.meshgrid.__kwdefaults__
        if len(self.window_size) == 3:
            # one learnable bias per relative (d, h, w) offset and per head
            self.relative_position_bias_table = nn.Parameter(
                torch.zeros(
                    (2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1),
                    num_heads,
                )
            )
            coords_d = torch.arange(self.window_size[0])
            coords_h = torch.arange(self.window_size[1])
            coords_w = torch.arange(self.window_size[2])
            if mesh_args is not None:
                coords = torch.stack(torch.meshgrid(coords_d, coords_h, coords_w, indexing="ij"))
            else:
                coords = torch.stack(torch.meshgrid(coords_d, coords_h, coords_w))
            coords_flatten = torch.flatten(coords, 1)
            # pairwise relative offsets between all token positions of a window
            relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
            relative_coords = relative_coords.permute(1, 2, 0).contiguous()
            # shift offsets to be non-negative, then fold the axes into one flat index
            relative_coords[:, :, 0] += self.window_size[0] - 1
            relative_coords[:, :, 1] += self.window_size[1] - 1
            relative_coords[:, :, 2] += self.window_size[2] - 1
            relative_coords[:, :, 0] *= (2 * self.window_size[1] - 1) * (2 * self.window_size[2] - 1)
            relative_coords[:, :, 1] *= 2 * self.window_size[2] - 1
        elif len(self.window_size) == 2:
            self.relative_position_bias_table = nn.Parameter(
                torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
            )
            coords_h = torch.arange(self.window_size[0])
            coords_w = torch.arange(self.window_size[1])
            if mesh_args is not None:
                coords = torch.stack(torch.meshgrid(coords_h, coords_w, indexing="ij"))
            else:
                coords = torch.stack(torch.meshgrid(coords_h, coords_w))
            coords_flatten = torch.flatten(coords, 1)
            relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
            relative_coords = relative_coords.permute(1, 2, 0).contiguous()
            relative_coords[:, :, 0] += self.window_size[0] - 1
            relative_coords[:, :, 1] += self.window_size[1] - 1
            relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)
        self.register_buffer("relative_position_index", relative_position_index)
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        trunc_normal_(self.relative_position_bias_table, std=0.02)
        self.softmax = nn.Softmax(dim=-1)
    def forward(self, x, mask):
        """Apply windowed multi-head self attention.

        Args:
            x: window tokens of shape (num_windows * b, n, c).
            mask: optional (num_windows, n, n) additive attention mask, or None.
        """
        b, n, c = x.shape
        # project to q, k, v and split heads: each of q/k/v is (b, heads, n, c/heads)
        qkv = self.qkv(x).reshape(b, n, 3, self.num_heads, c // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        q = q * self.scale
        attn = q @ k.transpose(-2, -1)
        # add the learned relative position bias to every head's attention logits
        relative_position_bias = self.relative_position_bias_table[
            self.relative_position_index.clone()[:n, :n].reshape(-1)
        ].reshape(n, n, -1)
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
        attn = attn + relative_position_bias.unsqueeze(0)
        if mask is not None:
            nw = mask.shape[0]
            attn = attn.view(b // nw, nw, self.num_heads, n, n) + mask.unsqueeze(1).unsqueeze(0) # TODO: Why is attention masked added and not multiplied (is the addition of -100 reasonable?)
            attn = attn.view(-1, self.num_heads, n, n)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)
        attn = self.attn_drop(attn)
        # weighted sum over values, then merge heads back into the channel dim
        x = (attn @ v).transpose(1, 2).reshape(b, n, c)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class SwinTransformerBlock(nn.Module):
    """
    Swin Transformer block based on: "Liu et al.,
    Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
    <https://arxiv.org/abs/2103.14030>"
    https://github.com/microsoft/Swin-Transformer

    One pre-norm transformer block: (shifted-)window attention followed by an
    MLP, each wrapped in a residual connection with stochastic depth.
    """
    def __init__(
        self,
        dim: int,
        num_heads: int,
        window_size: Sequence[int],
        shift_size: Sequence[int],
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,
        drop: float = 0.0,
        attn_drop: float = 0.0,
        drop_path: float = 0.0,
        act_layer: str = "GELU",
        norm_layer: Type[LayerNorm] = nn.LayerNorm,  # type: ignore
        use_checkpoint: bool = False,
    ) -> None:
        """
        Args:
            dim: number of feature channels.
            num_heads: number of attention heads.
            window_size: local window size.
            shift_size: window shift size.
            mlp_ratio: ratio of mlp hidden dim to embedding dim.
            qkv_bias: add a learnable bias to query, key, value.
            drop: dropout rate.
            attn_drop: attention dropout rate.
            drop_path: stochastic depth rate.
            act_layer: activation layer.
            norm_layer: normalization layer.
            use_checkpoint: use gradient checkpointing for reduced memory usage.
        """
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        self.use_checkpoint = use_checkpoint
        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim,
            window_size=self.window_size,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            attn_drop=attn_drop,
            proj_drop=drop,
        )
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(hidden_size=dim, mlp_dim=mlp_hidden_dim, act=act_layer, dropout_rate=drop, dropout_mode="swin")
    def forward_part1(self, x, mask_matrix):
        """Windowed (optionally shifted) self-attention over a channels-last map."""
        x_shape = x.size()
        x = self.norm1(x)
        # pad every spatial axis up to a multiple of the (clamped) window size
        if len(x_shape) == 5:
            b, d, h, w, c = x.shape
            window_size, shift_size = get_window_size((d, h, w), self.window_size, self.shift_size)
            pad_l = pad_t = pad_d0 = 0
            pad_d1 = (window_size[0] - d % window_size[0]) % window_size[0]
            pad_b = (window_size[1] - h % window_size[1]) % window_size[1]
            pad_r = (window_size[2] - w % window_size[2]) % window_size[2]
            x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b, pad_d0, pad_d1))
            _, dp, hp, wp, _ = x.shape
            dims = [b, dp, hp, wp]
        elif len(x_shape) == 4:
            b, h, w, c = x.shape
            window_size, shift_size = get_window_size((h, w), self.window_size, self.shift_size)
            pad_l = pad_t = 0
            pad_r = (window_size[0] - h % window_size[0]) % window_size[0]
            pad_b = (window_size[1] - w % window_size[1]) % window_size[1]
            x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
            _, hp, wp, _ = x.shape
            dims = [b, hp, wp]
        # cyclic shift (SW-MSA); the precomputed mask hides cross-boundary attention
        if any(i > 0 for i in shift_size):
            if len(x_shape) == 5:
                shifted_x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), dims=(1, 2, 3))
            elif len(x_shape) == 4:
                shifted_x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1]), dims=(1, 2))
            attn_mask = mask_matrix
        else:
            shifted_x = x
            attn_mask = None
        # attention within local windows, then merge windows back into a map
        x_windows = window_partition(shifted_x, window_size)
        attn_windows = self.attn(x_windows, mask=attn_mask)
        attn_windows = attn_windows.view(-1, *(window_size + (c,)))
        shifted_x = window_reverse(attn_windows, window_size, dims)
        # undo the cyclic shift
        if any(i > 0 for i in shift_size):
            if len(x_shape) == 5:
                x = torch.roll(shifted_x, shifts=(shift_size[0], shift_size[1], shift_size[2]), dims=(1, 2, 3))
            elif len(x_shape) == 4:
                x = torch.roll(shifted_x, shifts=(shift_size[0], shift_size[1]), dims=(1, 2))
        else:
            x = shifted_x
        # crop the padding back off
        if len(x_shape) == 5:
            if pad_d1 > 0 or pad_r > 0 or pad_b > 0:
                x = x[:, :d, :h, :w, :].contiguous()
        elif len(x_shape) == 4:
            if pad_r > 0 or pad_b > 0:
                x = x[:, :h, :w, :].contiguous()
        return x
    def forward_part2(self, x):
        """MLP sub-block (pre-norm) with stochastic depth."""
        return self.drop_path(self.mlp(self.norm2(x)))
    def forward(self, x, mask_matrix):
        """Attention + MLP, each as a residual branch; optionally checkpointed."""
        shortcut = x
        if self.use_checkpoint:
            x = checkpoint.checkpoint(self.forward_part1, x, mask_matrix)
        else:
            x = self.forward_part1(x, mask_matrix)
        x = shortcut + self.drop_path(x)
        if self.use_checkpoint:
            x = x + checkpoint.checkpoint(self.forward_part2, x)
        else:
            x = x + self.forward_part2(x)
        return x
class PatchMerging(nn.Module):
    """
    Patch merging layer based on: "Liu et al.,
    Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
    <https://arxiv.org/abs/2103.14030>"
    https://github.com/microsoft/Swin-Transformer

    Generalized to arbitrary (possibly anisotropic) per-axis patch sizes:
    every patch of ``patch_size`` voxels is concatenated along the channel
    axis and linearly reduced.
    """
    def __init__(
        self, dim: int, norm_layer: Type[LayerNorm] = nn.LayerNorm, spatial_dims: int = 3, patch_size: Sequence[int] = 2,
    ) -> None:  # type: ignore
        """
        Args:
            dim: number of feature channels.
            norm_layer: normalization layer.
            spatial_dims: number of spatial dims.
            patch_size: per-axis down-sampling factor of the merge.
        """
        super().__init__()
        self.dim = dim
        self.patch_size = ensure_tuple_rep(patch_size, spatial_dims)
        patch_vol = np.prod(self.patch_size)
        # channel growth factor after reduction (geometric mean of the patch size)
        patch_length = int(np.round(np.power(patch_vol,1/spatial_dims))) # NOTE: For non isotropic it's only an approximation
        self.reduction = nn.Linear(patch_vol * dim, patch_length * dim, bias=False)
        self.norm = norm_layer(patch_vol * dim)
    def forward(self, x):
        """Merge patches of a channels-last map; pads odd extents first."""
        x_shape = x.size()
        if len(x_shape) == 5:
            _, d, h, w, _ = x_shape
            pd,ph,pw = self.patch_size
            # FIX: pad whenever an extent is not divisible by its patch size.
            # The previous `dim % p == 1` test and `dim % p` pad amount are
            # only correct for p == 2; this generalizes to any patch size.
            pad_d = (pd - d % pd) % pd
            pad_h = (ph - h % ph) % ph
            pad_w = (pw - w % pw) % pw
            if pad_d or pad_h or pad_w:
                x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h, 0, pad_d))
            # concatenate every in-patch offset along the channel axis
            feat_groups = []
            for offset_d in range(pd):
                for offset_h in range(ph):
                    for offset_w in range(pw):
                        feat_groups.append(x[:, offset_d::pd, offset_h::ph, offset_w::pw, :])
            x = torch.cat(feat_groups, -1)
        elif len(x_shape) == 4:
            _, h, w, _ = x_shape
            ph,pw = self.patch_size
            pad_h = (ph - h % ph) % ph
            pad_w = (pw - w % pw) % pw
            if pad_h or pad_w:
                x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
            feat_groups = []
            for offset_h in range(ph):
                for offset_w in range(pw):
                    feat_groups.append(x[:, offset_h::ph, offset_w::pw, :])
            x = torch.cat(feat_groups, -1)
        x = self.norm(x)
        x = self.reduction(x)
        return x
def compute_mask(dims, window_size, shift_size, device):
    """Computing region masks based on: "Liu et al.,
    Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
    <https://arxiv.org/abs/2103.14030>"
    https://github.com/microsoft/Swin-Transformer

    Labels the regions created by the cyclic shift with distinct ids, then
    builds an additive attention mask (-100 between tokens of different
    regions, 0 within a region) for every window.

    Args:
        dims: dimension values.
        window_size: local window size.
        shift_size: shift size.
        device: device.
    """
    cnt = 0
    # NOTE: the loop variables below intentionally reuse (shadow) the names
    # d/h/w as slice objects covering the three shift regions per axis.
    if len(dims) == 3:
        d, h, w = dims
        img_mask = torch.zeros((1, d, h, w, 1), device=device)
        for d in slice(-window_size[0]), slice(-window_size[0], -shift_size[0]), slice(-shift_size[0], None):
            for h in slice(-window_size[1]), slice(-window_size[1], -shift_size[1]), slice(-shift_size[1], None):
                for w in slice(-window_size[2]), slice(-window_size[2], -shift_size[2]), slice(-shift_size[2], None):
                    img_mask[:, d, h, w, :] = cnt
                    cnt += 1
    elif len(dims) == 2:
        h, w = dims
        img_mask = torch.zeros((1, h, w, 1), device=device)
        for h in slice(-window_size[0]), slice(-window_size[0], -shift_size[0]), slice(-shift_size[0], None):
            for w in slice(-window_size[1]), slice(-window_size[1], -shift_size[1]), slice(-shift_size[1], None):
                img_mask[:, h, w, :] = cnt
                cnt += 1
    # per-window pairwise comparison of region ids -> additive mask
    mask_windows = window_partition(img_mask, window_size)
    mask_windows = mask_windows.squeeze(-1)
    attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
    attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
    return attn_mask
class BasicLayer(nn.Module):
    """
    Basic Swin Transformer layer in one stage based on: "Liu et al.,
    Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
    <https://arxiv.org/abs/2103.14030>"
    https://github.com/microsoft/Swin-Transformer

    A stack of ``depth`` SwinTransformerBlocks (alternating W-MSA / SW-MSA)
    followed by an optional patch-merging downsample.
    """
    def __init__(
        self,
        dim: int,
        depth: int,
        num_heads: int,
        window_size: Sequence[int],
        patch_size: Sequence[int],
        drop_path: list,
        mlp_ratio: float = 4.0,
        qkv_bias: bool = False,
        drop: float = 0.0,
        attn_drop: float = 0.0,
        norm_layer: Type[LayerNorm] = nn.LayerNorm,  # type: ignore
        downsample: Type[nn.Module] = None,  # type: ignore
        use_checkpoint: bool = False,
    ) -> None:
        """
        Args:
            dim: number of feature channels.
            depth: number of SwinTransformerBlocks in this stage.
            num_heads: number of attention heads.
            window_size: local window size.
            patch_size: patch size of the trailing downsample layer.
            drop_path: stochastic depth rate.
            mlp_ratio: ratio of mlp hidden dim to embedding dim.
            qkv_bias: add a learnable bias to query, key, value.
            drop: dropout rate.
            attn_drop: attention dropout rate.
            norm_layer: normalization layer.
            downsample: downsample layer class applied at the end of the layer.
            use_checkpoint: use gradient checkpointing for reduced memory usage.
        """
        super().__init__()
        self.window_size = window_size
        self.shift_size = tuple(i // 2 for i in window_size)
        self.no_shift = tuple(0 for i in window_size)
        self.depth = depth
        self.use_checkpoint = use_checkpoint
        self.blocks = nn.ModuleList(
            [
                SwinTransformerBlock(
                    dim=dim,
                    num_heads=num_heads,
                    window_size=self.window_size,
                    # alternate plain (W-MSA) and shifted (SW-MSA) attention
                    shift_size=self.no_shift if (i % 2 == 0) else self.shift_size,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    drop=drop,
                    attn_drop=attn_drop,
                    drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                    norm_layer=norm_layer,
                    use_checkpoint=use_checkpoint,
                )
                for i in range(depth)
            ]
        )
        self.downsample = downsample
        if self.downsample is not None:
            # instantiate the downsample class (e.g. PatchMerging) for this stage
            self.downsample = downsample(dim=dim, norm_layer=norm_layer, spatial_dims=len(self.window_size),
                                         patch_size=patch_size)
    def forward(self, x):
        """Run all blocks (channels-last internally) and the optional downsample."""
        x_shape = x.size()
        if len(x_shape) == 5:
            b, c, d, h, w = x_shape
            window_size, shift_size = get_window_size((d, h, w), self.window_size, self.shift_size)
            x = rearrange(x, "b c d h w -> b d h w c")
            # padded extents used to size the shifted-window attention mask
            dp = int(np.ceil(d / window_size[0])) * window_size[0]
            hp = int(np.ceil(h / window_size[1])) * window_size[1]
            wp = int(np.ceil(w / window_size[2])) * window_size[2]
            attn_mask = compute_mask([dp, hp, wp], window_size, shift_size, x.device)
            for blk in self.blocks:
                x = blk(x, attn_mask)
            x = x.view(b, d, h, w, -1)
            if self.downsample is not None:
                x = self.downsample(x)
            x = rearrange(x, "b d h w c -> b c d h w")
        elif len(x_shape) == 4:
            b, c, h, w = x_shape
            window_size, shift_size = get_window_size((h, w), self.window_size, self.shift_size)
            x = rearrange(x, "b c h w -> b h w c")
            hp = int(np.ceil(h / window_size[0])) * window_size[0]
            wp = int(np.ceil(w / window_size[1])) * window_size[1]
            attn_mask = compute_mask([hp, wp], window_size, shift_size, x.device)
            for blk in self.blocks:
                x = blk(x, attn_mask)
            x = x.view(b, h, w, -1)
            if self.downsample is not None:
                x = self.downsample(x)
            x = rearrange(x, "b h w c -> b c h w")
        return x
class SwinTransformer(nn.Module):
    """
    Swin Transformer based on: "Liu et al.,
    Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
    <https://arxiv.org/abs/2103.14030>"
    https://github.com/microsoft/Swin-Transformer

    Patch embedding followed by ``len(depths)`` BasicLayer stages; the forward
    pass returns the (optionally layer-normalized) feature map of every stage.
    """
    def __init__(
        self,
        in_chans: int,
        embed_dim: Sequence[int],
        window_size: Sequence[int],
        kernel_size: Union[Sequence[int], int],
        patch_sizes: Sequence[Union[Sequence[int], int]],
        depths: Sequence[int],
        num_heads: Sequence[int],
        mlp_ratio: float = 4.0,
        qkv_bias: bool = True,
        drop_rate: float = 0.0,
        attn_drop_rate: float = 0.0,
        drop_path_rate: float = 0.0,
        norm_layer: Type[LayerNorm] = nn.LayerNorm,  # type: ignore
        patch_norm: bool = False,
        use_checkpoint: bool = False,
        spatial_dims: int = 3,
    ) -> None:
        """
        Args:
            in_chans: dimension of input channels.
            embed_dim: number of linear projection output channels (one per stage).
            window_size: local window size.
            kernel_size: kernel size for patch embeddings
            patch_sizes: patch size in each stage.
            depths: number of layers in each stage.
            num_heads: number of attention heads.
            mlp_ratio: ratio of mlp hidden dim to embedding dim.
            qkv_bias: add a learnable bias to query, key, value.
            drop_rate: dropout rate.
            attn_drop_rate: attention dropout rate.
            drop_path_rate: stochastic depth rate.
            norm_layer: normalization layer.
            patch_norm: add normalization after patch embedding.
            use_checkpoint: use gradient checkpointing for reduced memory usage.
            spatial_dims: spatial dimension.
        """
        super().__init__()
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.patch_norm = patch_norm
        self.window_size = window_size
        self.kernel_size = kernel_size
        self.patch_sizes = patch_sizes
        self.patch_embed = PatchEmbed(
            kernel_size = self.kernel_size,
            patch_size=patch_sizes[0],
            in_chans=in_chans,
            embed_dim=embed_dim[0],
            norm_layer=norm_layer if self.patch_norm else None,  # type: ignore
            spatial_dims=spatial_dims,
        )
        self.pos_drop = nn.Dropout(p=drop_rate)
        # linearly increasing stochastic-depth rates, split across all blocks
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
        self.layers = nn.ModuleList([
            BasicLayer(
                dim=embed_dim[i_layer],
                depth=depths[i_layer],
                num_heads=num_heads[i_layer],
                window_size=self.window_size,
                # patch_sizes[0] is consumed by patch_embed; stage i downsamples
                # by patch_sizes[i+1] via its trailing PatchMerging
                patch_size=patch_sizes[i_layer+1],
                drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                norm_layer=norm_layer,
                downsample=PatchMerging,
                use_checkpoint=use_checkpoint,
            )
            for i_layer in range(self.num_layers)
        ])
    def proj_out(self, x, normalize=False):
        """Optionally layer-normalize a channels-first feature map over its channels."""
        if normalize:
            x_shape = x.size()
            if len(x_shape) == 5:
                n, ch, d, h, w = x_shape
                x = rearrange(x, "n c d h w -> n d h w c")
                x = F.layer_norm(x, [ch])
                x = rearrange(x, "n d h w c -> n c d h w")
            elif len(x_shape) == 4:
                n, ch, h, w = x_shape
                x = rearrange(x, "n c h w -> n h w c")
                x = F.layer_norm(x, [ch])
                x = rearrange(x, "n c h w -> n h w c".replace("n h w c", "n c h w").replace("n c h w", "n c h w")) if False else rearrange(x, "n h w c -> n c h w")
        return x
    def forward(self, x, normalize=True, **kwargs):
        """Return the feature maps of the embedding and of every stage."""
        x = self.patch_embed(x)
        x = self.pos_drop(x)
        x_out = [self.proj_out(x, normalize)]
        for layer in self.layers:
            x = layer(x.contiguous())
            x_out.append(self.proj_out(x, normalize))
        return x_out
class SwinTransformer2(SwinTransformer):
    """SwinTransformer variant that additionally embeds the per-sample voxel
    spacing and adds it to the patch tokens before the transformer stages."""
    def __init__(self, in_chans: int, embed_dim: Sequence[int], window_size: Sequence[int], kernel_size: Union[Sequence[int], int], patch_sizes: Sequence[Union[Sequence[int], int]], depths: Sequence[int], num_heads: Sequence[int], mlp_ratio: float = 4, qkv_bias: bool = True, drop_rate: float = 0, attn_drop_rate: float = 0, drop_path_rate: float = 0, norm_layer: Type[LayerNorm] = nn.LayerNorm, patch_norm: bool = False, use_checkpoint: bool = False, spatial_dims: int = 3) -> None:
        super().__init__(in_chans, embed_dim, window_size, kernel_size, patch_sizes, depths, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop_rate, drop_path_rate, norm_layer, patch_norm, use_checkpoint, spatial_dims)
        # continuous linear embedding of the 3-component voxel spacing
        self.spacing_embed = nn.Sequential(nn.Linear(3, embed_dim[0]), nn.ReLU(inplace=True))
        # self.spacing_embed = nn.Embedding(1000, embed_dim[0])
    def forward(self, x, spacing, normalize=True):
        """Forward pass.

        Args:
            x: input image batch.
            spacing: per-sample voxel spacing, shape (B, 3) - assumed float;
                TODO(review): confirm expected units/shape against callers.
            normalize: layer-normalize each returned stage output.
        """
        # Image Embedding
        x = self.patch_embed(x) # [B, Emb, D,H,W]
        # Spacing Embedding (a quantized lookup alternative is kept below, disabled)
        # dic_spacing = torch.round(spacing*2)/2 # round to nearest 0.5
        # dic_spacing = dic_spacing/0.5 # -> assuming spacing between [0 5)mm -> [0, 1, ..., 9)
        # spacing = dic_spacing[0]+dic_spacing[1]*10+dic_spacing[2]*100
        s = self.spacing_embed(spacing) # [B, Emb]
        # Combine Image+Spacing
        # x0 = self.embed_fusion(torch.concat([s0,x0], dim=1))
        # broadcast the spacing embedding over all spatial positions
        x = x+s[:,:,None,None,None]
        x = self.pos_drop(x)
        x_out = [self.proj_out(x, normalize)]
        for layer in self.layers:
            x = layer(x.contiguous())
            x_out.append(self.proj_out(x, normalize))
        return x_out
TraBS | TraBS-main/breaststudies/models/monai_mods/blocks.py | from typing import Sequence, Type, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import LayerNorm
from monai.networks.layers import Conv, trunc_normal_
from monai.utils import ensure_tuple_rep, optional_import
from monai.utils.module import look_up_option
Rearrange, _ = optional_import("einops.layers.torch", name="Rearrange")
SUPPORTED_EMBEDDING_TYPES = {"conv", "perceptron"}
class PatchEmbed(nn.Module):
    """
    Patch embedding block based on: "Liu et al.,
    Swin Transformer: Hierarchical Vision Transformer using Shifted Windows
    <https://arxiv.org/abs/2103.14030>"
    https://github.com/microsoft/Swin-Transformer
    Unlike ViT patch embedding block: (1) input is padded to satisfy window size requirements (2) normalized if
    specified (3) position embedding is not used.

    Modified to allow a convolution kernel larger than the stride
    (``kernel_size`` vs ``patch_size``), i.e. overlapping patch embeddings.
    Example::
        >>> from monai.networks.blocks import PatchEmbed
        >>> PatchEmbed(patch_size=2, in_chans=1, embed_dim=48, norm_layer=nn.LayerNorm, spatial_dims=3)
    """
    def __init__(
        self,
        kernel_size: Union[Sequence[int], int] = 2,
        patch_size: Union[Sequence[int], int] = 2,
        in_chans: int = 1,
        embed_dim: int = 48,
        norm_layer: Type[LayerNorm] = nn.LayerNorm,  # type: ignore
        spatial_dims: int = 3,
    ) -> None:
        """
        Args:
            kernel_size: convolution kernel size (may exceed the stride).
            patch_size: dimension of patch size (stride of the projection).
            in_chans: dimension of input channels.
            embed_dim: number of linear projection output channels.
            norm_layer: normalization layer.
            spatial_dims: spatial dimension.
        """
        super().__init__()
        if not (spatial_dims == 2 or spatial_dims == 3):
            raise ValueError("spatial dimension should be 2 or 3.")
        kernel_size = ensure_tuple_rep(kernel_size, spatial_dims)
        patch_size = ensure_tuple_rep(patch_size, spatial_dims)
        self.kernel_size = kernel_size
        self.patch_size = patch_size
        self.embed_dim = embed_dim
        # strided conv = linear projection of each (possibly overlapping) patch
        self.proj = Conv[Conv.CONV, spatial_dims](
            in_channels=in_chans, out_channels=embed_dim, kernel_size=kernel_size, stride=patch_size
        )
        if norm_layer is not None:
            self.norm = norm_layer(embed_dim)
        else:
            self.norm = None
    def forward(self, x):
        x_shape = x.size()
        if len(x_shape) == 5:
            d, h, w = x_shape[2:]
            # extra padding needed when the kernel overlaps beyond the stride
            pd,ph,pw = (a-b for a,b in zip(self.kernel_size, self.patch_size))
            # pad each axis up to a multiple of the patch size
            if w % self.patch_size[2] != 0:
                x = F.pad(x, (0, self.patch_size[2] - w % self.patch_size[2]))
            if h % self.patch_size[1] != 0:
                x = F.pad(x, (0, 0, 0, self.patch_size[1] - h % self.patch_size[1]))
            if d % self.patch_size[0] != 0:
                x = F.pad(x, (0, 0, 0, 0, 0, self.patch_size[0] - d % self.patch_size[0]))
            if any([pd>0, ph>0, pw>0]):
                x = F.pad(x, (0, pw, 0, ph, 0, pd))
        elif len(x_shape) == 4:
            # NOTE(review): unlike the 3D branch, h/w here are reduced by the
            # kernel size before the divisibility check, and no kernel-overlap
            # padding is applied - looks inconsistent with the 3D path; confirm
            # intended behavior for 2D inputs with kernel_size > patch_size.
            h, w = (a-b for a,b in zip(x_shape[2:],self.kernel_size))
            if w % self.patch_size[1] != 0:
                x = F.pad(x, (0, self.patch_size[1] - w % self.patch_size[1]))
            if h % self.patch_size[0] != 0:
                x = F.pad(x, (0, 0, 0, self.patch_size[0] - h % self.patch_size[0]))
        x = self.proj(x)
        if self.norm is not None:
            # LayerNorm over channels: flatten spatial dims, normalize, restore shape
            x_shape = x.size()
            x = x.flatten(2).transpose(1, 2)
            x = self.norm(x)
            if len(x_shape) == 5:
                d, wh, ww = x_shape[2], x_shape[3], x_shape[4]
                x = x.transpose(1, 2).view(-1, self.embed_dim, d, wh, ww)
            elif len(x_shape) == 4:
                wh, ww = x_shape[2], x_shape[3]
                x = x.transpose(1, 2).view(-1, self.embed_dim, wh, ww)
        return x
| 3,877 | 36.650485 | 111 | py |
TraBS | TraBS-main/breaststudies/augmentation/augmentations.py | from typing import Iterable, Tuple, Union, List, Optional, Sequence, Dict
from numbers import Number
from pathlib import Path
import warnings
from tqdm import tqdm
import numpy as np
import nibabel as nib
import torch
import torchio as tio
from torchio import Subject, RandomAffine, IntensityTransform, CropOrPad, Resample
from torchio import TYPE, INTENSITY, LABEL
from torchio.typing import TypeRangeFloat, TypeSextetFloat, TypeTripletFloat, TypeTripletInt, TypePath, TypeCallable
from torchio.transforms.transform import TypeMaskingMethod
from torchio.transforms.augmentation import RandomTransform
from torchio.transforms.augmentation.spatial.random_affine import _parse_default_value
from torchio.utils import to_tuple
from torchio.transforms import SpatialTransform, Compose, Transform, HistogramStandardization
from breaststudies.augmentation.helper_functions import augment_linear_downsampling_scipy, augment_contrast, resample_patient
from breaststudies.utils import get_affine2
TypeOneToSixFloat = Union[TypeRangeFloat, TypeTripletFloat, TypeSextetFloat]
def parse_per_channel(per_channel, channels):
    """Normalize the ``per_channel`` option into a list of channel-index tuples.

    Args:
        per_channel: ``True`` (each channel on its own), ``False`` (all
            channels together), or an explicit list of channel-index tuples
            which is returned unchanged.
        channels: Total number of channels in the image.

    Returns:
        List of tuples; each tuple groups channels that are processed together.
    """
    if not isinstance(per_channel, bool):
        return per_channel
    if per_channel:
        return [(ch,) for ch in range(channels)]
    return [tuple(range(channels))]
class ZNormalization(tio.ZNormalization):
    """Add option 'per_channel' to apply znorm for each channel independently and percentiles to clip values first"""
    def __init__(
        self,
        percentiles: TypeRangeFloat = (0, 100),
        per_channel=True,
        masking_method: TypeMaskingMethod = None,
        **kwargs
    ):
        # percentiles: (lower, upper) percentile range; values outside are
        #   clipped before z-normalization.
        # per_channel: bool or list of channel-index tuples (see
        #   parse_per_channel) grouping channels that are normalized together.
        super().__init__(masking_method=masking_method, **kwargs)
        self.percentiles = percentiles
        self.per_channel = per_channel
    def apply_normalization(
        self,
        subject: Subject,
        image_name: str,
        mask: torch.Tensor,
    ) -> None:
        """Normalize each configured channel group independently and write the
        result back into the subject's image."""
        image = subject[image_name]
        per_channel = parse_per_channel(self.per_channel, image.shape[0])
        image.set_data(torch.cat([
            self._znorm(image.data[chs,], mask[chs,], image_name, image.path)
            for chs in per_channel ])
        )
    def _znorm(self, image_data, mask, image_name, image_path):
        """Clip to the configured percentiles (in place), then z-normalize.

        Raises:
            RuntimeError: if the masked standard deviation is zero.
        """
        # NOTE: torch not reliable: "failed to apply transformation: quantile() input tensor is too large"
        # cutoff2 = torch.quantile(image_data.masked_select(mask).float(), torch.tensor(self.percentiles)/100.0)
        # torch.clamp(image_data, *cutoff.to(image_data.dtype).tolist(), out=image_data)
        cutoff = np.percentile(image_data[mask], self.percentiles)
        # Clip in place: `out=image_data.numpy()` writes through the buffer
        # shared between the torch tensor and its numpy view
        np.clip(image_data, *cutoff, out=image_data.numpy())  # type: ignore[call-overload]
        standardized = self.znorm(image_data, mask)
        if standardized is None:
            message = (
                'Standard deviation is 0 for masked values'
                f' in image "{image_name}" ({image_path})'
            )
            raise RuntimeError(message)
        return standardized
class RescaleIntensity(tio.RescaleIntensity):
    """TorchIO ``RescaleIntensity`` extended with a 'per_channel' option so the
    rescaling statistics can be computed per channel (or channel group)."""
    def __init__(
        self,
        out_min_max: TypeRangeFloat = (0, 1),
        percentiles: TypeRangeFloat = (0, 100),
        masking_method: TypeMaskingMethod = None,
        in_min_max: Optional[Tuple[float, float]] = None,
        per_channel=True,  # Bool or list of tuples of channel indices rescaled together
        **kwargs
    ):
        super().__init__(out_min_max, percentiles, masking_method, in_min_max, **kwargs)
        self.per_channel = per_channel
    def apply_normalization(
        self,
        subject: Subject,
        image_name: str,
        mask: torch.Tensor,
    ) -> None:
        """Rescale each configured channel group independently."""
        image = subject[image_name]
        channel_groups = parse_per_channel(self.per_channel, image.shape[0])
        rescaled = [
            self.rescale(image.data[chs, ], mask[chs, ], image_name)
            for chs in channel_groups
        ]
        image.set_data(torch.cat(rescaled))
class RandomCutOut(RandomTransform):
    """Cut out rectangular boxes with random size and position and fill them.

    Args:
        patch_max: Maximum patch size per axis [W, H, D].
        patch_min: Minimum patch size per axis; defaults to ``patch_max``.
        patch_margin: Minimum spacing reserved around each patch; defaults to
            ``patch_min``.
        patch_per: Draw the patch layout per 'subject', 'image' or 'channel'.
        fill_per_channel: If True, compute fill values per channel, otherwise
            per image (see ``parse_per_channel``).
        fill: Fill mode for intensity images: 'noise' (Gaussian with the
            tensor's mean/std), 'random_image_values' (sampled from the
            tensor), a number, or a callable ``f(tensor, patch)``.
        fill_label: Constant used to fill cut-outs in label maps.
    """
    def __init__(self, patch_max, patch_min=None, patch_margin=None, patch_per='channel', fill_per_channel=True, fill='random_image_values', fill_label=0, **kwargs):
        super().__init__(**kwargs)
        self.patch_max = np.asarray(patch_max)  # Maximum patch sizes of shape [W, H, D]
        self.patch_min = np.asarray(patch_max if patch_min is None else patch_min)  # Default to patch_max
        self.patch_margin = np.asarray(self.patch_min if patch_margin is None else patch_margin)  # Default to patch_min
        self.patch_per = patch_per  # Equal random patches on 'subject', 'image', or 'channel' level
        self.fill_per_channel = fill_per_channel  # True - compute fill value on channel level, otherwise on image level
        self.fill = fill
        self.fill_label = fill_label
    def apply_transform(self, subject: Subject) -> Subject:
        patches = None
        if self.patch_per == 'subject':
            patches = self.get_patches(subject.spatial_shape)
        for image in subject.get_images(intensity_only=False, **self.add_include_exclude({})):
            if self.patch_per == 'image':
                patches = self.get_patches(image.shape[1:])
            if image[TYPE] == INTENSITY:
                new_image = self.fill_rect(image.data, patches, fill=self.fill)
            elif image[TYPE] == LABEL:
                new_image = self.fill_rect(image.data, patches, fill=self.fill_label)
            # FIX: previously a dead local rebinding (`image = new_image`) that
            # only worked because fill_rect mutates the tensor in place
            image.set_data(new_image)
        return subject
    def fill_rect(self, tensor, patches, fill):
        """Fill all patch rectangles in ``tensor`` (in place) and return it."""
        per_channel = parse_per_channel(self.fill_per_channel, tensor.shape[0])
        for ch in per_channel:
            if self.patch_per == 'channel':
                patches = self.get_patches(tensor.shape[1:])
            for patch_i, patch in enumerate(patches):
                patch_slice = tuple(slice(pos_a, pos_a+shape_a) for pos_a, shape_a in zip(patch[0], patch[1]))
                tensor[ch][patch_slice] = self.get_fill_value(tensor[ch], patch_i, patch, fill)
        return tensor
    @staticmethod
    def get_fill_value(tensor, patch_i, patch, fill):
        """Compute the fill value(s) for one patch according to ``fill``.

        Raises:
            ValueError: if ``fill`` is neither a known mode string, a number,
                nor a callable.
        """
        if fill == 'noise':
            mean = torch.mean(tensor)
            std = torch.std(tensor)
            fill_value = torch.normal(mean, std, size=tuple(patch[1]))
        elif fill == 'random_image_values':
            fill_value = np.random.choice(tensor.flatten(), size=patch[1])  # very slow
            # fill_value = fill[np.sum(n_elm[0:patch_i]): np.sum(n_elm[0:patch_i+1]) ].reshape(patch[1])
        elif isinstance(fill, Number):
            fill_value = fill
        elif callable(fill):  # FIX: isinstance(fill, callable) raised TypeError
            fill_value = fill(tensor, patch)
        else:
            # FIX: was `assert f"..."` which is a no-op (non-empty string is truthy)
            raise ValueError(f"Parameter 'fill' received invalid value: {fill}")
        return torch.tensor(fill_value)
    def get_patches(self, spatial_shape: np.ndarray) -> List[Tuple[TypeTripletInt, TypeTripletInt]]:
        """Lay out a random grid of patches (positions and sizes) in an image.

        Returns:
            Array of shape [patches, 2, 3] with per-patch corner position and size.
        """
        # Calculate how many patches (including margin) fit into image (at most)
        patch_frame = self.patch_max+self.patch_margin
        num_patches = np.floor_divide(spatial_shape, patch_frame)
        # offset = np.floor_divide(spatial_shape % patch_size, 2) # Start first patch so that patches are centered in image
        offset = np.random.randint(spatial_shape % patch_frame+1)  # Start first patch randomly between 0 and remaining space
        # Scale each patch within given bounds - [Patches, shape-(x,y,z)]
        patch_sizes = np.asarray([np.random.randint(self.patch_min, self.patch_max+1) for _ in range(np.prod(num_patches))])
        # Calculate left(x),lower(y),front(z) corner of each patch
        idx_enc = np.cumprod(np.array([1, *num_patches[:-1]]))  # Index encoder
        patch_pos = [ [ [ [
            tuple(offset[a]+v*patch_frame[a]+(patch_frame[a]-patch_sizes[idx,a])//2 for a, v in enumerate([ix,iy,iz]))
            for idx in (np.matmul([ix,iy,iz], idx_enc),)]  # Note: only workaround to store variable idx here
            for ix in range(0, num_patches[0])]
            for iy in range(0, num_patches[1])]
            for iz in range(0, num_patches[2])]
        patch_pos = np.asarray(patch_pos).reshape((-1, 3))  # [Patches, corner position-(x,y,z)]
        patches = np.stack([patch_pos, patch_sizes], axis=1)  # [Patches, position/size, (x,y,z)]
        return patches  # [patches, 2, 3]
class Brightness(RandomTransform):
    """Multiply image intensities by a random factor drawn uniformly from ``scale``."""
    def __init__(self, scale, per_channel=True, **kwargs):
        super().__init__(**kwargs)
        self.per_channel = per_channel  # draw one factor per channel vs. per image
        self.scale = scale  # (low, high) range of the multiplicative factor
    def apply_transform(self, subject: Subject) -> Subject:
        low, high = self.scale[0], self.scale[1]
        for image in subject.get_images(intensity_only=True):
            if self.per_channel:
                scaled = [ch_data * np.random.uniform(low=low, high=high) for ch_data in image.data]
                image.set_data(np.stack(scaled))
            else:
                image.set_data(image.data * np.random.uniform(low=low, high=high))
        return subject
class RandomDisableChannel(RandomTransform):
    """Randomly zero out one of the given channels.

    Args:
        channels: Channel indices from which one is chosen to be zeroed.
        disable_per: 'subject' draws the channel once per subject,
            'image' draws it anew for every image.

    Raises:
        ValueError: if ``disable_per`` is not 'subject' or 'image'.
    """
    def __init__(self, channels, disable_per='subject', **kwargs):
        super().__init__(**kwargs)
        # FIX: an invalid value previously caused a NameError on the undefined
        # `disable_channel` during apply_transform; validate eagerly instead.
        if disable_per not in ('subject', 'image'):
            raise ValueError(f"disable_per must be 'subject' or 'image', got {disable_per!r}")
        self.channels = np.array(channels)
        self.disable_per = disable_per
    def apply_transform(self, subject: Subject) -> Subject:
        if self.disable_per == 'subject':
            disable_channel = np.random.choice(self.channels)
        for image in subject.get_images(intensity_only=True):
            assert image.shape[0] >= len(self.channels), f"Image has only {image.shape[0]} channel, but {len(self.channels)} are disabled"
            if self.disable_per == 'image':
                disable_channel = np.random.choice(self.channels)
            new_data = []
            for ch, ch_data in enumerate(image.data):
                if ch == disable_channel:
                    new_data.append(ch_data*0)  # zero out, keeping shape/dtype
                else:
                    new_data.append(ch_data)
            image.set_data(np.stack(new_data))
        return subject
class SelectRandomChannel(Transform):
    """Reduce every intensity image to one randomly selected channel."""
    def apply_transform(self, subject: Subject) -> Subject:
        images = subject.get_images(
            intensity_only=True,  # WARNING: only applies to ScalarImages
            include=self.include,
            exclude=self.exclude,
        )
        # WARNING: assumes all images have the same number of channels
        channel = torch.randint(images[0].shape[0], (1,))
        for image in images:
            image.set_data(image.data[channel])
        return subject
class AddBlankChannel(RandomTransform):
    """Insert an all-zero channel at index ``channel`` into every intensity image."""
    def __init__(self, channel, **kwargs):
        super().__init__(**kwargs)
        self.channel = channel  # position at which the blank channel is inserted
    def apply_transform(self, subject: Subject) -> Subject:
        for image in subject.get_images(intensity_only=True):
            num_channels = image.data.shape[0]
            new_channels = []
            for idx, ch_data in enumerate(image.data):
                if idx == self.channel:
                    new_channels.append(ch_data * 0)  # blank with matching shape/dtype
                new_channels.append(ch_data)
                # NOTE(review): when `channel` equals the channel count, a blank
                # is appended after EVERY existing channel (original behavior,
                # preserved here) — confirm whether a single trailing blank was
                # intended instead.
                if self.channel == num_channels:
                    new_channels.append(ch_data * 0)
            image.set_data(np.stack(new_channels))
        return subject
class SpatialTransform2(RandomAffine):
    """``RandomAffine`` variant where ``isotropic`` may also be a pair of axis
    indices — (0, 1), (0, 2) or (1, 2) — so that only those two axes share the
    same random scaling factor while the remaining axis scales independently.
    ``isotropic=True``/``False`` behaves like the original ``RandomAffine``.
    """
    def __init__(
        self,
        scales: TypeOneToSixFloat = 0.1,
        degrees: TypeOneToSixFloat = 10,
        translation: TypeOneToSixFloat = 0,
        isotropic: Union[bool, tuple] = False,
        center: str = 'image',
        default_pad_value: Union[str, float] = 'minimum',
        image_interpolation: str = 'linear',
        check_shape: bool = True,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.isotropic = isotropic
        self._parse_scales_isotropic(scales, isotropic)
        self.scales = self.parse_params(scales, 1, 'scales', min_constraint=0)
        self.degrees = self.parse_params(degrees, 0, 'degrees')
        self.translation = self.parse_params(translation, 0, 'translation')
        if center not in ('image', 'origin'):
            message = (
                'Center argument must be "image" or "origin",'
                f' not "{center}"'
            )
            raise ValueError(message)
        self.center = center
        self.default_pad_value = _parse_default_value(default_pad_value)
        self.image_interpolation = self.parse_interpolation(
            image_interpolation)
        self.check_shape = check_shape
    def get_params(
        self,
        scales: TypeSextetFloat,
        degrees: TypeSextetFloat,
        translation: TypeSextetFloat,
        isotropic: Union[bool, tuple, None] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Sample scaling, rotation and translation parameters.

        FIX: ``isotropic`` now defaults to the instance setting so calls from
        ``RandomAffine.apply_transform`` — which passes only three positional
        arguments — no longer fail with a missing-argument TypeError.
        """
        if isotropic is None:
            isotropic = self.isotropic
        scaling_params = self.sample_uniform_sextet(scales)
        if isinstance(isotropic, bool) and isotropic:
            # Fully isotropic: same factor on all three axes
            scaling_params.fill_(scaling_params[0])
        elif isinstance(isotropic, tuple):  # (0,1),(0,2),(1,2) => x==y, x==z, y==z
            scaling_params[isotropic[0]] = scaling_params[isotropic[1]]
        rotation_params = self.sample_uniform_sextet(degrees)
        translation_params = self.sample_uniform_sextet(translation)
        return scaling_params, rotation_params, translation_params
    def _parse_scales_isotropic(self, scales, isotropic):
        """Validate that ``scales`` is consistent with the ``isotropic`` setting.

        Raises:
            ValueError: on inconsistent combinations.
        """
        scales = to_tuple(scales)
        if isinstance(isotropic, bool):
            if isotropic and len(scales) in (3, 6):
                message = (
                    'If "isotropic" is True, the value for "scales" must have'
                    f' length 1 or 2, but "{scales}" was passed'
                )
                raise ValueError(message)
        elif isinstance(isotropic, tuple):
            if len(isotropic) != 2:
                # FIX: the message previously said "3 booleans" although the
                # check requires a pair of axis indices
                raise ValueError("Parameter isotropic must be a tuple with 2 axis indices")
            if isotropic == (0, 1):  # x==y
                if scales[0:2] != scales[2:4]:
                    raise ValueError("Scales differ in isotropic axis")
            elif isotropic == (0, 2):
                if scales[0:2] != scales[4:6]:
                    raise ValueError("Scales differ in isotropic axis")
            elif isotropic == (1, 2):
                if scales[2:4] != scales[4:6]:
                    raise ValueError("Scales differ in isotropic axis")
class PadFixed(tio.Pad):
    """TorchIO ``Pad`` with fixes:
    * Label maps are always padded with zeros, regardless of the padding mode
      (e.g. labels are never padded with the mean intensity).
    * 'maximum' padding pads with the image maximum (it previously used the
      minimum — a copy/paste bug).
    The statistics ('maximum', 'mean', 'median', 'minimum') are computed
    globally over the image, not per axis.
    """
    def apply_transform(self, subject: Subject) -> Subject:
        assert self.bounds_parameters is not None
        low = self.bounds_parameters[::2]
        for image in self.get_images(subject):
            # Shift the origin so the padded image stays aligned in world space
            new_origin = nib.affines.apply_affine(image.affine, -np.array(low))
            new_affine = image.affine.copy()
            new_affine[:3, 3] = new_origin
            kwargs: Dict[str, Union[str, float]]
            if isinstance(self.padding_mode, Number):
                kwargs = {
                    'mode': 'constant',
                    'constant_values': self.padding_mode,
                }
            elif isinstance(image, tio.LabelMap):  # FIX: zero-pad labels unconditionally
                kwargs = {
                    'mode': 'constant',
                    'constant_values': 0,
                }
            else:
                if self.padding_mode in ['maximum', 'mean', 'median', 'minimum']:
                    if self.padding_mode == 'maximum':
                        constant_values = image.data.max()  # FIX: was image.data.min()
                    elif self.padding_mode == 'mean':
                        constant_values = image.data.to(torch.float).mean().to(image.data.dtype)
                    elif self.padding_mode == 'median':
                        constant_values = image.data.median()
                    elif self.padding_mode == 'minimum':
                        constant_values = image.data.min()
                    kwargs = {
                        'mode': 'constant',
                        'constant_values': constant_values,
                    }
                else:
                    kwargs = {'mode': self.padding_mode}
            pad_params = self.bounds_parameters
            # No padding on the channel axis
            paddings = (0, 0), pad_params[:2], pad_params[2:4], pad_params[4:]
            padded = np.pad(image.data, paddings, **kwargs)  # type: ignore[call-overload] # noqa: E501
            image.set_data(torch.as_tensor(padded))
            image.affine = new_affine
        return subject
class CropOrPadFixed(tio.CropOrPad):
    """Fixed version of TorchIO CropOrPad:
    * Pads label maps with zeros regardless of the selected padding mode
      (e.g. never pads labels with the mean intensity).
    Changes:
    * Pads with the global (not per-axis) 'maximum', 'mean', 'median' or
      'minimum' when one of these padding modes is selected."""
    def apply_transform(self, subject: Subject) -> Subject:
        subject.check_consistent_space()
        padding_params, cropping_params = self.compute_crop_or_pad(subject)
        if padding_params is not None:
            # Use the fixed Pad implementation instead of tio.Pad
            subject = PadFixed(padding_params, padding_mode=self.padding_mode)(subject)  # type: ignore[assignment]
        if cropping_params is not None:
            subject = tio.Crop(cropping_params)(subject)  # type: ignore[assignment]
        return subject
class RandomCropOrPad(CropOrPadFixed):
    """CropOrPad whose crop/pad amount is split randomly between both borders
    of each axis (instead of being centered)."""
    @staticmethod
    def _get_six_bounds_parameters(parameters: np.ndarray):
        """For each axis, randomly distribute the total margin between the two
        borders; returns the six (front, back) values as a flat tuple."""
        bounds = []
        for total in parameters:
            front = np.random.randint(low=0, high=total + 1)
            bounds.extend([front, total - front])
        return tuple(bounds)
class CropOrPadNone(CropOrPadFixed):
    """CropOrPad enables axis not to be changed by setting to None in target_shape """
    def __init__(
        self,
        target_shape: Union[int, TypeTripletInt, None] = None,
        padding_mode: Union[str, float] = 0,
        mask_name: Optional[str] = None,
        labels: Optional[Sequence[int]] = None,
        **kwargs
    ):
        # target_shape: per-axis target size; an axis set to None keeps the
        #   subject's size for that axis (resolved per subject in
        #   apply_transform). None entries are replaced by a placeholder of 1
        #   here only to satisfy the parent constructor.
        # WARNING: Ugly workaround to allow None values
        if target_shape is not None:
            self.original_target_shape = to_tuple(target_shape, length=3)
            target_shape = [1 if t_s is None else t_s for t_s in target_shape]
        super().__init__(target_shape, padding_mode, mask_name, labels, **kwargs)
    def apply_transform(self, subject: Subject):
        # Replace the placeholder sizes with this subject's spatial shape for
        # all axes that were requested as None.
        # WARNING: This makes the transformation subject dependent - reverse transformation must be adapted
        if self.target_shape is not None:
            self.target_shape = [s_s if t_s is None else t_s for t_s, s_s in zip(self.original_target_shape, subject.spatial_shape)]
        return super().apply_transform(subject=subject)
class ContrastAugmentationTransform(RandomTransform):
    """Randomly scale image contrast (wraps ``augment_contrast``)."""
    def __init__(self, contrast_range=(0.75, 1.25), preserve_range=True, per_channel=True, p_per_channel=1, **kwargs):
        super().__init__(**kwargs)
        self.contrast_range = contrast_range  # multiplicative factor range
        self.preserve_range = preserve_range  # clip back to the original value range
        self.per_channel = per_channel  # draw a separate factor per channel
        self.p_per_channel = p_per_channel  # application probability per channel
    def apply_transform(self, subject: Subject) -> Subject:
        for image in subject.get_images(intensity_only=True):
            augmented = augment_contrast(
                image.data,
                contrast_range=self.contrast_range,
                preserve_range=self.preserve_range,
                per_channel=self.per_channel,
                p_per_channel=self.p_per_channel,
            )
            image.set_data(augmented)
        return subject
class SimulateLowResolutionTransform(RandomTransform):
    """Simulate low resolution by downsampling and re-upsampling the image
    (nnUNet-style). Wraps ``augment_linear_downsampling_scipy``; see that
    helper for the parameter semantics.
    """
    def __init__(self, zoom_range=(0.5, 1), per_channel=False, p_per_channel=1, channels=None, order_downsample=1, order_upsample=0, ignore_axes=None, **kwargs):
        super().__init__(**kwargs)
        self.zoom_range = zoom_range
        self.per_channel = per_channel
        self.p_per_channel = p_per_channel
        self.channels = channels
        self.order_downsample = order_downsample
        self.order_upsample = order_upsample
        self.ignore_axes = ignore_axes
    def apply_transform(self, subject: Subject) -> Subject:
        for image in subject.get_images(intensity_only=True):
            # FIX: p_per_channel previously received self.per_channel by mistake,
            # so the configured application probability was silently ignored
            new_data = augment_linear_downsampling_scipy(
                image.data.numpy(),
                zoom_range=self.zoom_range,
                per_channel=self.per_channel,
                p_per_channel=self.p_per_channel,
                channels=self.channels,
                order_downsample=self.order_downsample,
                order_upsample=self.order_upsample,
                ignore_axes=self.ignore_axes,
            )
            image.set_data(new_data)
        return subject
class Inverse(Transform):
    """Negate all intensity values of every intensity image."""
    def apply_transform(self, subject: Subject) -> Subject:
        for image in subject.get_images(intensity_only=True):
            image.set_data(image.data * -1)
        return subject
class Trans3Dto2D(Transform):
    """Reshape each image from (C, W, H, D) to (C*D, W, H, 1) and record the
    depth on the subject so :class:`Trans2Dto3D` can restore the shape."""
    def apply_transform(self, subject: Subject) -> Subject:
        for image in subject.get_images(intensity_only=False):
            channels, width, height, depth = image.data.shape  # [C, W, H, D]
            flattened = image.data.reshape((-1, width, height))
            image.set_data(np.expand_dims(flattened, -1))
            subject['_3dto2d_depth'] = depth  # needed by Trans2Dto3D to undo
        return subject
class Trans2Dto3D(Transform):
    """Inverse of :class:`Trans3Dto2D`: reshape (C*D, W, H, 1) back to
    (C, W, H, D) using the depth stored on the subject."""
    def apply_transform(self, subject: Subject) -> Subject:
        for image in subject.get_images(intensity_only=False):
            shape = image.data.shape  # [C*D, W, H, 1]
            channels = int(shape[0] / subject['_3dto2d_depth'])
            restored = np.squeeze(image.data, -1).reshape((channels, shape[1], shape[2], -1))
            image.set_data(restored)
        return subject
class Resample2(SpatialTransform):
    """Resample based on nnUNet implementation."""
    def __init__(self, target_spacing, **kwargs):
        # target_spacing: desired voxel spacing in TorchIO's (W, H, D) order;
        #   reversed below because resample_patient expects (D, H, W) order.
        super().__init__(**kwargs)
        self.target_spacing = target_spacing
        self.args_names = (
            'target_spacing',
        )
    def apply_transform(self, subject: Subject) -> Subject:
        """Resample every image of the subject to ``target_spacing``.

        The original spacing/shape is stored on the subject so the operation
        can be reversed by :class:`Resample2Inverse`.
        """
        subject['_org_spacing'] = subject.spacing
        subject['_org_spatial_shape'] = subject.spatial_shape
        for image in subject.get_images(intensity_only=False):
            target_spacing = self.target_spacing[::-1]
            original_spacing = image.spacing[::-1]
            # TorchIO stores (C, W, H, D); the nnUNet helper works on (C, D, H, W)
            data = np.swapaxes(image.data.numpy(), 1, -1)
            # Intensity images and label maps are resampled differently
            image_data = data if image[TYPE] == INTENSITY else None
            seg_data = None if image[TYPE] == INTENSITY else data
            image_data, seg_data = resample_patient(image_data, seg_data, original_spacing, target_spacing)  # Order [(C),D,H,W]
            data = image_data if image[TYPE] == INTENSITY else seg_data
            data = np.swapaxes(data, 1, -1)
            image.set_data(data)
            # Rebuild the affine to reflect the new spacing
            image.affine = get_affine2(self.target_spacing, image.direction, image.origin, lps2ras=False)
        return subject
    def is_invertible(self):
        return True
    def inverse(self):
        return Resample2Inverse()
class ResampleTio(Resample):
    """TorchIO ``Resample`` that additionally records the original geometry so
    it can be undone with :class:`ResampleTio2Inverse`."""
    def apply_transform(self, subject: Subject) -> Subject:
        # Remember the geometry before resampling, for the inverse transform
        subject['_org_spacing'] = subject.spacing
        subject['_org_spatial_shape'] = subject.spatial_shape
        return super().apply_transform(subject)
    def is_invertible(self):
        return True
    def inverse(self):
        return ResampleTio2Inverse()
class RandomResample(SpatialTransform):
    """Resample to a randomly drawn spacing: in-plane uniform in [0.5, 1.0],
    slice spacing chosen from {1.5, 2, 2.5, 3}."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def apply_transform(self, subject: Subject) -> Subject:
        # Remember the geometry so the inverse transform can restore it
        subject['_org_spacing'] = subject.spacing
        subject['_org_spatial_shape'] = subject.spatial_shape
        in_plane = torch.rand(1).item() * 0.5 + 0.5  # uniform in [0.5, 1.0]
        slice_spacing = np.random.choice([1.5, 2, 2.5, 3])
        return Resample(np.array([in_plane, in_plane, slice_spacing]))(subject)
    def is_invertible(self):
        return True
    def inverse(self):
        return ResampleTio2Inverse()
class Resample2Inverse(SpatialTransform):
    """Reverse function for Resample2"""
    def apply_transform(self, subject: Subject) -> Subject:
        # Capture the stored geometry first: Resample2 overwrites these keys
        original_spacing = subject['_org_spacing']
        original_shape = subject['_org_spatial_shape']
        return Compose([Resample2(original_spacing), CropOrPad(original_shape)])(subject)
class ResampleTio2Inverse(SpatialTransform):
    """Reverse function for ResampleTio"""
    def apply_transform(self, subject: Subject) -> Subject:
        # Restore the geometry recorded by the forward transform
        original_spacing = subject['_org_spacing']
        original_shape = subject['_org_spatial_shape']
        return Compose([Resample(original_spacing), CropOrPad(original_shape)])(subject)
class HistogramStandardization2(HistogramStandardization):
    """TorchIO-HistogramStandardization which allows the input of images instead of paths and skips empty masks."""
    DEFAULT_CUTOFF = 0.01, 0.99  # default (lower, upper) quantile cutoffs
    STANDARD_RANGE = 0, 100  # target range the landmarks are mapped to
    @classmethod
    def train(
        cls,
        images: Sequence,
        mask_name: str = None,
        cutoff: Optional[Tuple[float, float]] = None,
        output_path: Optional[TypePath] = None,
    ) -> np.ndarray:
        """Compute the average landmark mapping over a set of images.

        Args:
            images: Sequence of items with a 'source' image (and optionally a
                mask image under ``mask_name``).
            mask_name: Key of the mask used to select voxels; items whose mask
                is empty are skipped.
            cutoff: Optional (lower, upper) quantile cutoffs; defaults to
                ``DEFAULT_CUTOFF``.
            output_path: Optional '.txt' or '.npy' file to store the mapping.

        Returns:
            The averaged landmark mapping as a numpy array.
        """
        quantiles_cutoff = cls.DEFAULT_CUTOFF if cutoff is None else cutoff
        percentiles_cutoff = 100 * np.array(quantiles_cutoff)
        percentiles_database = []
        percentiles = cls._get_percentiles(percentiles_cutoff)
        for i, item in enumerate(tqdm(images)):
            array = item['source'].numpy()
            if mask_name is not None:
                mask = item[mask_name].numpy() > 0
            # NOTE(review): if mask_name is None, `mask` is undefined here and
            # the next line raises NameError — confirm a full-image mask was
            # intended for that case.
            if not mask.any():
                print("WARNING: Skipping because no valid label in mask")
                continue
            if len(array[mask]) < 20:
                print("WARNING: Percentile calculation only based on less than 20 values")
            percentile_values = np.percentile(array[mask], percentiles)
            if percentile_values[0] == percentile_values[-1]:
                raise Exception("Percentiles should not be equal")
            percentiles_database.append(percentile_values)
        percentiles_database = np.vstack(percentiles_database)
        mapping = cls._get_average_mapping(percentiles_database)
        if output_path is not None:
            output_path = Path(output_path).expanduser()
            extension = output_path.suffix
            if extension == '.txt':
                modality = 'image'
                text = f'{modality} {" ".join(map(str, mapping))}'
                output_path.write_text(text)
            elif extension == '.npy':
                np.save(output_path, mapping)
        return mapping
    @classmethod
    def _get_percentiles(cls, percentiles_cutoff: Tuple[float, float]) -> np.ndarray:
        """Return the sorted, deduplicated landmark percentiles: the cutoffs
        plus all quartiles and deciles."""
        quartiles = np.arange(25, 100, 25).tolist()
        deciles = np.arange(10, 100, 10).tolist()
        all_percentiles = list(percentiles_cutoff) + quartiles + deciles
        percentiles = sorted(set(all_percentiles))
        return np.array(percentiles)
    @classmethod
    def _get_average_mapping(cls, percentiles_database: np.ndarray) -> np.ndarray:
        """Map the landmarks of the database to the chosen range.
        Args:
            percentiles_database: Percentiles database over which to perform the
                averaging.
        """
        # Assuming percentiles_database.shape == (num_data_points, num_percentiles)
        pc1 = percentiles_database[:, 0]
        pc2 = percentiles_database[:, -1]
        s1, s2 = cls.STANDARD_RANGE
        # Per-image linear map from [pc1, pc2] to [s1, s2]
        slopes = (s2 - s1) / (pc2 - pc1)
        slopes = np.nan_to_num(slopes)  # guard against pc1 == pc2
        intercepts = np.mean(s1 - slopes * pc1)
        num_images = len(percentiles_database)
        final_map = slopes.dot(percentiles_database) / num_images + intercepts
        return final_map
class LambdaSubject(Transform):
    """Apply an arbitrary callable to the whole Subject."""
    def __init__(
        self,
        function: TypeCallable,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.function = function  # callable taking and returning a Subject
        self.args_names = ('function',)
    def apply_transform(self, subject: Subject) -> Subject:
        return self.function(subject)
class ToOrientation(SpatialTransform):
    """Generalization of TorchIO-ToCanonical: reorient images to any axis
    codes (e.g. 'RAS', 'LPS') instead of only canonical RAS."""
    def __init__(self, target_orientation, **kwargs):
        # target_orientation: 3-letter axis codes accepted by nibabel, e.g. 'RAS'
        self.target_orientation = target_orientation
        super().__init__(**kwargs)
        self.args_names = ('target_orientation',)
    def apply_transform(self, subject: Subject) -> Subject:
        for image_name, image in subject.get_images_dict(intensity_only=False).items():
            # Store the per-image orientation so the transform can be undone
            subject[f'_org_orientation_{image_name}'] = image.orientation
            self._reorient(image, self.target_orientation)
        return subject
    @classmethod
    def _reorient(cls, image, target_orientation):
        """Reorient ``image`` in place to ``target_orientation`` (no-op if it
        already matches)."""
        if image.orientation == tuple(target_orientation):
            return
        affine = image.affine
        array = image.numpy()[np.newaxis]  # (1, C, W, H, D)
        # NIfTI images should have channels in 5th dimension
        array = array.transpose(2, 3, 4, 0, 1)  # (W, H, D, 1, C)
        nii = nib.Nifti1Image(array, affine)
        original_ornt = nib.io_orientation(affine)
        target_ornt = nib.orientations.axcodes2ornt(target_orientation)
        transform = nib.orientations.ornt_transform(original_ornt, target_ornt)
        reoriented = nii.as_reoriented(transform)
        # https://nipy.org/nibabel/reference/nibabel.dataobj_images.html#nibabel.dataobj_images.DataobjImage.get_data
        array = np.asanyarray(reoriented.dataobj)
        # https://github.com/facebookresearch/InferSent/issues/99#issuecomment-446175325
        array = array.copy()
        array = array.transpose(3, 4, 0, 1, 2)  # (1, C, W, H, D)
        image.set_data(torch.as_tensor(array[0]))
        image.affine = reoriented.affine
    def is_invertible(self):
        return True
    def inverse(self):
        return ToOrientationInverse()
class ToOrientationInverse(SpatialTransform):
    """Undo :class:`ToOrientation` using the orientation stored on the subject."""
    def apply_transform(self, subject: Subject) -> Subject:
        for name, image in subject.get_images_dict(intensity_only=False).items():
            original_orientation = subject[f'_org_orientation_{name}']
            ToOrientation._reorient(image, original_orientation)
        return subject
| 31,025 | 40.983762 | 167 | py |
TraBS | TraBS-main/breaststudies/utils/prediction.py | import torch
import torch.nn.functional as F
import torchio as tio
import time
import logging
logger = logging.getLogger(__name__)
def series_pred(item_pointers, load_item, model, test_time_flipping=False, device=None):
    """Predict segmentation masks for all items of one series (e.g. left and
    right breast crops) and fuse them into a single mask.

    Args:
        item_pointers: Identifiers passed to ``load_item`` to load each item.
        load_item: Callable returning a TorchIO subject with a 'source' image
            and a 'spacing' entry.
        model: Model exposing ``infer(batch, spacing=...)`` that returns logits.
        test_time_flipping: If True, average the softmax over all 8 flip
            variants (test-time augmentation).
        device: Torch device; defaults to CUDA if available, else CPU.

    Returns:
        Tuple of (fused prediction mask [1, 1, D, H, W], list of per-item
        inference times in seconds).
    """
    # FIX: the final return statement had dataset-extraction residue fused onto
    # it, which made the function syntactically invalid.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if device is None else device
    # Predict all masks within one series (e.g. left and right side of breast)
    series_mask = []
    series_delta_times = []
    for item_pointer in item_pointers:
        # ------ Load Source File ---------
        item = load_item(item_pointer)
        source_tio = item['source']
        spacing = torch.tensor(item['spacing'], device=device)[None]
        source = source_tio.data.swapaxes(1,-1)
        source_batch = source[None]  # add Batch-Dimension
        source_batch = source_batch.to(device)
        # ---------- Run Model ------------
        with torch.no_grad():
            start_time = time.time()
            pred = model.infer(source_batch, spacing=spacing)
            roi_delta_time = time.time() - start_time
            logger.info(f"Computation took {roi_delta_time:.3f}s")
            pred_prob = F.softmax(pred, dim=1)  # [Batch, Channels, D, H, W]
        # --------- Test Time Augmentation (Flip): average over all 8 flips
        if test_time_flipping:
            pred_prob = pred_prob/8
            with torch.no_grad():
                for flip_axes in [(4,), (3,), (2,), (4, 3), (4, 2), (3,2), (4, 3, 2)]:
                    pred_prob += torch.flip(F.softmax(model.infer(torch.flip(source_batch, flip_axes), spacing=spacing), dim=1), flip_axes)/8
        # ---------- Soft 0-1 to hard prediction 0/1
        pred_mask = torch.argmax(pred_prob, dim=1, keepdim=True).type(torch.long)  # [Batch, 1, D, H, W]
        # ----------- Hot Fix --------------
        # pred_mask[pred_mask==2] = 3 # WARNING: Only valid for training purposes, uncomment otherwise
        # -------- Add prediction to TorchIO-subject --------
        pred_mask_tio = tio.LabelMap(tensor=pred_mask[0].swapaxes(1,-1).cpu(), affine=source_tio.affine)
        item.add_image(pred_mask_tio, 'prediction')
        if '_org_orientation_source' in item:  # If ToOrientation was applied, add property for reverse process
            item['_org_orientation_prediction'] = item['_org_orientation_source']
        # Reverse augmentation
        item_inv = item.apply_inverse_transform(warn=False)  # -> get_inverse_transform()
        pred_mask = item_inv['prediction'].data.swapaxes(1,-1)[None]
        # Add item prediction to list for series prediction
        series_mask.append(pred_mask)
        series_delta_times.append(roi_delta_time)
    # Fusion all crops/items
    pred_mask = torch.sum(torch.stack(series_mask), dim=0)  # Note: summation is possible as e.g. contralateral side were padded with zeros
    return pred_mask, series_delta_times
TraBS | TraBS-main/breaststudies/utils/functions.py | import torch
import torch.nn.functional as F
import numpy as np
from torchvision.utils import draw_segmentation_masks
def heaviside(input, threshold=0.5):
    """Binarize ``input``: 1 where ``input >= threshold``, else 0.

    Arguments:
        input {torch.Tensor} -- Input tensor

    Keyword Arguments:
        threshold {float} -- Values greater or equal threshold are set to one (default: {0.5})

    Returns:
        torch.Tensor -- Binary tensor

    Warning:
        This function destroys the backpropagation path! The gradient ends in
        the constant tensors, not in ``input``.

    See also:
        PyTorch>1.7 https://pytorch.org/docs/1.7.0/generated/torch.heaviside.html#torch.heaviside
    """
    one = torch.ones(1, device=input.device, requires_grad=False)
    zero = torch.zeros(1, device=input.device, requires_grad=False)
    return torch.where(input >= threshold, one, zero)
def one_hot(tensor, num_classes=-1):
    """One-hot encode ``tensor`` with the class axis at position 1.

    Arguments:
        tensor {torch.Tensor} -- Tensor to be encoded of shape [Batch, (Depth), Height, Width]

    Keyword Arguments:
        num_classes {int} -- number of classes (default: {-1})

    Returns:
        torch.Tensor -- Tensor of shape [Batch, Classes, (Depth), Height, Width]

    Warning:
        This function destroys the backpropagation path!
    """
    encoded = F.one_hot(tensor.long(), num_classes)
    return encoded.movedim(-1, 1)  # move the class axis from last to position 1
def minmax_norm(x, max=1, smooth_nr=1e-5, smooth_dr=1e-5):
    """Normalize each (batch, channel) slab of ``x`` to the range [0, ``max``].

    Args:
        x (torch.Tensor): Tensor to be normalized, shape [Batch, Channel, *]
        max: Upper bound of the output range.
        smooth_nr: Smoothing term added to the numerator.
        smooth_dr: Smoothing term added to the denominator.

    Returns:
        torch.Tensor: Normalized tensor of the same shape.
    """
    normed_batches = []
    for batch in x:
        normed_channels = []
        for ch in batch:
            lo, hi = ch.min(), ch.max()
            normed_channels.append((ch - lo + smooth_nr) / (hi - lo + smooth_dr) * max)
        normed_batches.append(torch.stack(normed_channels))
    return torch.stack(normed_batches)
def minmax_norm_slice(x, max=1, smooth_nr=1e-5, smooth_dr=1e-5):
    """Normalize each (batch, channel, slice) 2-D slice of ``x`` to [0, ``max``].

    Args:
        x (torch.Tensor): Tensor to be normalized, shape [Batch, Channel, *]
        max: Upper bound of the output range.
        smooth_nr: Smoothing term added to the numerator.
        smooth_dr: Smoothing term added to the denominator.

    Returns:
        torch.Tensor: Normalized tensor of the same shape.
    """
    normed_batches = []
    for batch in x:
        normed_channels = []
        for ch in batch:
            normed_slices = []
            for sl in ch:
                lo, hi = sl.min(), sl.max()
                normed_slices.append((sl - lo + smooth_nr) / (hi - lo + smooth_dr) * max)
            normed_channels.append(torch.stack(normed_slices))
        normed_batches.append(torch.stack(normed_channels))
    return torch.stack(normed_batches)
def tensor2image(tensor, batch=0):
    """Arrange a tensor as a batch of 2-D images.

    2-D input [B, C, H, W] is returned unchanged (gray or RGB). For 3-D input
    [B, C, D, H, W], the given ``batch`` is selected and depth and interleaved
    channels are folded into the batch axis, yielding [D*C, 1, H, W]
    single-channel images (compatible with torchvision.utils.save_image).

    Args:
        tensor (torch.Tensor): Image of shape [B, C, H, W] or [B, C, D, H, W]

    Returns:
        torch.Tensor: Image of shape [B, C, H, W] or [DxC, 1, H, W]
    """
    if tensor.ndim < 5:
        return tensor
    interleaved = torch.swapaxes(tensor[batch], 0, 1)  # [C, D, H, W] -> [D, C, H, W]
    return interleaved.reshape(-1, *tensor.shape[-2:])[:, None]
def tensor_mask2image(tensor, mask_hot, batch=0, alpha=0.25, colors=None, exclude_chs=[]):
    """Transform a tensor and a one-hot mask into multiple 2D RGB images.

    Args:
        tensor (torch.Tensor): Image tensor. Can be 3D volume of shape [B, C, D, W, H] or 2D of shape [B, C, H, W]
        mask_hot (torch.Tensor): One-Hot encoded mask of shape [B, Classes, D, W, H] or [B, Classes, H, W]
        batch (int, optional): Batch to use if input is 3D. Defaults to 0.
        alpha (float, optional): 1-Transparency. Defaults to 0.25.
        colors (optional): Passed to torchvision's draw_segmentation_masks; one color per class.
        exclude_chs (list, optional): Channel indices that get no mask overlay.
            WARNING: mutable default argument — do not mutate inside.

    Returns:
        torch.Tensor: Tensor of 2D-RGB images (values in [0, 1]) with transparent mask on each.
        For 3D will be [CxD, 3, H, W] for 2D will be [B, 3, H, W]
    """
    mask_hot = mask_hot.type(torch.bool).cpu()  # To bool and cpu (see bug below)
    mask_hot = mask_hot if mask_hot.ndim<5 else torch.swapaxes(mask_hot[batch], 0, 1)  # 3D [B, C, D, H, W] -> [C, D, H, W]. 2D [B, C, H, W] -> [B, C, H, W]
    image = minmax_norm(tensor, 255).type(torch.uint8).cpu()  # To uint8 and cpu (see bug below)
    image = image[None] if image.ndim==4 else image[batch][:,:,None]  # 3D [B, C, D, H, W] -> [C, D, 1, H, W]. 2D [B, C, H, W] -> [1, B, C, H, W]
    image = torch.cat([image for _ in range(3)], dim=2) if image.shape[2]!=3 else image  # Ensure RGB [*, 3, H, W]
    image = torch.stack([draw_segmentation_masks(i, m, alpha=alpha, colors=colors) if ch not in exclude_chs else i for ch, img_ch in enumerate(image) for i,m in zip(img_ch, mask_hot) ])  # [B, 3, H, W] # BUG Apparently only supports cpu()
    return image/255.0
| 4,556 | 41.588785 | 239 | py |
TraBS | TraBS-main/breaststudies/utils/data.py | import numpy as np
import SimpleITK as sitk
from scipy.ndimage import zoom
def get_affine(image):
    """Return the 4x4 RAS affine of a SimpleITK image (TorchIO convention)."""
    # Adapted from TorchIO:
    # https://github.com/fepegar/torchio/blob/164a1bf3699863ef3a74f2a7694f6f4cf0fff361/torchio/data/io.py#L271
    # SimpleITK stores geometry in LPS, hence lps2ras=True.
    return get_affine2(
        np.array(image.GetSpacing()),
        np.array(image.GetDirection()),
        image.GetOrigin(),
        lps2ras=True,
    )
def get_affine2(spacing, direction, origin, lps2ras=False):
    """Build a 4x4 affine matrix from spacing, direction cosines and origin.

    Args:
        spacing: Voxel spacing, length 3 (or 2 for a 2D image).
        direction: Flattened direction matrix, length 9 (3D) or 4 (2D).
        origin: Physical origin, length 3 (or 2 for a 2D image).
        lps2ras (bool): If True, mirror the first two axes (LPS -> RAS).

    Returns:
        np.ndarray: 4x4 affine matrix.

    Raises:
        RuntimeError: If ``direction`` has neither 4 nor 9 entries.
    """
    spacing = np.asarray(spacing)
    direction = np.asarray(direction)
    origin = np.asarray(origin)
    if len(direction) == 9:
        rotation = direction.reshape(3, 3)
    elif len(direction) == 4:
        # 2D image: embed the 2x2 rotation into a 3x3 identity and pad
        # spacing/origin with neutral values for the missing third axis.
        rotation = np.eye(3)
        rotation[:2, :2] = direction.reshape(2, 2)
        spacing = np.append(spacing, 1)
        origin = np.append(origin, 0)
    else:
        raise RuntimeError(f'Direction not understood: {direction}')
    # LPS -> RAS mirrors the x and y axes; otherwise leave them untouched.
    flip_xy = np.diag((-1, -1, 1)) if lps2ras else np.eye(3)
    flipped_rotation = flip_xy @ rotation
    affine = np.eye(4)
    affine[:3, :3] = flipped_rotation * spacing  # scale columns by spacing
    affine[:3, 3] = flip_xy @ origin             # translation
    return affine
def sitk_resample_to_shape(img, x, y, z, order=3):
    """
    Resamples Image to given shape
    Parameters
    ----------
    img : SimpleITK.Image
    x : int
        shape in x-direction
    y : int
        shape in y-direction
    z : int
        shape in z-direction
    order : int
        interpolation order (passed to scipy.ndimage.zoom)
    Returns
    -------
    SimpleITK.Image
        Resampled Image (the input is returned unchanged if it already has
        the requested size)
    """
    # Guard clause: nothing to do if the size already matches.
    if img.GetSize() == (x, y, z):
        return img
    img_np = sitk.GetArrayFromImage(img)  # numpy order is [z, y, x]
    zoom_fac_z = z / img_np.shape[0]
    zoom_fac_y = y / img_np.shape[1]
    zoom_fac_x = x / img_np.shape[2]
    img_np_fixed_size = zoom(img_np,
                             [zoom_fac_z,
                              zoom_fac_y,
                              zoom_fac_x],
                             order=order)
    img_resampled = sitk.GetImageFromArray(img_np_fixed_size)
    img_resampled = sitk_copy_metadata(img, img_resampled)
    img_resampled.SetDirection(img.GetDirection())
    img_resampled.SetOrigin(img.GetOrigin())
    # BUGFIX: the physical extent must stay constant, so the new spacing is
    # old_spacing / zoom_factor (= old_spacing * old_size / new_size).
    # The previous formula ``spacing * (1 + 1 - zoom_fac)`` is only a
    # first-order approximation that is exact solely at zoom_fac == 1.
    spacing_x = img.GetSpacing()[0] / zoom_fac_x
    spacing_y = img.GetSpacing()[1] / zoom_fac_y
    spacing_z = img.GetSpacing()[2] / zoom_fac_z
    img_resampled.SetSpacing((spacing_x, spacing_y, spacing_z))
    return img_resampled
def sitk_copy_metadata(img_source, img_target):
    """ Copy metadata (=DICOM Tags) from one image to another
    Parameters
    ----------
    img_source : SimpleITK.Image
        Source image
    img_target : SimpleITK.Image
        Target image (modified in place)
    Returns
    -------
    SimpleITK.Image
        Target image with copied metadata
    """
    # Copy every metadata key; existing keys on the target are overwritten.
    for k in img_source.GetMetaDataKeys():
        img_target.SetMetaData(k, img_source.GetMetaData(k))
    return img_target | 3,180 | 30.81 | 110 | py |
TraBS | TraBS-main/breaststudies/data/datamodule.py | from pathlib import Path
import yaml
import itertools
from tqdm import tqdm
import pytorch_lightning as pl
import torch
from torch.utils.data.dataloader import DataLoader
from torch.utils.data import RandomSampler, WeightedRandomSampler
import torch.multiprocessing as mp
class BaseDataModule(pl.LightningDataModule):
    """Base LightningDataModule: manages group-k-fold splits, YAML
    persistence of configuration/split, and train/val/test dataloaders.

    Subclasses override ``Dataset`` with a concrete dataset class that
    provides ``path_root_default`` and ``groupkfold``.
    """
    Dataset = torch.utils.data.Dataset

    def __init__(self,
                 batch_size: int = 1,
                 val_set: bool = True,
                 test_set: bool = True,
                 n_splits=5,
                 num_workers: int = mp.cpu_count(),
                 seed: int = 0,
                 path_root: str = None,
                 **ds_kwargs):
        super().__init__()
        # Snapshot all constructor arguments so save()/load() can round-trip
        # the exact configuration through YAML.
        self.hyperparameters = {**locals()}
        self.hyperparameters.pop('__class__')
        self.hyperparameters.pop('self')
        self.hyperparameters.update(**ds_kwargs)
        self.hyperparameters.pop('ds_kwargs')
        self.batch_size = batch_size
        self.val_set = val_set
        self.test_set = test_set
        self.n_splits = n_splits
        self.num_workers = num_workers
        self.seed = seed
        # Fall back to the dataset's default root when none is given.
        self.path_root = self.Dataset.path_root_default if path_root is None else path_root
        self.ds_kwargs = ds_kwargs
        self._item_pointers_split = {}  # split index -> {'train'/'val'/'test': item pointers}

    def save(self, path_config_dir, config_file='data_conf.yaml', split_file='data_split.yaml'):
        """Write the datamodule configuration and the k-fold split to YAML files."""
        path_config_dir = Path(path_config_dir)
        path_config_dir.mkdir(parents=True, exist_ok=True)
        # Get parameters set during 'setup()' or get default values
        for ds_name in ['ds_train', 'ds_val', 'ds_test']:
            ds = getattr(self, ds_name, None)
            if ds is not None:
                params = dict(ds.hyperparameters)
                # path_root and item_pointers are stored separately (split file).
                params.pop('path_root')
                params.pop('item_pointers')
                self.hyperparameters['params_'+ds_name] = params
        with open(path_config_dir / config_file, "w", newline="") as fp:
            yaml.dump(self.hyperparameters, fp)
        with open(path_config_dir / split_file, "w", newline="") as fp:
            yaml.dump(self._item_pointers_split, fp)

    @classmethod
    def load(cls, path_config_dir, config_file='data_conf.yaml', split_file='data_split.yaml', **kwargs):
        """Recreate a datamodule from YAML files written by save(); kwargs override stored values."""
        with open(Path(path_config_dir) / config_file, 'r') as f:
            hyperparameters = yaml.load(f, Loader=yaml.UnsafeLoader)
        hyperparameters.update(kwargs)
        dm = cls(**hyperparameters)
        dm._item_pointers_split = cls.load_split(path_config_dir, split_file)
        return dm

    @classmethod
    def load_split(self, path_config_dir, split_file='data_split.yaml'):
        """Load a previously saved split dict from YAML.

        NOTE(review): classmethod whose first parameter is named ``self``;
        it is bound to the class — rename to ``cls`` for clarity.
        """
        path_split_file = Path(path_config_dir) / split_file
        if path_split_file.is_file():
            with open(Path(path_config_dir) / split_file, 'r') as f:
                split_dict = yaml.load(f, Loader=yaml.UnsafeLoader)
        else:
            raise Exception('File not found: {}'.format(path_split_file))
        return split_dict

    def setup(self, stage=None, split=0, params_ds_train={}, params_ds_val={}, params_ds_test={} ):
        """Create the split (if missing) and instantiate the datasets for one fold.

        NOTE(review): mutable default arguments; harmless here because they
        are only read (copied via dict.update), never mutated.
        """
        # Create new split if not already exists
        if len(self._item_pointers_split) ==0:
            self._item_pointers_split = self.Dataset.groupkfold(self.path_root, self.n_splits, self.val_set, self.test_set)
        self.setup_split(stage=stage, split=split, params_ds_train=params_ds_train, params_ds_val=params_ds_val, params_ds_test=params_ds_test)

    def setup_split(self, stage=None, split=0, params_ds_train={}, params_ds_val={}, params_ds_test={}):
        """Instantiate ds_train/ds_val/ds_test for the given fold index.

        Dataset parameters are merged in increasing priority:
        shared ds_kwargs < per-dataset 'params_ds_*' kwargs < arguments
        passed directly to this call.
        """
        item_pointers_split = self._item_pointers_split[split]
        val_set = 'val' in item_pointers_split
        test_set = 'test' in item_pointers_split
        ds_params = self.ds_kwargs
        if stage == 'fit' or stage is None:
            params = {}
            params.update({k: ds_params[k] for k in ds_params.keys() - {'params_ds_train', 'params_ds_val', 'params_ds_test'}})
            params.update(ds_params.get('params_ds_train', {})) # Overwrite with dataset specific parameters
            params.update(params_ds_train) # Overwrite with dataset specific parameters passed to setup() call
            params['path_root'] = self.path_root
            params['item_pointers'] = item_pointers_split['train']
            self.ds_train = self.Dataset(**params)
            if val_set:
                params = {}
                params.update({k: ds_params[k] for k in ds_params.keys() - {'params_ds_train', 'params_ds_val', 'params_ds_test'}})
                params.update(ds_params.get('params_ds_val', {}))
                params.update(params_ds_val) # FIXME: Add function for nested dict update
                params['path_root'] = self.path_root
                params['item_pointers'] = item_pointers_split['val']
                self.ds_val = self.Dataset(**params)
        if stage == 'test' or stage is None:
            if not test_set:
                raise AssertionError("A test test set was not requested during initialization. Adjust your settings.")
            params = {}
            params.update({k: ds_params[k] for k in ds_params.keys() - {'params_ds_train', 'params_ds_val', 'params_ds_test'}})
            params.update(ds_params.get('params_ds_test', {}))
            params.update(params_ds_test)
            params['path_root'] = self.path_root
            params['item_pointers'] = item_pointers_split['test']
            self.ds_test = self.Dataset(**params)

    def train_dataloader(self):
        """Seeded training dataloader with random sampling (with replacement)."""
        generator = torch.Generator()
        generator.manual_seed(self.seed)
        # NOTE: Speed up for 2D: Load (in random order) all slices within one 3D volume before proceeding with next volume
        # return DataLoader(self.ds_train, batch_size=self.batch_size, num_workers=self.num_workers,
        #                   sampler=self.rand_series_sampler(self.ds_train))
        return DataLoader(self.ds_train, batch_size=self.batch_size, num_workers=self.num_workers,
                          sampler=RandomSampler(self.ds_train, replacement=True, num_samples=len(self.ds_train)), # NOTE: nnUNet default is num_samples=250
                          generator=generator,
                          drop_last=True)

    def val_dataloader(self):
        """Seeded, non-shuffled validation dataloader; raises if no val set was requested."""
        generator = torch.Generator()
        generator.manual_seed(self.seed)
        if self.val_set:
            return DataLoader(self.ds_val, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False,
                              generator=generator
                              )
        else:
            raise AssertionError("A validation set was not requested during initialization. Adjust your settings.")

    def test_dataloader(self):
        """Seeded, non-shuffled test dataloader; raises if no test set was requested."""
        generator = torch.Generator()
        generator.manual_seed(self.seed)
        if self.test_set:
            return DataLoader(self.ds_test, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=False,
                              # worker_init_fn=self.worker_init_fn
                              generator = generator,
                              )
        else:
            raise AssertionError("A test test set was not requested during initialization. Adjust your settings.")

    # def worker_init_fn(self, worker_id):
    #     # Note: By default, all workers have different seeds (important for data augmentation)
    #     # worker_info = torch.utils.data.get_worker_info()
    #     if self.seed is not None:
    #         # np.random.seed(self.seed) # may be overwritten or ignored by other modules
    #         # np.random.default_rng(self.seed)
    #         torch.manual_seed(self.seed)

    @classmethod
    def rand_series_sampler(cls, dataset, generator=None):
        """Sampler that visits series in random order and, within each series, its items in random order."""
        class SeriesRandomSampler(RandomSampler):
            """Items are sampled randomly but all items within a series are subsequently (randomly) sampled."""
            def __iter__(self):
                # Get number of series and number of items per series
                series_pointers = self.data_source.get_series_pointers()
                n_series = len(series_pointers)
                n_items = [len(series_pointer) for series_pointer in series_pointers.values() ]
                # Create list of indices per series element
                counter = itertools.count(0)
                idxs = [[next(counter) for _ in item_pointers] for item_pointers in series_pointers.values()]
                if self.generator is None:
                    generator = torch.Generator()
                    generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))
                else:
                    generator = self.generator
                # NOTE(review): the local ``generator`` built above is never
                # used — torch.randperm below receives self.generator, which
                # may be None (global RNG). Looks like a bug; confirm whether
                # ``generator=generator`` was intended.
                if self.replacement:
                    raise NotImplementedError()
                else:
                    for series_idx in torch.randperm(n_series, generator=self.generator):
                        for sires_item_idx in torch.randperm(n_items[series_idx], generator=self.generator):
                            yield idxs[series_idx][sires_item_idx]
        return SeriesRandomSampler(dataset, generator=generator)

    @classmethod
    def weighted_series_sampler(cls, dataset, model, generator=None):
        """Sampler that weights series/items by how many label classes their target contains.

        NOTE(review): the ``model`` parameter is unused in the visible code.
        """
        class SeriesWeightedRandomSampler(WeightedRandomSampler):
            def __init__(self, dataset, weights, num_samples: int, replacement: bool = True, generator=None) -> None:
                if not isinstance(num_samples, int) or isinstance(num_samples, bool) or num_samples <= 0:
                    raise ValueError("num_samples should be a positive integer "
                                     "value, but got num_samples={}".format(num_samples))
                if not isinstance(replacement, bool):
                    raise ValueError("replacement should be a boolean value, but got "
                                     "replacement={}".format(replacement))
                self.weights = weights
                self.num_samples = num_samples
                self.replacement = replacement
                self.generator = generator
                self.data_source = dataset
            def __iter__(self):
                # Get number of series and number of items per series
                series_pointers = self.data_source.get_series_pointers()
                n_series = len(series_pointers)
                n_items = [len(series_pointer) for series_pointer in series_pointers.values() ]
                # Create list of indices per series element
                counter = itertools.count(0)
                idxs = [[next(counter) for _ in item_pointers] for item_pointers in series_pointers.values()]
                for series_idx in torch.multinomial(torch.as_tensor(self.weights[0], dtype=torch.double), n_series, self.replacement, generator=self.generator):
                    for sires_item_idx in torch.multinomial(torch.as_tensor(self.weights[1][series_idx], dtype=torch.double), n_items[series_idx], self.replacement, generator=self.generator):
                        yield idxs[series_idx][sires_item_idx]
        # -------------- Compute weights --------------
        # weight of an item = number of label classes present in its target;
        # weight of a series = max over its items.
        sample_weights = [[], []] # [[series1, series2, ...], [[item1,item2], [item1, item2], ...]]
        for _, item_pointers in tqdm(dataset.get_series_pointers().items()):
            sample_weights[1].append([])
            for item_pointer in item_pointers:
                item = dataset.load_item(item_pointer)
                target = item['target']
                weight = sum([label_val in target for label_val in dataset.labels.values()])
                sample_weights[1][-1].append(weight)
            sample_weights[0].append(max(sample_weights[1][-1]))
        return SeriesWeightedRandomSampler(dataset, weights=sample_weights, num_samples=len(dataset), replacement=True, generator=generator) | 11,957 | 47.217742 | 191 | py |
TraBS | TraBS-main/breaststudies/data/datamodule_breast.py | import torch
from breaststudies.data import BaseDataModule, BreastDataset, BreastDatasetLR, BreastDataset2D, BreastUKADatasetLR
class BreastDataModule(BaseDataModule):
Dataset = BreastDataset
label2rgb = torch.tensor([
[0,0,0], # Background
[255,0,0], # Label 1
[0, 255, 0], # Label 2
[0, 0, 255], # Label 3
[255, 255, 0], # Label 4
[255, 0, 255], # Label 5
[0, 255, 255], # Label 6
[255, 255, 255] # Label 7
], dtype=torch.uint8)
class BreastDataModuleLR(BreastDataModule):
    """Breast datamodule with left/right split items (unilateral)."""
    Dataset = BreastDatasetLR
class BreastDataModule2D(BreastDataModule):
    """Breast datamodule yielding single 2D slices."""
    Dataset = BreastDataset2D
class BreastUKADataModuleLR(BreastDataModule):
    """UKA-cohort datamodule with left/right split items."""
    Dataset = BreastUKADatasetLR
| 976 | 31.566667 | 115 | py |
TraBS | TraBS-main/breaststudies/data/dataset_breast.py | import logging
from pathlib import Path
import json
import torchio as tio
import SimpleITK as sitk
import numpy as np
from breaststudies.augmentation import ZNormalization, CropOrPadFixed
from breaststudies.data import BaseDataset
from breaststudies.utils import get_affine
logger = logging.getLogger(__name__)
class BreastDataset(BaseDataset):
    """Breast-MRI segmentation dataset (internal cohort).

    Each item is a folder of NIfTI files: source image channels (dynamic,
    T2, subtraction) plus segmentation masks. ``target`` ('tissue' or
    'breast') selects which mask files and source channels are used.
    """
    path_root_default = Path('/home/gustav/Documents/datasets/BreastDataset/Gustav')
    default_target = 'tissue'
    # Mask filenames per segmentation task ('mask' restricts 'target' to the breast region).
    default_target_files = {'tissue': {'target':'mask_tissue.nii.gz', 'mask':'mask_breast.nii.gz'}, 'breast': {'target':'mask_breast.nii.gz'}}
    # Image channel filenames per segmentation task.
    default_source_files = {'tissue': {'source': [ 'Dyn_0.nii', 'T2_resampled.nii', 'Sub.nii' ]}, 'breast':{'source':['Dyn_0.nii', 'T2_resampled.nii']} }
    label_dict_tissue = {'Background':0, 'FGT':1, 'Fibroadenoma':2, 'Carcinoma':3, 'DCIS':4}
    label_dict_breast = {'Background':0, 'Breast':1, 'Implant':2}
    default_roi = (256, 256, 32)  # (W, H, D) in TorchIO axis order

    # --------------------------------- Preprocessing ---------------------------------------
    # Is applied to individual items (eg. Left/Right or Slice) i.e. after 'series2items'
    default_item_trans = tio.Compose([
        # ZNormalization(),
        # CropOrPadFixed(default_roi, padding_mode='minimum')
    ])
    # Is applied to the entire 3D volume
    default_series_trans = tio.ToCanonical()

    def _post_init(self):
        # Resolve target/source file defaults from the chosen 'target' task
        # unless explicitly given by the caller.
        target = self.kwargs.get('target', None)
        if (target is not None) and ('target_files' not in self.kwargs):
            self.kwargs['target_files'] = self.default_target_files[target]
        if (target is not None) and ('source_files' not in self.kwargs):
            self.kwargs['source_files'] = self.default_source_files[target]

    @classmethod
    def init_crawler(cls, path_root=None, **kwargs):
        """Return an iterator over all items e.g. path_root.rglob('*.jpg')"""
        return ((path_dir.relative_to(path_root),) for path_dir in Path(path_root).iterdir() if path_dir.is_dir())

    @classmethod
    def item_pointer2uid(cls, item_pointer, path_root=None, id_type='item', **kwargs):
        """Returns identifier for each item.
        Args:
            item_pointer (object): Unique pointer (e.g. Path or (Path, Slice)) to each item
            path_root (Path, optional): Path to root directory
            id_type (str, optional): Specifies the id-type e.g. case or study
        """
        path_root = cls.path_root_default if path_root is None else Path(path_root)
        path_item = path_root/Path(item_pointer[0])  # NOTE(review): computed but unused below
        if id_type=="patient":
            dicom_info = cls.load_dicom_tags(item_pointer, path_root=path_root, **kwargs)
            return dicom_info["PatientID"] # Hint: PatientID (0010,0020)
        if id_type == "study":
            raise NotImplementedError
        elif id_type == "series":
            return item_pointer[0].name # e.g. 90xxxxx
        elif id_type == "item":
            return item_pointer[0].name # e.g. 90xxxxx
        # NOTE(review): falls through and returns None for any other id_type

    @classmethod
    def load_series(cls, item_pointer, path_root, **kwargs):
        "Load (image) data and return as dict"
        source_files = kwargs.get('source_files', cls.default_source_files[cls.default_target])
        target_files = kwargs.get('target_files', cls.default_target_files[cls.default_target])
        path_item_source = Path(path_root)/item_pointer[0]
        path_item_target = {name:Path(kwargs.get('path_root_target', {}).get(name, path_root))/item_pointer[0] for name in target_files.keys()} # Target files (masks) might be placed under a different folder
        series_id = cls.item_pointer2uid(item_pointer, path_root, 'series')
        spacing = None
        if kwargs.get('raw', False):
            # Raw mode: return SimpleITK images (or None for missing files) plus DICOM tags.
            sources = dict({name:(sitk.ReadImage(str(path_item_source/filename), sitk.sitkFloat32) if (path_item_source/filename).is_file() else None) for name, filename in source_files.items()})
            targets = dict({name:(sitk.ReadImage(str(path_item_target[name]/filename), sitk.sitkUInt8) if (path_item_target[name]/filename).is_file() else None) for name, filename in target_files.items()})
            dicomtags = cls.load_dicom_tags(item_pointer, path_root, **kwargs)
            return {'uid': series_id, **sources, **targets, **dicomtags}
        # ---------- Load source(s) ------------
        sources = {}
        for source_name, channel_files in source_files.items():
            try:
                channels = []
                for channel_file in channel_files:
                    ch_nii = sitk.ReadImage(str(path_item_source/channel_file), sitk.sitkFloat32)
                    affine = get_affine(ch_nii)  # NOTE(review): affine of the last read channel wins
                    ch_np = sitk.GetArrayFromImage(ch_nii).transpose() # [W, H, D], WARNING TorchIO opposite PyTorch order
                    channels.append(ch_np)
                channels = np.stack(channels, axis=0)
                source = tio.ScalarImage(tensor=channels, affine=affine) # WARNING: generally affine not equal across different images
                spacing = source.spacing
                sources[source_name] = source
            except Exception as e:
                # Best-effort: missing/broken channels are logged and skipped.
                logger.warning(f"Could not load {source_name} because: {e}")
        # ---------- Load target ---------------
        targets = {}
        for name, target_file in target_files.items():
            target = sitk.ReadImage(str(path_item_target[name]/target_file), sitk.sitkUInt8)
            affine = get_affine(target)
            target = sitk.GetArrayFromImage(target).transpose() # [W, H, D], WARNING TorchIO opposite PyTorch order
            target = target.astype(np.float32) # Will be casted as long - prevents error when using num_workers>0 and stack()
            target = target[None] # [C, W, H, D]
            target = kwargs.get('manipulate_label_func', lambda x:x[0])((target, name, sources)) # eg. lambda x: np.where(x[0]>1, 0, 1) if x[1] == 'target' else x[0]
            targets[name] = tio.LabelMap(tensor=target, affine=affine)
            spacing = targets[name].spacing
        # ----------- Load Metadata -------------
        dicomtags = cls.load_dicom_tags(item_pointer, path_root, **kwargs) if kwargs.get('load_dicomtags', False) else {}
        spacing = {'spacing':np.array(spacing, dtype=np.float32)} if spacing is not None else {}
        return tio.Subject({'uid': series_id, **sources, **targets, **spacing, **dicomtags })

    # NOTE(review): the following converters are classmethods whose first
    # parameter is named ``self``; it is bound to the class — rename to
    # ``cls`` for clarity.
    @classmethod
    def tio2torch(self, series):
        # Transform TorchIO Subject to PyTorch (Tensor) and Shape-Order
        return {key: val.data.swapaxes(1,-1) if isinstance(val, tio.Image) else val for key,val in series.items()}

    @classmethod
    def tio2numpy(self, series):
        # Transform TorchIO Subject to NumPy and PyTorch Shape-Order
        return {key: val.numpy().swapaxes(1,-1) if isinstance(val, tio.Image) else val for key,val in series.items()}

    @classmethod
    def tio2sitk(self, series):
        # Transform TorchIO Subject to SimpleITK images
        return {key: val.as_sitk() if isinstance(val, tio.Image) else val for key,val in series.items()}

    @classmethod
    def item2out(cls, item, **kwargs):
        """Convert a TorchIO Subject into the requested output format ('torch', 'numpy', 'sitk' or 'tio')."""
        out_format = kwargs.get('out_format', 'torch')
        if out_format == 'torch':
            return cls.tio2torch(item)
        if out_format == 'numpy':
            return cls.tio2numpy(item)
        elif out_format == 'sitk':
            return cls.tio2sitk(item)
        elif out_format == 'tio':
            return item

    @property
    def labels(self):
        "Return a dict of label names and their label values e.g. {'Lesion A':1, 'Lesion B':2} "
        if self.kwargs.get('target', 'tissue') == 'tissue':
            return self.label_dict_tissue
        else:
            return self.label_dict_breast

    @property
    def label_fcts(self):
        "Return a dict of label names (and combinations) and their mask function e.g. {'All Lesions':lambda x:x>=1} "
        if self.kwargs.get('target', 'tissue') == 'tissue':
            return {'Background': lambda x:x==0,
                    'FGT': lambda x:x==1,
                    'Fibroadenoma': lambda x:x==2,
                    'Carcinoma': lambda x:x==3,
                    'DCIS': lambda x:x==4,
                    'FGT or Carcinoma': lambda x:x==5,
                    'B3 Lesion': lambda x:x==6}
        else:
            return {'Background': lambda x:x==0,
                    'Breast': lambda x:x==1,
                    'Implant': lambda x:x==2 }

    @classmethod
    def load_dicom_tags(cls, item_pointer, path_root=None, **kwargs):
        """Load the DICOM tags dumped as JSON next to the image files."""
        path_item = Path(path_root)/item_pointer[0]
        file_name = kwargs.get('json_file', 'Dyn.json')
        with open(path_item/file_name, 'r') as f:
            dicom_tags = json.load(f)
        return dicom_tags

    @classmethod
    def apply_crop(cls, subject, crop_dict):
        """Apply each named crop transform to the subject and suffix the UID with the crop name."""
        subjects = {}
        for crop_name, trans in crop_dict.items():
            subject_side = trans(subject)
            subject_side['uid'] = subject['uid']+'_'+crop_name
            subjects[crop_name] = subject_side
        return subjects
class BreastDatasetLR(BreastDataset):
    """Breast dataset where each series is split into left/right halves (unilateral items)."""
    @classmethod
    def init_crawler(cls, path_root, **kwargs):
        """Return an iterator over all items e.g. path_root.rglob('*.jpg')"""
        return ((path_dir.relative_to(path_root),side) for path_dir in Path(path_root).iterdir() for side in ['left', 'right'] if path_dir.is_dir())

    @classmethod
    def item_pointer2uid(cls, item_pointer, path_root=None, id_type='item'):
        # Item UID also encodes the side (e.g. '90xxxxx_left'); other id types delegate to the base class.
        if id_type=='item':
            return '_'.join([str(elem) for elem in item_pointer])
        else:
            return BreastDataset.item_pointer2uid(item_pointer, path_root=path_root, id_type=id_type)

    @classmethod
    def get_lr_crop(cls, subject, **kwargs):
        """Crops that cut the volume in half along the width axis."""
        shape = subject.spatial_shape # [W, H, D]
        crop_dict = {'left':tio.Crop((shape[0]//2, 0, 0, 0, 0, 0)),
                     'right':tio.Crop((0, shape[0]//2, 0, 0, 0, 0)) }
        return crop_dict

    @classmethod
    def get_lr_breast_crop(cls, subject, **kwargs):
        """Half-crops followed by a crop/pad to the breast-mask bounding box (at least target_shape)."""
        crop_dict = cls.get_lr_crop(subject)
        target_shape = kwargs.get('target_shape', None) # If set, it's used here as a minimum shape (extend bbox if target_shape>mask_shape)
        patches = {}
        for side in ['left', 'right']:
            if target_shape is not None:
                mask_bbox = np.diff(CropOrPadFixed._bbox_mask(crop_dict[side](subject['mask']).numpy()[0]), axis=0)[0]
                # NOTE(review): target_shape is re-assigned inside the loop, so the
                # enlarged shape from 'left' carries over to 'right' — confirm intended.
                target_shape = tuple(max(x, y) if x is not None else y for x, y in zip(target_shape, mask_bbox))
            patches[side] = tio.Compose([
                crop_dict[side],
                CropOrPadFixed(target_shape, mask_name='mask', labels=(1,), padding_mode=0)
            ])
        return patches

    @classmethod
    def series2items(cls, item_pointer, series, **kwargs):
        """Split one series into a left and a right item."""
        # ------- Get dict that specifies crop region
        if kwargs.get('target') == 'tissue':
            crop_dict = cls.get_lr_breast_crop(series, **kwargs)
        else:
            crop_dict = cls.get_lr_crop(series, **kwargs)
        series_crops = cls.apply_crop(series, crop_dict) # Also adapts the UID
        return { (*item_pointer, crop_name):crop_series for crop_name, crop_series in series_crops.items()}
class BreastDataset2D(BreastDataset):
    """Breast dataset where each series is split into single 2D slices along depth."""
    @classmethod
    def init_crawler(cls, path_root, **kwargs):
        """Return an iterator over all items e.g. path_root.rglob('*.jpg')"""
        # The slice count is read from the DICOM-tag JSON ('_NumberOfSlices').
        return ( (path_dir.relative_to(path_root), str(slice_n))
                 for path_dir in Path(path_root).iterdir()
                 for slice_n in range(cls.load_dicom_tags((path_dir,), path_root, **kwargs)['_NumberOfSlices'])
                 if path_dir.is_dir() )

    @classmethod
    def _get_slice_crop(cls, slices, **kwargs):
        # One tio.Crop per slice index that keeps exactly that depth slice.
        return {str(slice_i):tio.Crop((0,0, 0,0, slice_i,slices-(slice_i+1))) for slice_i in range(slices) }

    @classmethod
    def series2items(cls, item_pointer, series, **kwargs):
        """Split one series into one item per depth slice."""
        crop_dict = cls._get_slice_crop(series.spatial_shape[-1], **kwargs)
        series_crops = cls.apply_crop(series, crop_dict)
        return { (*item_pointer, crop_name):crop_series for crop_name, crop_series in series_crops.items()}

    @classmethod
    def item_pointer2uid(cls, item_pointer, path_root=None, id_type='item'):
        # Item UID also encodes the slice index; other id types delegate to the base class.
        if id_type=='item':
            return str(item_pointer[0])+'_'+str(item_pointer[1])
        else:
            return BreastDataset.item_pointer2uid(item_pointer, path_root=path_root, id_type=id_type)
class BreastDatasetLR2D(BreastDatasetLR, BreastDataset2D):
    """Breast dataset split both into left/right sides and into 2D slices."""
    @classmethod
    def init_crawler(cls, path_root, **kwargs):
        """Return an iterator over all items e.g. path_root.rglob('*.jpg')"""
        # Default slice counter reads '_NumberOfSlices' from the DICOM-tag JSON;
        # callers may override via kwargs['slice_crawler_func'].
        def slice_crawler_func(cls, path_root, path_dir, side, **kwargs):
            return cls.load_dicom_tags((path_dir,), path_root, **kwargs)['_NumberOfSlices']
        slice_crawler_func = kwargs.get('slice_crawler_func',slice_crawler_func)
        # uids_exclude = [path_file.stem.split('_')[0] for path_file in Path('/mnt/hdd/datasets/breast/Pix2Pix/Pix2Pix_LowDoseOnly2/train_A').iterdir()]
        # dirs = [path_dir for path_dir in Path(path_root).iterdir() if path_dir.name not in uids_exclude ]
        return ( (path_dir.relative_to(path_root), side, str(slice_n) )
                 for path_dir in Path(path_root).iterdir()
                 for side in ['left', 'right']
                 for slice_n in range(slice_crawler_func(cls, path_root, path_dir, side, **kwargs))
                 if path_dir.is_dir() )

    @classmethod
    def series2items(cls, item_pointer, series, **kwargs):
        """First split into left/right, then split each side into 2D slices."""
        items_lr = BreastDatasetLR.series2items(item_pointer[0:1], series, **kwargs)
        items = {}
        for sub_item_pointer, sub_series in items_lr.items():
            items.update(BreastDataset2D.series2items(sub_item_pointer, sub_series, **kwargs))
        return items
class BreastUKADataset(BreastDataset):
    """UKA cohort of the breast dataset (masks predicted by a 3D U-Net)."""
    path_root_default = Path('/mnt/hdd/datasets/breast/UKA/UKA_2021_05_25/')
    default_target_files = {'tissue': {'target':'mask_tissue_3dunet.nii.gz', 'mask':'mask_breast_3dunet.nii.gz'},
                            'breast': {'target':'mask_breast_3dunet.nii.gz'}}

    @classmethod
    def manipulate_label_func(cls, x):
        """Relabel the raw masks; x is a (mask_array, mask_name, sources) triple."""
        target,name, sources = x
        if (name == 'target'):
            # Option - simplified
            target[target==5] = 1 # Assume normal breast tissue in transition area between carcinoma and breast tissue (label 5)
            target[target==6] = 0 # Assume "background" for radial scar (label 6) (very difficult for expert to differentiate between those classes, potential to be malignant)
            # Option - only FGT
            # target[target>1] = 0 # "Remove" everything that is not FGT
            # Option - only Carcinoma
            # target[target !=3] = 0 # Everything that is no a Carcinoma, remove
            # target[target ==3] = 1 # Relabel Carcinoma as Label 1
            # Option - combine DCIS and Carcinoma
            target[target==4] = 3
        elif (name == 'mask') and len(sources):
            ch = sources['source'].shape[0]
            target = target>0 # Don't separate between subcategories (eg. implants)
            target = np.tile(target, (ch,1,1,1)) # Number of mask-channels must match number of source channels
        return target
class BreastUKADatasetLR(BreastUKADataset, BreastDatasetLR):
    """UKA dataset with left/right split items (unilateral)."""
    pass
class BreastUKADataset2D(BreastUKADataset, BreastDataset2D):
    """UKA dataset yielding single 2D slices."""
    pass
class BreastUKADatasetLR2D(BreastUKADataset, BreastDatasetLR2D):
    """UKA dataset with left/right split items, yielded as 2D slices."""
    pass
# --------------------------------------------- DUKE -----------------------------------------------------------------
class BreastDUKEDataset(BreastDataset):
    """DUKE cohort of the breast dataset (masks predicted by a 3D U-Net)."""
    path_root_default = Path('/mnt/hdd/datasets/breast/DUKE/dataset')
    default_target_files = {'tissue': {'target':'mask_tissue_3dunet.nii.gz', 'mask':'mask_breast_3dunet.nii.gz'},
                            'breast': {'target':'mask_breast_3dunet.nii.gz'}}
    default_source_files = {'tissue': {'source': ['T1.nii.gz', 'sub_resampled.nii']}, 'breast':{'source':['T1.nii.gz']} }

    @classmethod
    def load_dicom_tags(cls, item_pointer, path_root=None, **kwargs):
        """Load DICOM tags from 'pre.json' (the pre-contrast series of this cohort)."""
        path_item = Path(path_root)/item_pointer[0]
        file_name = kwargs.get('json_file', 'pre.json')
        with open(path_item/file_name, 'r') as f:
            dicom_tags = json.load(f)
        return dicom_tags
class BreastDUKEDatasetLR(BreastDUKEDataset, BreastDatasetLR):
    """DUKE dataset with left/right split items (unilateral)."""
    pass
class BreastDUKEDataset2D(BreastDUKEDataset, BreastDataset2D):
    """DUKE dataset yielding single 2D slices."""
    pass
class BreastDUKEDatasetLR2D(BreastDUKEDataset, BreastDatasetLR2D):
    """DUKE dataset with left/right split items, yielded as 2D slices."""
    pass
class BreastDUKESubsetDataset(BreastDUKEDataset):
    """Manually annotated DUKE subset (hand-drawn masks instead of 3D-U-Net predictions)."""
    default_target_files = {'tissue': {'target':'mask_tissue.nii.gz', 'mask':'mask_breast.nii.gz'},
                            'breast': {'target':'mask_breast.nii.gz'}}
    path_root_default = Path('/home/gustav/Documents/datasets/BreastDataset/DUKESubset')
class BreastDUKESubsetDatasetLR(BreastDUKESubsetDataset, BreastDUKEDatasetLR):
    """DUKE subset with left/right split items (unilateral)."""
    pass
class BreastDUKESubsetDataset2D(BreastDUKESubsetDataset, BreastDUKEDataset2D):
    """DUKE subset yielding single 2D slices."""
    pass
class BreastDUKESubsetDatasetLR2D(BreastDUKESubsetDataset, BreastDUKEDatasetLR2D):
    """DUKE subset with left/right split items, yielded as 2D slices."""
    pass
# ------------------------------- BREAST-DIAGNOSIS -------------------------------------------------------------
class BreastDIAGNOSISDataset(BreastDataset):
    """BREAST-DIAGNOSIS cohort; items are nested one level deeper (patient/study folders)."""
    path_root_default = Path('/mnt/hdd/datasets/breast/BREAST-DIAGNOSIS/dataset')
    default_target_files = {'tissue': {'target':'mask_tissue_3dunet.nii.gz', 'mask':'mask_breast_3dunet.nii.gz'},
                            'breast': {'target':'mask_breast_3dunet.nii.gz'}}
    default_source_files = {'tissue': {'source': ['T2.nii.gz', 'sub_resampled.nii']}, 'breast':{'source':['T2.nii.gz']} }

    @classmethod
    def init_crawler(cls, path_root=None, **kwargs):
        """Return an iterator over all items e.g. path_root.rglob('*.jpg')"""
        # One item per <patient>/<study> sub-directory.
        return ((path_sub_dir.relative_to(path_root),) for path_dir in Path(path_root).iterdir() if path_dir.is_dir()
                for path_sub_dir in Path(path_dir).iterdir() if path_sub_dir.is_dir() )

    @classmethod
    def item_pointer2uid(cls, item_pointer, path_root=None, id_type='item', **kwargs):
        """Returns identifier for each item.
        Args:
            item_pointer (object): Unique pointer (e.g. Path or (Path, Slice)) to each item
            path_root (Path, optional): Path to root directory
            id_type (str, optional): Specifies the id-type e.g. case or study
        Raises:
            ValueError: If ``id_type`` is not one of patient/study/series/item.
        """
        if id_type=="patient":
            dicom_info = cls.load_dicom_tags(item_pointer, path_root=path_root, **kwargs)
            return dicom_info["PatientID"] # Hint: PatientID (0010,0020)
        if id_type == "study":
            return item_pointer[0].parts[0] # e.g. 0001
        elif id_type == "series":
            return '_'.join(item_pointer[0].parts)
        elif id_type == "item":
            return '_'.join(item_pointer[0].parts) # e.g. 0001_08-12-2008
        else:
            # BUGFIX: ``raise "Unknown id_type"`` raised a bare string, which
            # is a TypeError in Python 3 (exceptions must derive from
            # BaseException). Raise a proper exception instead.
            raise ValueError(f"Unknown id_type: {id_type}")

    @classmethod
    def load_dicom_tags(cls, item_pointer, path_root=None, **kwargs):
        """Load DICOM tags from JSON, trying BLISS.json, then T2.json, then STIR.json."""
        path_root = cls.path_root_default if path_root is None else Path(path_root)
        path_item = Path(path_root)/item_pointer[0]
        file_name = kwargs.get('json_file', 'BLISS.json')
        path_file = path_item/file_name
        path_file = path_file if path_file.is_file() else path_item/'T2.json'
        path_file = path_file if path_file.is_file() else path_item/'STIR.json'
        with open(path_file, 'r') as f:
            dicom_tags = json.load(f)
        return dicom_tags
class BreastDIAGNOSISDatasetLR(BreastDIAGNOSISDataset, BreastDatasetLR):
    """BREAST-DIAGNOSIS dataset with left/right split items (unilateral)."""
    @classmethod
    def init_crawler(cls, path_root=None, **kwargs):
        """Return an iterator over all items e.g. path_root.rglob('*.jpg')"""
        # One item per <patient>/<study> sub-directory and side.
        return ((path_sub_dir.relative_to(path_root), side) for path_dir in Path(path_root).iterdir() if path_dir.is_dir()
                for path_sub_dir in Path(path_dir).iterdir() if path_sub_dir.is_dir() for side in ['left', 'right'] )
class BreastDIAGNOSISDataset2D(BreastDIAGNOSISDataset, BreastDataset2D):
    """BREAST-DIAGNOSIS dataset yielding single 2D slices."""
    pass
class BreastDIAGNOSISDatasetLR2D(BreastDIAGNOSISDataset, BreastDatasetLR2D):
    """BREAST-DIAGNOSIS dataset with left/right split items, yielded as 2D slices."""
    pass
class BreastDIAGNOSISSubsetDataset(BreastDIAGNOSISDataset):
    """Manually annotated BREAST-DIAGNOSIS subset (hand-drawn masks)."""
    default_target_files = {'tissue': {'target':'mask_tissue.nii.gz', 'mask':'mask_breast.nii.gz'},
                            'breast': {'target':'mask_breast.nii.gz'}}
    path_root_default = Path('/home/gustav/Documents/datasets/BreastDataset/BREAST-DIAGNOSIS-Subset')
class BreastDIAGNOSISSubsetDatasetLR(BreastDIAGNOSISSubsetDataset, BreastDIAGNOSISDatasetLR):
    """BREAST-DIAGNOSIS subset with left/right split items (unilateral)."""
    pass
class BreastDIAGNOSISSubsetDataset2D(BreastDIAGNOSISSubsetDataset, BreastDIAGNOSISDataset2D):
    """BREAST-DIAGNOSIS subset yielding single 2D slices."""
    pass
class BreastDIAGNOSISSubsetDatasetLR2D(BreastDIAGNOSISSubsetDataset, BreastDIAGNOSISDatasetLR2D):
    """BREAST-DIAGNOSIS subset with left/right split items, yielded as 2D slices."""
    pass
# ------------------------------------------------- TCGA -------------------------------------------
class BreastTCGABRCADataset(BreastDataset):
    """TCGA-BRCA cohort; items are nested one level deeper (patient/study folders)."""
    path_root_default = Path('/mnt/hdd/datasets/breast/TCGA-BRCA/dataset')
    default_target_files = {'tissue': {'target':'mask_tissue_3dunet.nii.gz', 'mask':'mask_breast_3dunet.nii.gz'},
                            'breast': {'target':'mask_breast_3dunet.nii.gz'}}
    default_source_files = {'tissue': {'source': ['T1.nii.gz', 'sub.nii']}, 'breast':{'source':['T1.nii.gz']} }

    @classmethod
    def init_crawler(cls, path_root=None, **kwargs):
        """Return an iterator over all items e.g. path_root.rglob('*.jpg')"""
        # One item per <patient>/<study> sub-directory.
        return ((path_sub_dir.relative_to(path_root),) for path_dir in Path(path_root).iterdir() if path_dir.is_dir()
                for path_sub_dir in Path(path_dir).iterdir() if path_sub_dir.is_dir() )

    @classmethod
    def item_pointer2uid(cls, item_pointer, path_root=None, id_type='item', **kwargs):
        """Returns identifier for each item.
        Args:
            item_pointer (object): Unique pointer (e.g. Path or (Path, Slice)) to each item
            path_root (Path, optional): Path to root directory
            id_type (str, optional): Specifies the id-type e.g. case or study
        Raises:
            ValueError: If ``id_type`` is not one of patient/study/series/item.
        """
        if id_type=="patient":
            dicom_info = cls.load_dicom_tags(item_pointer, path_root=path_root, **kwargs)
            return dicom_info["PatientID"] # Hint: PatientID (0010,0020)
        if id_type == "study":
            return item_pointer[0].parts[0] # e.g. 0001
        elif id_type == "series":
            return '_'.join(item_pointer[0].parts)
        elif id_type == "item":
            return '_'.join(item_pointer[0].parts) # e.g. 0001_08-12-2008
        else:
            # BUGFIX: ``raise "Unknown id_type"`` raised a bare string, which
            # is a TypeError in Python 3 (exceptions must derive from
            # BaseException). Raise a proper exception instead.
            raise ValueError(f"Unknown id_type: {id_type}")

    @classmethod
    def load_dicom_tags(cls, item_pointer, path_root=None, **kwargs):
        """Load DICOM tags from 'pre.json' (the pre-contrast series of this cohort)."""
        path_root = cls.path_root_default if path_root is None else Path(path_root)
        path_item = Path(path_root)/item_pointer[0]
        file_name = kwargs.get('json_file', 'pre.json')
        with open(path_item/file_name, 'r') as f:
            dicom_tags = json.load(f)
        return dicom_tags
class BreastTCGABRCADatasetLR(BreastDatasetLR, BreastTCGABRCADataset):
    """Left/right-split variant of the TCGA-BRCA dataset."""
    pass
def BreastDatasetCreator(dataset_name, cohort, use_2d=False, lateral='bilateral', **kwargs):
    """Factory returning an instance of the matching breast-MRI dataset class.

    Args:
        dataset_name (str): 'uka', 'duke' or 'breast-diagnosis'.
        cohort (str): 'entire' (all cases) or 'subset' (annotated subset).
        use_2d (bool): slice-wise 2D dataset instead of 3D volumes.
        lateral (str): 'bilateral' (whole volume) or 'unilateral' (left/right split).
        **kwargs: forwarded unchanged to the dataset constructor.

    Raises:
        Exception: for an unknown dataset, cohort or lateral mode. (Previously
            an unknown lateral mode silently returned None.)
    """
    registry = {
        'uka': {
            'entire': {
                (False, 'bilateral'): BreastUKADataset,
                (False, 'unilateral'): BreastUKADatasetLR,
                (True, 'bilateral'): BreastUKADataset2D,
                (True, 'unilateral'): BreastUKADatasetLR2D,
            },
            # the UKA subset uses the generic base classes
            'subset': {
                (False, 'bilateral'): BreastDataset,
                (False, 'unilateral'): BreastDatasetLR,
                (True, 'bilateral'): BreastDataset2D,
                (True, 'unilateral'): BreastDatasetLR2D,
            },
        },
        'duke': {
            'entire': {
                (False, 'bilateral'): BreastDUKEDataset,
                (False, 'unilateral'): BreastDUKEDatasetLR,
                (True, 'bilateral'): BreastDUKEDataset2D,
                (True, 'unilateral'): BreastDUKEDatasetLR2D,
            },
            'subset': {
                (False, 'bilateral'): BreastDUKESubsetDataset,
                (False, 'unilateral'): BreastDUKESubsetDatasetLR,
                (True, 'bilateral'): BreastDUKESubsetDataset2D,
                (True, 'unilateral'): BreastDUKESubsetDatasetLR2D,
            },
        },
        'breast-diagnosis': {
            'entire': {
                (False, 'bilateral'): BreastDIAGNOSISDataset,
                (False, 'unilateral'): BreastDIAGNOSISDatasetLR,
                (True, 'bilateral'): BreastDIAGNOSISDataset2D,
                (True, 'unilateral'): BreastDIAGNOSISDatasetLR2D,
            },
            'subset': {
                (False, 'bilateral'): BreastDIAGNOSISSubsetDataset,
                (False, 'unilateral'): BreastDIAGNOSISSubsetDatasetLR,
                (True, 'bilateral'): BreastDIAGNOSISSubsetDataset2D,
                (True, 'unilateral'): BreastDIAGNOSISSubsetDatasetLR2D,
            },
        },
    }
    if dataset_name not in registry:
        raise Exception("Dataset name unknown.")
    cohorts = registry[dataset_name]
    if cohort not in cohorts:
        raise Exception("Cohort name unknown.")
    variants = cohorts[cohort]
    key = (bool(use_2d), lateral)
    if key not in variants:
        raise Exception("Lateral mode unknown.")
    return variants[key](**kwargs)
| 27,146 | 45.325939 | 208 | py |
LearningToSelect | LearningToSelect-main/UFET/parallel_TE_UFET.py | """BERT finetuning runner."""
# from __future__ import absolute_import, division, print_function
import bi_bert as db
import numpy as np
import torch
import random
import wandb
import argparse
from scipy.special import softmax
import torch.nn as nn
from torch.nn import CrossEntropyLoss, BCEWithLogitsLoss
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModel
from transformers.optimization import AdamW
from transformers import RobertaTokenizer
from transformers import RobertaModel #RobertaForSequenceClassification
class RobertaForTopKEntailment(nn.Module):
    """Entailment-style re-ranker over the top-K candidate type labels.

    The statement and all K candidate labels are packed into one RoBERTa
    sequence; each label's pooled token embedding is scored by a small MLP.
    """
    def __init__(self, K, len_tokenizer):
        # K: number of candidate labels concatenated to each statement.
        # len_tokenizer: vocab size after adding special tokens (<ent>);
        # the embedding matrix is resized accordingly.
        super(RobertaForTopKEntailment, self).__init__()
        self.K = K
        self.roberta= RobertaModel.from_pretrained(pretrain_model_dir, local_files_only = True)
        self.roberta.resize_token_embeddings(len_tokenizer)
        # if concat entity embed with typing embed, the input dim should be bert_hidden_dim * 2. Otherwise, bert_hidden_dim
        self.mlp = nn.Sequential(nn.Linear(bert_hidden_dim, bert_hidden_dim),
                             nn.ReLU(),
                             nn.LayerNorm(bert_hidden_dim),
                             nn.Linear(bert_hidden_dim, 1))
    def forward(self, input_ids, input_mask, segment_ids, entity_span_index, embedding_method):
        """Return one logit per candidate label.

        segment_ids marks which tokens belong to which candidate
        (0..K-1; -1 elsewhere); embedding_method is 'mean' or 'sum'.
        Output shape is (batch, K, 1) — final nn.Linear maps hidden -> 1.
        """
        outputs_single = self.roberta(input_ids, input_mask, None)
        hidden_states = outputs_single[0] #torch.tanh(self.hidden_layer_2(torch.tanh(self.hidden_layer_1(outputs_single[1])))) #(batch, hidden)
        slice_position = self.get_label_index(segment_ids)
        # top_K_label_hidden_states shape: (batch_size, K, hidden)
        top_K_label_hidden_states = self.get_label_embedding(hidden_states, slice_position, embedding_method)
        # # entity_hidden_states shape: (batch_size, hidden)
        # entity_hidden_states = self.get_entity_embedding(hidden_states, entity_span_index, embedding_method)
        # # transform entity_hidden_states to the same shape as top_K_label_hidden_states so they can be concat at the final dim
        # entity_hidden_states = torch.unsqueeze(entity_hidden_states, 1).expand(-1, self.K, -1)
        # # concat each top-K label with the mentioned entity span in the statement
        # entity_label_hidden = torch.cat((top_K_label_hidden_states, entity_hidden_states), axis = 2)
        score_single = self.mlp(top_K_label_hidden_states) #(batch, K, 1) # top_K_label_hidden_states
        return score_single
    def get_entity_embedding(self, hidden_states, entity_span_index, flag):
        """Pool the hidden states of each example's entity span ('mean' or 'sum')."""
        entity_embed = []
        for i_th_batch, index in enumerate(entity_span_index):
            embed = hidden_states[i_th_batch][index[0]:index[1]]
            if flag == 'mean':
                embed = torch.mean(embed, 0)
            if flag == 'sum':
                embed = torch.sum(embed, 0)
            entity_embed.append(embed)
        entity_embed = torch.stack(entity_embed)
        return entity_embed
    def get_label_index(self, segment_ids):
        """
        for each intent-top_K_label pair,
        get the start and end postions for each label's tokens in the sequence.
        This will help compute mean embeddings of a label
        segment_ids: used to slice each label in the whole concat sequence

        Returns a (batch, K, 2) array of (start, end) column indices.
        NOTE(review): `column[start+1]` skips the first matching position
        (the first separator token), and the later slice `start:end` is
        end-exclusive — confirm the intended token span.
        """
        slice_position = []
        for i_th_label in np.arange(self.K):
            # all (row, column) positions whose segment id equals this label index
            row, column = np.where(segment_ids.cpu() == i_th_label)
            for j_th_batch in np.arange(segment_ids.size()[0]):
                position_in_column = np.where(row == j_th_batch)
                start = np.min(position_in_column)
                end = np.max(position_in_column)
                i_th_label_start = column[start+1]
                i_th_label_end = column[end]
                slice_position.append((i_th_label_start, i_th_label_end))
        slice_position = np.array(slice_position)
        # collected label-major; reshape and transpose to batch-major (batch, K, 2)
        slice_position = slice_position.reshape(self.K, segment_ids.size()[0], 2)
        slice_position = np.transpose(slice_position, (1, 0, 2))
        return slice_position
    def get_label_embedding(self, hidden_states, slice_position, flag):
        """
        For all the top-K labels,
        use their token embeddings' mean/sum to represent them

        Returns a (batch, K, hidden) tensor. A zero row is used only to seed
        torch.cat and is dropped by the final [1:] slice.
        NOTE(review): the device is hard-coded to 'cuda' here — this method
        will fail on CPU-only runs.
        """
        top_K_label_hidden_states = torch.zeros((1, self.K, hidden_states.size()[2]))
        for i_th_batch, slices in enumerate(slice_position):
            sliced_embedding = []
            for j_th_slice in slices:
                # print(hidden_states[i_th_batch][j_th_slice[0]: j_th_slice[1], :])
                label_embeddings = hidden_states[i_th_batch][j_th_slice[0]: j_th_slice[1], :]
                if flag == 'mean':
                    label_embedding = torch.mean(label_embeddings, 0)
                if flag == 'sum':
                    label_embedding = torch.sum(label_embeddings, 0)
                sliced_embedding.append(label_embedding)
            top_K_label_hidden_states = torch.cat((top_K_label_hidden_states.to('cuda'), torch.stack(sliced_embedding).unsqueeze(0)), 0)
        return top_K_label_hidden_states[1:]
class InputFeatures():
    """A single set of features of data (plain record; no behaviour)."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id, class_segment_ids, entity_span_index):
        # Store every field verbatim on the instance.
        for field_name, field_value in (
                ('input_ids', input_ids),                   # token ids of the packed sequence
                ('input_mask', input_mask),                 # 1 for real tokens, 0 for padding
                ('segment_ids', segment_ids),               # candidate index per token (-1 elsewhere)
                ('label_id', label_id),                     # 0/1 indicator over top-K (or -1 at inference)
                ('class_segment_ids', class_segment_ids),   # global class id per token
                ('entity_span_index', entity_span_index)):  # (start, end) of the entity span
            setattr(self, field_name, field_value)
def load_all_data(data_path, type_path):
    """Load one UFET split and build an example dict per entity mention.

    Each example carries the statement text with the mention wrapped in
    curly braces, the human-readable type labels ('_' replaced by spaces),
    the raw labels, and the mention string itself.
    """
    dataset, all_type_list, _ = db.load_data(data_path, type_path)
    examples = []
    for record in dataset:
        left_tokens = record['left_context_token']
        mention = record['mention_span']
        right_tokens = record['right_context_token']
        raw_types = record['y_str']
        readable_types = [' '.join(t.split('_')) for t in raw_types]
        # kept for parity with the original code: raises if a type is unknown
        _unused_indices = [all_type_list.index(t) for t in readable_types]
        left_str = ' '.join(left_tokens).lstrip()
        right_str = ' '.join(right_tokens).lstrip()
        marked = '{' + mention + '}'
        if len(left_tokens) == 0:
            statement = left_str + marked + ' ' + right_str
        elif len(right_tokens) == 1:
            # NOTE(review): no space before the right context here — looks
            # deliberate for single-token right contexts, but verify.
            statement = left_str + ' ' + marked + right_str
        else:
            statement = left_str + ' ' + marked + ' ' + right_str
        examples.append({'statement': statement,
                         'typing': readable_types,
                         'typing_vanilla': raw_types,
                         'entity': mention})
    return examples
def convert_examples_to_features(flag, examples, entity_span_index_roberta, top_K_candidates, ground_truth_indicator, eval_class_list, max_seq_length,
                                 tokenizer,
                                 cls_token_at_end=False,
                                 cls_token='[CLS]',
                                 cls_token_segment_id=1,
                                 sep_token='[SEP]',
                                 sep_token_extra=False,
                                 pad_on_left=False,
                                 pad_token=0,
                                 pad_token_segment_id=-2,
                                 sequence_a_segment_id=0,
                                 sequence_b_segment_id=1,
                                 mask_padding_with_zero=True):
    """ Loads a data file into a list of `InputBatch`s
        `cls_token_at_end` define the location of the CLS token:
            - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
            - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)

        Builds one packed sequence per example:
            [CLS] statement ([SEP][SEP] candidate_i) * K [SEP]
        segment_ids carry the candidate index (0..K-1) for candidate tokens
        and -1 elsewhere; class_segment_ids carry the global class id instead.
        For flag == 'train', label_id is the 0/1 ground-truth indicator over
        the K candidates; otherwise it is -1 (unused at inference).
    """
    class_map = {label : i for i, label in enumerate(eval_class_list)}
    max_length_in_data = 0
    features = []
    for (ex_index, example) in enumerate(tqdm(examples, desc='constructing sequence')):
        entity_span_index = entity_span_index_roberta[ex_index]
        tokens = [cls_token]
        token_intent = tokenizer.tokenize(example)
        tokens += token_intent
        segment_id_indicator = -1
        # the "+ 1" pre-accounts for the single trailing sep_token appended after the candidate loop
        segment_ids = [segment_id_indicator] * (len(tokens) + 1)
        """
        class_segment_ids indicates a label's real id according to the class map
        for all tokens of a same label, their corresponding class_segment_ids are the same
        This is to help produce the prediction labels at inference stage
        """
        class_segment_ids = [-1] * (len(tokens) + 1)
        for candidate in top_K_candidates[ex_index]:
            segment_id_indicator += 1
            class_ids_indicator = class_map[candidate]
            # two separators before each candidate (RoBERTa pair convention)
            tokens += [sep_token] * 2
            token_candidate = tokenizer.tokenize(candidate)
            tokens += token_candidate
            # "+ 2" covers the two separator tokens just added
            segment_ids += [segment_id_indicator] * (len(token_candidate) + 2)
            class_segment_ids += [class_ids_indicator] * (len(token_candidate) + 2)
        tokens += [sep_token]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
        # track the longest un-padded sequence seen (printed at the end)
        max_length_in_data = max(max_length_in_data, len(input_ids))
        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
            class_segment_ids = ([pad_token_segment_id] * padding_length) + class_segment_ids
        else:
            input_ids = input_ids + ([pad_token] * padding_length)
            input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
            class_segment_ids = class_segment_ids + ([pad_token_segment_id] * padding_length)
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(class_segment_ids) == max_seq_length
        if flag == 'train':
            label_id = ground_truth_indicator[ex_index]
        else:
            label_id = -1
        features.append(
                InputFeatures(input_ids=input_ids,
                              input_mask=input_mask,
                              segment_ids=segment_ids,
                              label_id=label_id,
                              class_segment_ids=class_segment_ids,
                              entity_span_index = entity_span_index))
    print('max_length_in_data:', max_length_in_data)
    return features
def examples_to_features(flag, source_examples, entity_span_index_roberta, top_K_candidates, ground_truth_indicator, eval_class_list, args, tokenizer, batch_size, dataloader_mode='sequential'):
    """Tokenise the examples, tensorise the features and wrap them in a DataLoader.

    dataloader_mode selects a SequentialSampler ('sequential') or a
    RandomSampler (anything else, used for training).
    """
    features = convert_examples_to_features(
        flag,
        source_examples, entity_span_index_roberta, top_K_candidates,
        ground_truth_indicator, eval_class_list, args.max_seq_length, tokenizer,
        cls_token_at_end=False,
        cls_token=tokenizer.cls_token,
        cls_token_segment_id=0,
        sep_token=tokenizer.sep_token,
        sep_token_extra=True,  # RoBERTa uses an extra separator between sentence pairs
        pad_on_left=False,
        pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
        pad_token_segment_id=-2)
    # one long-tensor column per feature field, in the order the train/eval
    # loops unpack their batches
    columns = (
        torch.tensor([f.input_ids for f in features], dtype=torch.long),
        torch.tensor([f.input_mask for f in features], dtype=torch.long),
        torch.tensor([f.segment_ids for f in features], dtype=torch.long),
        torch.tensor([f.label_id for f in features], dtype=torch.long),
        torch.tensor([f.class_segment_ids for f in features], dtype=torch.long),
        torch.tensor([f.entity_span_index for f in features], dtype=torch.long),
    )
    data_tensor = TensorDataset(*columns)
    if dataloader_mode == 'sequential':
        sampler = SequentialSampler(data_tensor)
    else:
        sampler = RandomSampler(data_tensor)
    return DataLoader(data_tensor, sampler=sampler, batch_size=batch_size)
def extract_entity(data_list_roberta, roberta_tokenizer):
    """Locate each entity span in the RoBERTa-tokenised inputs.

    Every input string brackets its entity with two <mask> tokens; the
    function returns an (n_examples, 2) array of (start, end) positions,
    with the end shifted left by one.
    """
    # id of the <mask> token that brackets each entity mention
    mask_id = roberta_tokenizer.encode(roberta_tokenizer.mask_token)[1]
    input_ids = roberta_tokenizer(data_list_roberta, return_tensors='np', padding='longest').input_ids
    # two markers per sequence -> pair consecutive hits, then shift the end by -1
    marker_columns = np.where(input_ids == mask_id)[1]
    return marker_columns.reshape(-1, 2) - np.array([0, 1])
def get_entity_embedding(last_hidden_state, entity_span_index, flag = 'mean'):
    """Pool the hidden states of each entity span into a single vector.

    Args:
        last_hidden_state: (batch, seq_len, hidden) token embeddings.
        entity_span_index: per-example (start, end) slice bounds of the entity tokens.
        flag: pooling mode, 'mean' or 'sum'.

    Returns:
        (batch, hidden) tensor of pooled entity embeddings.

    Raises:
        ValueError: for an unsupported pooling mode. (Previously an unknown
            flag left the local variable unbound and raised an opaque
            UnboundLocalError.)
    """
    entity_embedding_list = []
    for hidden, span in zip(last_hidden_state, entity_span_index):
        if flag == 'mean':
            entity_embedding = torch.mean(hidden[span[0]:span[1]], 0)
        elif flag == 'sum':
            entity_embedding = torch.sum(hidden[span[0]:span[1]], 0)
        else:
            raise ValueError(f"Unknown pooling flag: {flag!r} (expected 'mean' or 'sum')")
        entity_embedding_list.append(entity_embedding)
    return torch.stack(entity_embedding_list)
def get_top_K_candidates(statement_model, types_vector_path, example_list, all_type_list, device, statement_tokenizer, roberta_tokenizer, args, flag):
    """Rank all type labels per example and keep the K most similar.

    The pretrained statement encoder embeds each entity mention; candidates
    are chosen by embedding similarity against precomputed type vectors.
    For flag == 'train' the examples are duplicated args.N times, each copy's
    candidate order is shuffled (data augmentation), and a 0/1 ground-truth
    indicator over the K candidates is built; for other flags the indicator
    list is returned empty.
    """
    # load all types embeddings
    _, _, all_types_embedding_loaded = db.load_type_vector(types_vector_path)
    data_list = np.array([[e['statement'], '-1'] for e in example_list]) # '-1' is meaningless. It is like a placeholder so data_list can be formatted to the input of db.examples_to_features()
    data_list_roberta = [i[0].replace('{', '<mask>').replace('}', '<mask>') for i in data_list] # sep entity with <mask> tokens, so roberta can extract the entity easily
    entity_span_index_roberta = extract_entity(data_list_roberta, roberta_tokenizer)
    statement_list = [e['statement'].replace('{', '<ent><ent> ').replace('}', ' <ent><ent>') for e in example_list]
    truth_label_list = [e['typing'] for e in example_list]
    entity_list = [e['entity'] for e in example_list]
    # batch size == len(data_list): the loop below runs for a single batch
    statement_dataloader = db.examples_to_features(data_list, statement_tokenizer, len(data_list), 'sequential')
    ground_truth_indicator = []
    # Get test entity embeddings
    for _, batch in enumerate(tqdm(statement_dataloader, desc='Getting top-K')):
        batch = tuple(t.to(device) for t in batch)
        # Get input_ids(tokens) for statements
        statement_input_ids, statement_input_mask, entity_span_index, statement_index = batch
        # Get embeddings for entity span in each statement
        with torch.no_grad():
            statement_outputs = statement_model(statement_input_ids, statement_input_mask)
        # the method of get_entity_embedding must be consistent with the pretrained top-k model. If the pretrained top-k model concat ebeb. of entity with whole sentence, in here it must be same. VICE VERSA.
        entity_embedding_list = get_entity_embedding(statement_outputs[0], entity_span_index, args.embedding_method)
        similarity = db.embedding_similarity(entity_embedding_list, all_types_embedding_loaded)
        # indices of the K highest-similarity types, ascending by similarity
        top_K_indice = [np.argsort(i)[-args.K:] for i in similarity]
        top_K_candidates = [np.array(all_type_list)[i] for i in top_K_indice]
    # compute recall  Note: if recall is not consistent with the pretrained model, consider if '_' in typing is reserved AND check get_entity_embedding method.
    recall_list = db.compute_recall(top_K_candidates, truth_label_list)
    avg_recall = sum(recall_list)/len(recall_list)
    print('!!!! Checking top-K-recall: [' + str(flag) + ']!!!!!:', avg_recall)
    def duplicate_data(data, N):
        """
        Duplicate each piece of training data N times
        Data needs to be duplicated:
            top_K_candidates
            statement_list
            truth_label_list
        """
        data = np.array(data, dtype=object)
        augmented_data = np.repeat(data, np.array([N for i in range(len(data))]), axis=0)
        return augmented_data
    if flag == 'train':
        """ if all ground truth labels are in top-K candidates, if not, replace the
        the class with smallest similarity with the ground truth """
        # miss = 0
        # for index, truth in enumerate(truth_label_list):
        #     top_K_set = set(top_K_candidates[index])
        #     truth_set = set(truth)
        #     if len(truth) == len(top_K_set & truth_set):
        #         continue
        #     else:
        #         miss += 1
        #         missed_label = truth_set - top_K_set
        #         replace_index = 0
        #         for label in missed_label:
        #             while (top_K_candidates[index][replace_index] in truth_set):
        #                 # in case that the position will be replaced is already a true label
        #                 replace_index += 1
        #             top_K_candidates[index][replace_index] = label
        #             replace_index += 1
        #             # print('miss index:', index)
        # print(miss)
        top_K_candidates = duplicate_data(top_K_candidates, args.N)
        statement_list = duplicate_data(statement_list, args.N)
        truth_label_list = duplicate_data(truth_label_list, args.N)
        entity_span_index_roberta = duplicate_data(entity_span_index_roberta, args.N)
        """ shuffle the order of top-K candidates for each piece of data to do Data Augmentation """
        for index, candidates in enumerate(top_K_candidates):
            np.random.shuffle(candidates)
            top_K_candidates[index] = candidates
            truth = truth_label_list[index]
            # return a ground truth index indicator in candidates.
            indicator = np.isin(np.asarray(top_K_candidates[index]), np.asarray(truth)).astype(int)
            ground_truth_indicator.append(indicator)
    return top_K_candidates, ground_truth_indicator, truth_label_list, statement_list, entity_span_index_roberta
def compute_recall(pred_list, ground_truth_list):
    """Per-example recall: |pred ∩ truth| / |truth|.

    Returns 0.0 for an example whose ground-truth set is empty instead of
    raising ZeroDivisionError (compute_precision already guards its
    denominator with an epsilon).
    """
    recall_list = []
    for pred, truth in zip(pred_list, ground_truth_list):
        overlap = len(set(pred) & set(truth))
        recall_list.append(overlap / len(truth) if len(truth) else 0.0)
    return recall_list
def compute_precision(pred_list, ground_truth_list):
    """Per-example precision: |pred ∩ truth| / (|pred| + 1e-6).

    The epsilon in the denominator keeps examples with no predictions from
    raising ZeroDivisionError (they score 0.0).
    """
    precision_list = []
    for pred, truth in zip(pred_list, ground_truth_list):
        overlap = len(set(pred) & set(truth))
        precision_list.append(overlap / (len(pred) + 1e-6))
    return precision_list
def compute_f1(pred_list, ground_truth_list):
    """Return per-example (recall, precision, F1) lists.

    F1 uses the epsilon-smoothed harmonic mean so 0/0 examples score 0.
    """
    recalls = compute_recall(pred_list, ground_truth_list)
    precisions = compute_precision(pred_list, ground_truth_list)
    f1s = [2 * (r * p) / (r + p + 1e-6) for r, p in zip(recalls, precisions)]
    return recalls, precisions, f1s
def groupby_label(label_indicator, len_test):
    """Group the second column of (example_idx, label_idx) rows by example index.

    `label_indicator` must be sorted by its first column (as np.where output
    is). Returns a list of `len_test` lists; examples with no predicted
    label get an empty list.

    Fixed: the original indexed `label_indicator[p]` after the rows were
    exhausted, raising IndexError whenever the last example(s) had no
    predictions.
    """
    pred_label_index = []
    p = 0
    total = len(label_indicator)
    for i in range(len_test):
        current = []
        while p < total and label_indicator[p][0] == i:
            current.append(label_indicator[p][1])
            p += 1
        pred_label_index.append(current)
    return pred_label_index
def macro(gold, pred, len_test):
    """Macro-averaged precision/recall/F1 (adapted from the UFET paper code).

    Returns:
        (num_examples, pred_example_count, avg_elem_per_pred,
         precision, recall, f1)

    Fixed: precision, recall and avg_elem_per_pred are now initialised, so
    the function returns zeros instead of raising UnboundLocalError (or
    ZeroDivisionError for avg_elem_per_pred) when no example has predictions
    or no example has gold labels.
    """
    def f1(p, r):
        # harmonic mean, guarded so 0 recall yields 0 instead of dividing by 0
        if r == 0.:
            return 0.
        return 2 * p * r / float(p + r)
    num_examples = len_test
    p = 0.
    r = 0.
    pred_example_count = 0.
    pred_label_count = 0.
    gold_label_count = 0.
    precision = 0.
    recall = 0.
    for true_labels, predicted_labels in zip(gold, pred):
        if predicted_labels:
            pred_example_count += 1
            pred_label_count += len(predicted_labels)
            per_p = len(set(predicted_labels).intersection(set(true_labels))) / float(len(predicted_labels))
            p += per_p
        if len(true_labels):
            gold_label_count += 1
            per_r = len(set(predicted_labels).intersection(set(true_labels))) / float(len(true_labels))
            r += per_r
    if pred_example_count > 0:
        precision = p / pred_example_count
    if gold_label_count > 0:
        recall = r / gold_label_count
    avg_elem_per_pred = pred_label_count / pred_example_count if pred_example_count else 0.
    return num_examples, pred_example_count, avg_elem_per_pred, precision, recall, f1(precision, recall)
def metric(threshold_list, preds, top_K_candidates, truth_label_list, class_map, len_set):
    """Sweep decision thresholds and return macro P/R/F1 per threshold.

    For each threshold, every candidate whose score exceeds it is predicted;
    macro precision/recall/F1 are then computed against the ground truth.
    The returned list is sorted by descending macro F1, so element 0 holds
    the best threshold.
    """
    macro_performance_list = []
    for _, th in enumerate(tqdm(threshold_list, desc = 'searching threshold')):
        try:
            label_indicator = np.array(np.where(preds>th)) # for each piece of data, the indice of all the labels are predicted as truth
            label_indicator = np.concatenate((label_indicator[0].reshape(-1, 1), label_indicator[1].reshape(-1, 1)), axis = 1) # reshape to fit groupby function
            pred_label_index = groupby_label(label_indicator, len_set) # group by the label_indicator's first column. It is the predictions for each piece of data
            pred_results = []
            for top_k, label_index in zip(top_K_candidates, pred_label_index):
                pred_results.append(top_k[label_index])
            pred_results_index = [[class_map[i] for i in j] for j in pred_results]
            test_ground_truth_class_id = [[class_map[i] for i in j] for j in truth_label_list]
            recall_list, precision_list, f1_list = compute_f1(pred_results_index, test_ground_truth_class_id)
            avg_recall = sum(recall_list)/len(recall_list)
            avg_precision = sum(precision_list)/len(precision_list)
            avg_f1 = sum(f1_list)/len(f1_list)
            avg_performance = '(\navg recall: ' + str(avg_recall) + '\navg precision: ' + str(avg_precision) + '\navg f1: ' + str(avg_f1) + '\n)'
            # print(avg_performance)
            count, pred_count, avg_pred_count, macro_precision, macro_recall, macro_f1 = macro(test_ground_truth_class_id, pred_results_index, len_set)
            macro_performance = (('threshold', f"{th:.4}"), ('P', f"{macro_precision:.2%}"), ('R', f"{macro_recall:.2%}"), ('F1', f"{macro_f1:.2%}"))
            # print(macro_performance)
            macro_performance_list.append(macro_performance)
        except Exception:
            # Fixed: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit. Thresholds that fail to produce a
            # valid prediction set are still skipped on purpose (best-effort sweep).
            pass
    # sort by F1 descending; '12.34%' is parsed by rewriting it as '12.34e-2'
    macro_performance_list.sort(key = lambda x: -float(x[3][1].replace('%', 'e-2')))
    return macro_performance_list
def evaluate(model, dev_dataloader, test_dataloader, device, dev_top_K_candidates, test_top_K_candidates, class_map, dev_truth_label_list, test_truth_label_list, args):
    """Score dev and test sets; tune the decision threshold on dev only.

    Collects the model's per-candidate logits for both splits, applies a
    temperature-scaled softmax over the K candidates, searches the threshold
    range args.threshold on the dev set, then applies the single best dev
    threshold to the test set. Returns (dev_performance, test_performance),
    each a tuple of (('threshold', ...), ('P', ...), ('R', ...), ('F1', ...)).
    """
    model.eval()
    len_test = len(test_truth_label_list)
    len_dev = len(dev_truth_label_list)
    """ get pred for dev data """
    dev_preds = []
    for _, batch in enumerate(tqdm(dev_dataloader, desc="load dev data")):
        input_ids, input_mask, segment_ids, label_ids, class_segment_ids, entity_span_index = batch
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        with torch.no_grad():
            logits = model(input_ids, input_mask, segment_ids, entity_span_index, args.embedding_method)
        # accumulate all batches into one array
        if len(dev_preds) == 0:
            dev_preds.append(logits.detach().cpu().numpy())
        else:
            dev_preds[0] = np.append(dev_preds[0], logits.detach().cpu().numpy(), axis=0)
    # softmax temperature, shared by the dev and test passes below
    t = 0.5
    dev_preds = dev_preds[0].reshape(dev_preds[0].shape[0], -1)
    dev_preds = softmax(dev_preds/t,axis=1)
    """ get pred for test data """
    test_preds = []
    for _, batch in enumerate(tqdm(test_dataloader, desc="load test data")):
        input_ids, input_mask, segment_ids, label_ids, class_segment_ids, entity_span_index = batch
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        with torch.no_grad():
            logits = model(input_ids, input_mask, segment_ids, entity_span_index, args.embedding_method)
        if len(test_preds) == 0:
            test_preds.append(logits.detach().cpu().numpy())
        else:
            test_preds[0] = np.append(test_preds[0], logits.detach().cpu().numpy(), axis=0)
    test_preds = test_preds[0].reshape(test_preds[0].shape[0], -1)
    test_preds = softmax(test_preds/t,axis=1)
    """ get best threshold for dev data """
    dev_threshold_list = np.arange(args.threshold[0], args.threshold[1], 0.0001)
    dev_macro_performance_list = metric(dev_threshold_list, dev_preds, dev_top_K_candidates, dev_truth_label_list, class_map, len_dev)
    # get best threshold according to the dev set then apply it on test set
    best_threshold = [float(dev_macro_performance_list[0][0][1])]
    test_macro_performance_list = metric(best_threshold, test_preds, test_top_K_candidates, test_truth_label_list, class_map, len_test)
    dev_performance = dev_macro_performance_list[0]
    test_performance = test_macro_performance_list[0]
    return dev_performance, test_performance
def main(args_train_batch_size, args_test_batch_size, args_num_train_epochs, args_learning_rate, args_ENABLE_WANDB, args_K, args_embedding_method, args_seed, args_eval_each_epoch, args_N, args_max_seq_length, args_threshold):
    """End-to-end pipeline: top-K candidate selection, entailment training, evaluation.

    Every parameter is a default fed into an argparse definition, so the
    script can also be driven from the command line. Relies on module
    globals for paths (train_path, type_path, ...) and on the pretrained
    statement encoder at pretrained_statement_model_path.
    NOTE(review): args_result_name is read as a module global here rather
    than passed as a parameter — confirm that is intended.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_batch_size",
                        default=args_train_batch_size,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--test_batch_size",
                        default=args_test_batch_size,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate",
                        default=args_learning_rate,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=args_num_train_epochs,
                        type=int,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--eval_each_epoch",
                        default=args_eval_each_epoch,
                        action='store_true',
                        help="For each entity, sample its top similar negative labels to construct the negative pairs. If set to False, do random sample")
    parser.add_argument("--embedding_method",
                        default=args_embedding_method,
                        type=str,
                        help="Use mean or sum to get embeddings")
    parser.add_argument("--K",
                        default=args_K,
                        type=int,
                        help="Total number of top candidates selected")
    parser.add_argument("--N",
                        default=args_N,
                        type=int,
                        help="The number of augmentation for each piece of data")
    parser.add_argument("--ENABLE_WANDB",
                        default=args_ENABLE_WANDB,
                        action='store_true',
                        help="Use wandb or not.")
    parser.add_argument('--seed',
                        type=int,
                        default=args_seed,
                        help="random seed for initialization")
    parser.add_argument('--result_name',
                        type=str,
                        default=args_result_name)
    parser.add_argument("--max_seq_length",
                        default=args_max_seq_length,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--threshold",
                        default=args_threshold,
                        type=str,
                        help="Threshold range.")
    # absorbs the '-f' flag Jupyter passes when running inside a notebook
    parser.add_argument('-f')
    args = parser.parse_args()
    # 'lo,hi' string -> [lo, hi] floats used by evaluate()'s threshold sweep
    args.threshold = [float(i) for i in args.threshold.split(',')]
    device = torch.device("cuda")
    """ set random seed """
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    """ load top-K selection model """
    # One BERT to encode statements
    statement_model = AutoModel.from_pretrained('bert-base-uncased', local_files_only = True).to(device)
    statement_model.load_state_dict(torch.load(pretrained_statement_model_path))
    statement_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, local_files_only = True)
    statement_model.to(device)
    statement_model.eval()
    """ load data """
    _, all_type_list, all_type_list_vanilla= db.load_data(train_path, type_path)
    train_example_list = load_all_data(train_path, type_path)
    dev_example_list = load_all_data(dev_path, type_path)
    test_example_list = load_all_data(test_path, type_path)
    class_map = {label : i for i, label in enumerate(all_type_list)}
    """ load top-k Entailment model """
    tokenizer = RobertaTokenizer.from_pretrained(pretrain_model_dir, do_lower_case=True, local_files_only = True)
    """ Add special token <ent> to seperate entity """
    special_tokens_dict = {'additional_special_tokens': ['<ent>']}
    num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
    model = RobertaForTopKEntailment(args.K, len(tokenizer))
    model.to(device)
    # AdamW with weight decay disabled for biases and LayerNorm parameters
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=args.learning_rate)
    """ Top-K selection """
    train_top_K_candidates, train_ground_truth_indicator, _, train_statement_list, train_entity_span_index_roberta = get_top_K_candidates(statement_model, types_vector_path, train_example_list ,all_type_list, device, statement_tokenizer, tokenizer, args, 'train')
    dev_top_K_candidates, _, dev_truth_label_list, dev_statement_list, dev_entity_span_index_roberta = get_top_K_candidates(statement_model, types_vector_path, dev_example_list ,all_type_list, device, statement_tokenizer, tokenizer, args, 'dev')
    test_top_K_candidates, _, test_truth_label_list, test_statement_list, test_entity_span_index_roberta = get_top_K_candidates(statement_model, types_vector_path, test_example_list ,all_type_list, device, statement_tokenizer, tokenizer, args, 'test')
    test_ground_truth_class_id = [[class_map[i] for i in j] for j in test_truth_label_list]
    dev_ground_truth_class_id = [[class_map[i] for i in j] for j in dev_truth_label_list]
    print('Top-K selection')
    """ ------------------- """
    train_dataloader = examples_to_features('train', train_statement_list, train_entity_span_index_roberta, train_top_K_candidates, train_ground_truth_indicator, all_type_list, args, tokenizer, args.train_batch_size, dataloader_mode='random')
    dev_dataloader = examples_to_features('dev', dev_statement_list, dev_entity_span_index_roberta, dev_top_K_candidates, dev_ground_truth_class_id, all_type_list, args, tokenizer, args.test_batch_size, dataloader_mode='sequential')
    test_dataloader = examples_to_features('test', test_statement_list, test_entity_span_index_roberta, test_top_K_candidates, test_ground_truth_class_id, all_type_list, args, tokenizer, args.test_batch_size, dataloader_mode='sequential')
    """ training """
    performence_each_epoch = []
    for epoch in range(args.num_train_epochs):
        for _, batch in enumerate(tqdm(train_dataloader, desc='train|epoch'+str(epoch))):
            model.train()
            batch = tuple(t.to(device) for t in batch)
            input_ids, input_mask, segment_ids, label_ids, class_segment_ids, entity_span_index = batch
            logits = model(input_ids, input_mask, segment_ids, entity_span_index, args.embedding_method)
            bcsz= input_ids.shape[0]
            # multi-label objective: one independent sigmoid per candidate
            loss_fct = BCEWithLogitsLoss()
            loss = loss_fct(logits.view(bcsz, -1), label_ids.to(device).float())
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        # NOTE(review): evaluation runs inside the epoch loop, so with
        # num_train_epochs == 0 no evaluation happens either — confirm intended.
        if args.eval_each_epoch:
            dev_performance, test_performance = evaluate(model, dev_dataloader, test_dataloader, device, dev_top_K_candidates, test_top_K_candidates, class_map, dev_truth_label_list, test_truth_label_list, args)
            performence_each_epoch.append((dev_performance, test_performance))
            print('dev_performance:', dev_performance)
            print('test_performance:', test_performance)
            print('-------------------')
if __name__ == "__main__":
    # ---- run configuration; all values are forwarded to main() below ----
    args_train_batch_size = 8
    args_test_batch_size = 1 #256
    # NOTE(review): with 0 epochs main()'s train/eval loop never executes — confirm intended.
    args_num_train_epochs = 0
    args_learning_rate = 1e-5
    args_ENABLE_WANDB = False
    args_K = 80
    args_N = 1
    args_embedding_method = 'mean' # 'sum' or 'mean'
    args_seed = 36
    args_result_name = ''
    args_eval_each_epoch = True
    args_max_seq_length = 450 # 320 if K is 50. 450 if K is 80
    args_threshold = '0,0.02'
    # consumed as module globals by RobertaForTopKEntailment
    bert_hidden_dim = 1024
    pretrain_model_dir = 'roberta-large' #'roberta-large' , 'roberta-large-mnli', 'bert-large-uncased'
    # dataset / resource paths (module globals read inside main())
    train_path = '../data/ultrafine_acl18/release/crowd/train.json'
    dev_path = '../data/ultrafine_acl18/release/crowd/dev.json'
    test_path = '../data/ultrafine_acl18/release/crowd/test.json'
    type_path = '../data/ultrafine_acl18/release/ontology/types.txt'
    types_vector_path = '../data/ultrafine_acl18/types_vector_768.txt'
    # pretrained statement encoder used for top-K candidate selection
    pretrained_statement_model_path = './7728_model.pth'
main(args_train_batch_size, args_test_batch_size, args_num_train_epochs, args_learning_rate, args_ENABLE_WANDB, args_K, args_embedding_method, args_seed, args_eval_each_epoch, args_N, args_max_seq_length, args_threshold) | 36,027 | 46.405263 | 263 | py |
LearningToSelect | LearningToSelect-main/UFET/context_TE_UFET.py | import argparse
import csv
import logging
import json
import random
import sys
import numpy as np
import torch
import torch.nn as nn
from collections import defaultdict
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from tqdm import tqdm
from torch.nn import CrossEntropyLoss
from scipy.special import softmax
# from scipy.stats import pearsonr, spearmanr
# from sklearn.metrics import matthews_corrcoef, f1_score
from transformers import RobertaTokenizer
from transformers.optimization import AdamW
from transformers import RobertaModel#RobertaForSequenceClassification
from torch.utils.data import Dataset
# Timestamped INFO-level logging for the whole module.
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)
# Encoder hidden size; 1024 matches the roberta-large checkpoint below.
bert_hidden_dim = 1024
pretrain_model_dir = 'roberta-large' #'roberta-large' , 'roberta-large-mnli', 'bert-large-uncased'
class TypingDataset(Dataset):
    """Entity-typing dataset.

    Each item is a list: [premise, entity, gold types, gold general types,
    gold fine types, gold ultra-fine types, gold types found in top-k,
    top-k candidate types].  Type names have '_' replaced by ' '.
    """

    def __init__(self, data_file, label_file):
        self.data = []
        with open(label_file, "r", encoding="utf-8") as fin:
            # First whitespace-separated token per line is the type name.
            label_lst = [' '.join(raw.split()[0].split('_')) for raw in fin]
        self.label_lst = label_lst
        # Ontology layout: first 9 general, next 121 fine, rest ultra-fine.
        self.general_lst = label_lst[:9]
        self.fine_lst = label_lst[9:130]
        self.ultrafine_lst = label_lst[130:]
        with open(data_file, "r", encoding="utf-8") as f:
            raw_lines = f.read().splitlines()
        for raw in raw_lines:
            record = json.loads(raw)
            premise = record['premise']
            entity = record['entity']
            annotation = [' '.join(t.split('_')) for t in record['annotation']]
            top_k = [' '.join(t.split('_')) for t in record['top_k']]
            pos_in_top_k = list(set(annotation) & set(top_k))
            annotation_general = list(set(annotation) & set(self.general_lst))
            annotation_fine = list(set(annotation) & set(self.fine_lst))
            annotation_ultrafine = list(set(annotation) & set(self.ultrafine_lst))
            self.data.append([premise, entity, annotation, annotation_general,
                              annotation_fine, annotation_ultrafine,
                              pos_in_top_k, top_k])

    def __getitem__(self, idx):
        return self.data[idx]

    def __len__(self):
        return len(self.data)
class RobertaForSequenceClassification(nn.Module):
    """RoBERTa encoder followed by a classification head over the pooled output."""

    def __init__(self, tagset_size):
        super(RobertaForSequenceClassification, self).__init__()
        self.tagset_size = tagset_size
        # Pretrained encoder; `pretrain_model_dir` is a module-level constant.
        self.roberta_single = RobertaModel.from_pretrained(pretrain_model_dir)
        self.single_hidden2tag = RobertaClassificationHead(bert_hidden_dim, tagset_size)

    def forward(self, input_ids, input_mask):
        encoded = self.roberta_single(input_ids, input_mask, None)
        pooled = encoded[1]  # pooler output: (batch, hidden)
        return self.single_hidden2tag(pooled)  # (batch, tagset_size)
class RobertaClassificationHead(nn.Module):
    """Two-layer MLP head: dropout -> dense -> tanh -> dropout -> projection."""

    def __init__(self, bert_hidden_dim, num_labels):
        super(RobertaClassificationHead, self).__init__()
        self.dense = nn.Linear(bert_hidden_dim, bert_hidden_dim)
        self.dropout = nn.Dropout(0.1)
        self.out_proj = nn.Linear(bert_hidden_dim, num_labels)

    def forward(self, features):
        # `features` is the already-pooled representation (batch, hidden).
        hidden = self.dropout(features)
        hidden = torch.tanh(self.dense(hidden))
        hidden = self.dropout(hidden)
        return self.out_proj(hidden)
def load_all_data(data_path):
    """Read a tab-separated file of `label<TAB>utterance` rows.

    Returns a list of {'utterance': ..., 'class': ...} dicts, one per row.
    """
    with open(data_path, newline='') as handle:
        return [
            {'utterance': fields[1].strip(), 'class': fields[0].strip()}
            for fields in (row.split('\t') for row in handle)
        ]
def load_categories(label_file):
    """Read the type ontology file: first token per line, '_' replaced by ' '."""
    with open(label_file, "r", encoding="utf-8") as fin:
        return [' '.join(line.split()[0].split('_')) for line in fin]
class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, text_a, text_b=None, entity = None, label=None):
        """Constructs a InputExample.

        Args:
            text_a: string. The untokenized premise/context sentence.
            text_b: (Optional) string. The candidate type name used later to
                build the hypothesis "<entity> is a <text_b>.".
            entity: (Optional) string. The entity mention from the premise.
            label: (Optional) string. 'entailment' or 'non-entailment';
                specified for train and dev examples, but not for test examples.
        """
        self.text_a = text_a
        self.text_b = text_b
        self.entity = entity
        self.label = label
class InputFeatures(object):
    """A single set of features of data (one tokenized, padded example)."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        # Parallel sequences all padded to the same max_seq_length,
        # plus the integer target id.
        self.input_ids, self.input_mask = input_ids, input_mask
        self.segment_ids, self.label_id = segment_ids, label_id
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file into a list of row lists."""
        # The dead Python-2 `unicode(...)` re-decoding branch was removed:
        # this file is Python 3 only (it uses f-strings elsewhere), and
        # `unicode` is undefined there.
        with open(input_file, "r") as f:
            return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
class RteProcessor(DataProcessor):
    """Processor for the RTE data set (GLUE version)."""

    def load_train(self, data_file, label_file):
        """Build textual-entailment training pairs for entity typing.

        Each gold type of an instance yields an 'entailment' example and each
        retrieved-but-not-gold top-k candidate yields a 'non-entailment' one.

        Returns:
            examples: list of InputExample.
            top_k_candidates: per-instance list of candidate type names.
            group_start_idlist: offsets of each instance's first example in
                `examples` (length = number of instances + 1).
        """
        examples = []
        with open(label_file, "r", encoding="utf-8") as fin:
            label_lst = []
            for lines in fin:
                # First whitespace-separated token is the type; '_' -> ' '.
                lines = lines.split()[0]
                lines = ' '.join(lines.split('_'))
                label_lst.append(lines)
            self.label_lst = label_lst
            # Ontology layout: 9 general types, 121 fine, rest ultra-fine.
            self.general_lst = label_lst[0:9]
            self.fine_lst = label_lst[9:130]
            self.ultrafine_lst = label_lst[130:]
        with open(data_file, "r", encoding="utf-8") as f:
            lines = f.read().splitlines()
        top_k_candidates = []
        group_start_idlist = [0]
        for _, line in enumerate(tqdm(lines, desc='constructing training pairs')):
            line = json.loads(line)
            premise = line['premise']
            entity = line['entity']
            # could truncate generated annotation
            annotation = line['annotation']
            annotation = [' '.join(a.split('_')) for a in annotation]
            top_k = line['top_k']
            top_k = [' '.join(a.split('_')) for a in top_k]
            top_k_candidates.append(top_k)
            # NOTE: the three granularity splits and pos_in_top_k below are
            # computed but not used by this method.
            pos_in_top_k = list(set(annotation).intersection(set(top_k)))
            annotation_general = list(set(annotation).intersection(set(self.general_lst)))
            annotation_fine = list(set(annotation).intersection(set(self.fine_lst)))
            annotation_ultrafine = list(set(annotation).intersection(set(self.ultrafine_lst)))
            # Positive pairs: every gold type entails the premise+entity.
            for typing in annotation:
                examples.append( InputExample(text_a=premise, text_b=typing, entity = entity, label='entailment'))
            # negative_class_set = set(self.label_lst)-set(annotation)
            # negative_class_set = [tmp for tmp in self.label_lst if tmp not in annotation]
            # Negatives come only from retrieved-but-not-gold candidates.
            negative_class_set = set(top_k)-set(annotation)
            for typing in negative_class_set:
                examples.append( InputExample(text_a=premise, text_b=typing, entity = entity, label='non-entailment'))
            next_start = group_start_idlist[-1] + len(annotation) + len(negative_class_set)
            group_start_idlist.append(next_start)
        return examples, top_k_candidates, group_start_idlist

    def get_labels(self):
        'here we keep the three-way in MNLI training '
        # NOTE(review): this returns 'not_entailment' while load_train labels
        # pairs as 'non-entailment'; main() uses its own label list, so the
        # mismatch is latent -- confirm before relying on this method.
        return ["entailment", "not_entailment"]
        # return ["entailment", "neutral", "contradiction"]
def convert_examples_to_features(examples, label_list, eval_class_list, max_seq_length,
                                 tokenizer, output_mode,
                                 cls_token_at_end=False,
                                 cls_token='[CLS]',
                                 cls_token_segment_id=1,
                                 sep_token='[SEP]',
                                 sep_token_extra=False,
                                 pad_on_left=False,
                                 pad_token=0,
                                 pad_token_segment_id=0,
                                 sequence_a_segment_id=0,
                                 sequence_b_segment_id=1,
                                 mask_padding_with_zero=True):
    """ Loads a data file into a list of `InputBatch`s
        `cls_token_at_end` define the location of the CLS token:
            - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
            - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)

        The hypothesis is built as "<entity> is a <text_b>." and paired with
        the premise.  NOTE(review): `eval_class_list` is accepted but never
        used, and there is no truncation -- an input longer than
        max_seq_length makes padding_length negative and fails the length
        asserts below.
    """
    label_map = {label : i for i, label in enumerate(label_list)}
    features = []
    for (ex_index, example) in enumerate(tqdm(examples, desc='writing example')):
        # if ex_index % 10000 == 0:
        #     logger.info("Writing example %d of %d" % (ex_index, len(examples)))
        tokens_a = tokenizer.tokenize(example.text_a)
        # Hypothesis sentence: "<entity> is a <candidate type>."
        hypo = ' '.join([example.entity, 'is a', example.text_b +'.'])
        tokens_b = tokenizer.tokenize(hypo)
        tokens = tokens_a + [sep_token]
        if sep_token_extra:
            # roberta uses an extra separator b/w pairs of sentences
            tokens += [sep_token]
        segment_ids = [sequence_a_segment_id] * len(tokens)
        if tokens_b:
            tokens += tokens_b + [sep_token]
            segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
        if cls_token_at_end:
            tokens = tokens + [cls_token]
            segment_ids = segment_ids + [cls_token_segment_id]
        else:
            tokens = [cls_token] + tokens
            segment_ids = [cls_token_segment_id] + segment_ids
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
        else:
            input_ids = input_ids + ([pad_token] * padding_length)
            input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        if output_mode == "classification":
            label_id = label_map[example.label]
        elif output_mode == "regression":
            label_id = float(example.label)
        else:
            raise KeyError(output_mode)
        features.append(
                InputFeatures(input_ids=input_ids,
                              input_mask=input_mask,
                              segment_ids=segment_ids,
                              label_id=label_id))
    return features
def convert_examples_to_features_concatenate(examples, label_list, eval_class_list, max_seq_length,
                                             tokenizer, output_mode,
                                             cls_token_at_end=False,
                                             cls_token='[CLS]',
                                             cls_token_segment_id=1,
                                             sep_token='[SEP]',
                                             sep_token_extra=False,
                                             pad_on_left=False,
                                             pad_token=0,
                                             pad_token_segment_id=0,
                                             sequence_a_segment_id=0,
                                             sequence_b_segment_id=1,
                                             mask_padding_with_zero=True,
                                             top_k_candidates = None, group_start_idlist = None):
    """ Loads a data file into a list of `InputBatch`s
        `cls_token_at_end` define the location of the CLS token:
            - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
            - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)

        Variant of convert_examples_to_features: each hypothesis is followed
        by all OTHER top-k candidate types (order shuffled 60% of the time),
        so the model scores a candidate in the context of its competitors.
        `group_start_idlist` delimits the examples belonging to one instance.

        Fixes vs. the original: the final print reported a hard-coded
        "72,086" instead of the computed `max_len`, and the two trailing
        parameters used mutable `[]` defaults.
    """
    if top_k_candidates is None:
        top_k_candidates = []
    if group_start_idlist is None:
        group_start_idlist = []
    label_map = {label : i for i, label in enumerate(label_list)}
    max_len = 0
    features = []
    for idx, group_id in enumerate(tqdm(zip(group_start_idlist, group_start_idlist[1:]), desc='writing concat example', total=len(group_start_idlist[1:]))):
        # All examples produced from one typing instance.
        sub_examples = examples[group_id[0]:group_id[1]]
        for (ex_index, example) in enumerate(sub_examples):
            tokens_a = tokenizer.tokenize(example.text_a)
            # Hypothesis sentence: "<entity> is a <candidate type>."
            hypo = ' '.join([example.entity, 'is a', example.text_b +'.'])
            tokens_b = tokenizer.tokenize(hypo)
            # The other candidates of this instance become the tail context.
            top_k_for_this_example = [ex_i for ex_i in top_k_candidates[idx] if ex_i != example.text_b]
            tokens_b_concatenated = []
            for ii in range(1):  # one variant per example; loop kept for easy expansion
                prob = random.random()
                if prob <= 0.6:
                    # Shuffle competitor order 60% of the time for robustness.
                    random.shuffle(top_k_for_this_example)
                tail_seq = []
                for ex_i in top_k_for_this_example:
                    tail_seq += [sep_token]+tokenizer.tokenize(ex_i)+[sep_token]
                tokens_b_concatenated.append(tokens_b+[sep_token]+tail_seq)
            for tokens_b in tokens_b_concatenated:
                tokens = tokens_a + [sep_token]
                if sep_token_extra:
                    # roberta uses an extra separator b/w pairs of sentences
                    tokens += [sep_token]
                segment_ids = [sequence_a_segment_id] * len(tokens)
                tokens += tokens_b
                segment_ids += [sequence_b_segment_id] * (len(tokens_b))
                tokens = [cls_token] + tokens
                segment_ids = [cls_token_segment_id] + segment_ids
                input_ids = tokenizer.convert_tokens_to_ids(tokens)
                max_len = max(max_len, len(input_ids))
                # The mask has 1 for real tokens and 0 for padding tokens. Only real
                # tokens are attended to.
                input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
                # Zero-pad up to the sequence length.
                padding_length = max_seq_length - len(input_ids)
                if pad_on_left:
                    input_ids = ([pad_token] * padding_length) + input_ids
                    input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                    segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                else:
                    input_ids = input_ids + ([pad_token] * padding_length)
                    input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
                    segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
                assert len(input_ids) == max_seq_length
                assert len(input_mask) == max_seq_length
                assert len(segment_ids) == max_seq_length
                if output_mode == "classification":
                    label_id = label_map[example.label]
                elif output_mode == "regression":
                    label_id = float(example.label)
                else:
                    raise KeyError(output_mode)
                features.append(
                        InputFeatures(input_ids=input_ids,
                                      input_mask=input_mask,
                                      segment_ids=segment_ids,
                                      label_id=label_id))
    # Bug fix: report the actual computed maximum, not a hard-coded string.
    print(f'!!!!!!!max len: {max_len}!!!!!!!!')
    return features
def main(args_task_name, args_cache_dir, args_round_name,
        args_max_seq_length, args_do_train, args_do_eval,
        args_do_lower_case, args_train_batch_size,
        args_eval_batch_size, args_learning_rate,
        args_num_train_epochs, args_warmup_proportion,
        args_no_cuda, args_local_rank, args_seed,
        args_gradient_accumulation_steps, args_fp16,
        args_loss_scale, args_server_ip, args_server_port, args_train_file):
    """End-to-end training driver: parse flags, load UFET typing data, build
    entailment features, and fine-tune an MNLI-initialized RoBERTa classifier.

    The args_* parameters only supply argparse defaults; command-line flags
    override them.  Relies on the module-level global `mnli_model` (path to
    the MNLI-pretrained checkpoint).
    """
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--task_name",
                        default=args_task_name,
                        type=str,
                        required=False,
                        help="The name of the task to train.")
    ## Other parameters
    parser.add_argument("--train_file",
                        default=args_train_file,
                        type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--cache_dir",
                        default=args_cache_dir,
                        type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--round_name",
                        default=args_round_name,
                        type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--result_name",
                        type=str,
                        help="result output file name")
    parser.add_argument("--max_seq_length",
                        default=args_max_seq_length,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        action='store_true',
                        default=args_do_train,
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        action='store_true',
                        default = args_do_eval,
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        default = args_do_lower_case,
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=args_train_batch_size,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=args_eval_batch_size,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=args_learning_rate,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=args_num_train_epochs,
                        type=int,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=args_warmup_proportion,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        default=args_no_cuda,
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=args_local_rank,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=args_seed,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=args_gradient_accumulation_steps,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        action='store_true',
                        default = args_fp16,
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float,
                        default=args_loss_scale,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument("--threshold",
                        default='0,1',
                        type=str,
                        help="Threshold range.")
    parser.add_argument('--server_ip', type=str, default=args_server_ip, help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default=args_server_port, help="Can be used for distant debugging.")
    # presumably tolerates the '-f' flag notebooks pass to scripts -- confirm
    parser.add_argument('-f')
    args = parser.parse_args()
    # "--threshold lo,hi" -> [lo, hi] floats, swept in evaluate()/metric().
    args.threshold = [float(i) for i in args.threshold.split(',')]
    processors = {
        "rte": RteProcessor
    }
    output_modes = {
        "rte": "classification"
    }
    # ---- device / (optional) distributed setup ----
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
    # ---- reproducibility ----
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")
    task_name = args.task_name.lower()
    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))
    # ---- fixed data locations (train file name is configurable) ----
    train_path = './data/' +str(args.train_file) +'.json'
    dev_path ='./data/dev_processed.json'
    test_path = './data/test_processed.json'
    """ load data """
    category_path = './data/types.txt'
    # 3-way head so the MNLI checkpoint's classifier weights can be loaded.
    model = RobertaForSequenceClassification(3)
    tokenizer = RobertaTokenizer.from_pretrained(pretrain_model_dir, do_lower_case=args.do_lower_case)
    # `mnli_model` is a module-level global set in the __main__ block.
    model.load_state_dict(torch.load(mnli_model), strict=False)
    model.to(device)
    # AdamW with weight decay on everything except biases and LayerNorm params.
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=args.learning_rate)
    processor = processors[task_name]()
    output_mode = output_modes[task_name]
    '''load training in list'''
    train_examples_list, train_top_K_candidates, group_start_idlist = processor.load_train(train_path, category_path) # no odd training examples
    '''dev and test'''
    # dev data
    dev_dataset = TypingDataset(dev_path, category_path)
    # test data
    test_dataset = TypingDataset(test_path, category_path)
    entail_class_list = ['entailment', 'non-entailment']
    eval_class_list = []
    # Plain premise/hypothesis pairs...
    train_features = convert_examples_to_features(
        train_examples_list, entail_class_list, eval_class_list, args.max_seq_length, tokenizer, output_mode,
        cls_token_at_end=False,#bool(args.model_type in ['xlnet']), # xlnet has a cls token at the end
        cls_token=tokenizer.cls_token,
        cls_token_segment_id=0,#2 if args.model_type in ['xlnet'] else 0,
        sep_token=tokenizer.sep_token,
        sep_token_extra=True,#bool(args.model_type in ['roberta']), # roberta uses an extra separator b/w pairs of sentences
        pad_on_left=False,#bool(args.model_type in ['xlnet']), # pad on the left for xlnet
        pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
        pad_token_segment_id=0)#4 if args.model_type in ['xlnet'] else 0,)
    print(f'num train features: {len(train_features)}')
    # ...plus the variant that appends competitor candidates after the hypothesis.
    train_features_concatenate = convert_examples_to_features_concatenate(
        train_examples_list, entail_class_list, eval_class_list, args.max_seq_length, tokenizer, output_mode,
        cls_token_at_end=False,#bool(args.model_type in ['xlnet']), # xlnet has a cls token at the end
        cls_token=tokenizer.cls_token,
        cls_token_segment_id=0,#2 if args.model_type in ['xlnet'] else 0,
        sep_token=tokenizer.sep_token,
        sep_token_extra=True,#bool(args.model_type in ['roberta']), # roberta uses an extra separator b/w pairs of sentences, cf
        pad_on_left=False,#bool(args.model_type in ['xlnet']), # pad on the left for xlnet
        pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
        pad_token_segment_id=0, top_k_candidates = train_top_K_candidates, group_start_idlist = group_start_idlist)
    print(f'num train features concat: {len(train_features_concatenate)}')
    train_features+=train_features_concatenate
    all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
    train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    '''training'''
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_features))
    logger.info("  Batch size = %d", args.train_batch_size)
    # NOTE(review): max_test_acc / max_dev_acc are never updated below.
    max_test_acc = 0.0
    max_dev_acc = 0.0
    for epoch_i in range(args.num_train_epochs):
        for _, batch in enumerate(tqdm(train_dataloader, desc='train|epoch_'+str(epoch_i))):
            model.train()
            batch = tuple(t.to(device) for t in batch)
            # segment_ids are unused: RoBERTa has no token-type embeddings.
            input_ids, input_mask, _, label_ids = batch
            logits = model(input_ids, input_mask)
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, 3), label_ids.view(-1))
            # print("\nloss:", loss)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        '''evaluation'''
        evaluate(args, dev_dataset, model, tokenizer, epoch_i, device)
def evaluate(args, eval_dataset, model, tokenizer, global_step, device):
    """Score every top-k candidate type for each dev instance and report the
    best macro P/R/F1 found by sweeping decision thresholds.

    `global_step` is only bookkeeping from the caller (epoch index); it is not
    used inside this function.
    """
    model.eval()
    eval_sampler = SequentialSampler(eval_dataset)
    # collate_fn transposes the batch: one tuple per field instead of per item.
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=lambda x: zip(*x))
    dev_output_scores = []
    dev_all_top_k = []
    dev_all_truth = []
    dev_epoch_iterator = tqdm(eval_dataloader, desc="evaluating")
    for step, batch in enumerate(dev_epoch_iterator):
        premise_lst, entity_lst, pos_lst, pos_general_lst, pos_fine_lst, pos_ultrafine_lst, pos_in_top_k, top_k = [list(item) for item in batch]
        data_combo = []
        for idx in range(len(premise_lst)):
            premise = premise_lst[idx]
            entity = entity_lst[idx]
            truth_typing = pos_lst[idx]
            top_k_typing = top_k[idx]
            top_k_typing = [' '.join(a.split('_')) for a in top_k_typing]
            # One "premise </s></s> <entity> is a <type>." string per candidate.
            for typing in top_k_typing:
                input_temp = ' '.join([premise, 2*tokenizer.sep_token, entity, 'is a', typing+'.'])
                data_combo.append(input_temp)
        # true
        model_inputs = tokenizer(data_combo, padding=True, return_tensors='pt')
        model_inputs = model_inputs.to(device)
        input_ids = model_inputs['input_ids']
        input_mask = model_inputs['attention_mask']
        with torch.no_grad():
            output = model(input_ids, input_mask)
        # Probability of class 0 ('entailment' in the training label list).
        output = nn.functional.softmax(output, dim=-1)[:, 0]
        # NOTE(review): assumes every instance has the same number of
        # candidates (len(top_k[0])) -- confirm upstream guarantees this.
        dev_output = output.reshape(len(premise_lst), len(top_k[0]))
        dev_output_scores.append(dev_output)
        dev_all_top_k += top_k
        dev_all_truth += pos_lst
        # for candidate, i in zip(top_k, pred_indicator):
        #     preds.append([candidate[i]])
    dev_output_scores = torch.cat(dev_output_scores, dim=0)
    # Sweep thresholds in 0.002 steps within args.threshold = [lo, hi].
    dev_threshold_list = np.arange(args.threshold[0], args.threshold[1], 0.002)
    dev_macro_performance_list = metric(dev_threshold_list, dev_all_top_k, dev_output_scores, dev_all_truth, len(dev_output_scores))
    # metric() sorts by F1 descending, so [0] is the best threshold's result.
    dev_performance = dev_macro_performance_list[0]
    print('dev_performance:', dev_performance)
def macro(gold, pred, len_test):
    """ adopt from UFET paper codes

    Macro precision/recall/F1 for multi-label typing: precision is averaged
    over examples with at least one prediction, recall over examples with at
    least one gold label.

    Returns (num_examples, pred_example_count, avg_elem_per_pred,
             precision, recall, f1).
    """
    def f1(p, r):
        if r == 0.:
            return 0.
        return 2 * p * r / float(p + r)
    num_examples = len_test
    p = 0.
    r = 0.
    pred_example_count = 0.
    pred_label_count = 0.
    gold_label_count = 0.
    for true_labels, predicted_labels in zip(gold, pred):
        if predicted_labels:
            pred_example_count += 1
            pred_label_count += len(predicted_labels)
            per_p = len(set(predicted_labels).intersection(set(true_labels))) / float(len(predicted_labels))
            p += per_p
        if len(true_labels):
            gold_label_count += 1
            per_r = len(set(predicted_labels).intersection(set(true_labels))) / float(len(true_labels))
            r += per_r
    # Bug fix: the original left precision/recall/avg_elem_per_pred undefined
    # (UnboundLocalError at the return) when no example had predictions or
    # gold labels; default them to 0 instead.
    precision = p / pred_example_count if pred_example_count > 0 else 0.
    recall = r / gold_label_count if gold_label_count > 0 else 0.
    avg_elem_per_pred = pred_label_count / pred_example_count if pred_example_count > 0 else 0.
    return num_examples, pred_example_count, avg_elem_per_pred, precision, recall, f1(precision, recall)
def metric(threshold_list, all_top_k, output_scores, all_truth, len_set):
    """Sweep decision thresholds and score each with macro P/R/F1.

    For each threshold, candidate types whose score exceeds it become the
    prediction set.  Returns the per-threshold results sorted by macro F1
    descending (best threshold first).
    """
    macro_performance_list = []
    for _, th in enumerate(tqdm(threshold_list, desc = 'searching threshold')):
        try:
            pred_indicator = torch.where(output_scores>th, True, False).cpu().numpy()
            preds = []
            for candidate, i in zip(all_top_k, pred_indicator):
                preds.append(list(np.array(candidate)[i]))
            count, pred_count, avg_pred_count, macro_precision, macro_recall, macro_f1 = macro(all_truth, preds, len_set)
            macro_performance = (('threshold', f"{th:.4}"), ('P', f"{macro_precision:.2%}"), ('R', f"{macro_recall:.2%}"), ('F1', f"{macro_f1:.2%}"))
            # print(macro_performance)
            macro_performance_list.append(macro_performance)
        except Exception:
            # Fix: was a bare `except:`; narrowed so KeyboardInterrupt /
            # SystemExit still propagate.  Thresholds that produce no
            # predictions can make macro() fail and are simply skipped.
            pass
    # Sort by F1 descending; the 'xx.xx%' strings parse back via 'e-2'.
    macro_performance_list.sort(key = lambda x: -float(x[3][1].replace('%', 'e-2')))
    return macro_performance_list
if __name__ == "__main__":
    # Argparse defaults forwarded positionally to main(); CLI flags override.
    args_task_name = 'rte'
    args_cache_dir = ''
    args_round_name = 'r1'
    args_max_seq_length = 410
    args_do_train = True
    args_do_eval = False
    args_do_lower_case = True
    args_train_batch_size = 8
    args_eval_batch_size = 16
    args_learning_rate = 5e-6
    args_num_train_epochs = 5
    args_warmup_proportion = 0.1
    args_no_cuda = False
    args_local_rank = -1  # -1 disables distributed training
    args_seed = 42
    args_gradient_accumulation_steps = 1
    args_fp16 = False
    args_loss_scale = 0
    args_server_ip = ''
    args_server_port = ''
    args_train_file = 'train_processed'
    """ LOCAL """
    # Module-level global read inside main() to warm-start from an
    # MNLI-pretrained checkpoint.
    mnli_model = './MNLI_pretrained.pt'
    # Fix: extraction residue (" | 35,757 | 41.117786 | 156 | py |") that was
    # fused onto this line has been removed; it made the file unparsable.
    main(args_task_name, args_cache_dir, args_round_name,
        args_max_seq_length, args_do_train, args_do_eval,
        args_do_lower_case, args_train_batch_size,
        args_eval_batch_size, args_learning_rate,
        args_num_train_epochs, args_warmup_proportion,
        args_no_cuda, args_local_rank, args_seed,
        args_gradient_accumulation_steps, args_fp16,
        args_loss_scale, args_server_ip, args_server_port, args_train_file)
# === LearningToSelect-main/UFET/bi_bert.py ===
import numpy as np
import torch
import json
import wandb
import argparse
from sklearn.metrics import pairwise
from torch.nn import CosineEmbeddingLoss
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModel
from transformers.optimization import AdamW
def load_data(data_path, type_path):
    """Load a JSON-lines dataset plus the type ontology.

    Returns (records, types_with_spaces, types_verbatim): the type list is
    returned both with underscores replaced by spaces and in raw form.
    """
    with open(data_path) as data_f:
        dataset = [json.loads(row) for row in data_f]
    with open(type_path) as type_f:
        all_type_list_vanilla = [row.strip() for row in type_f]
    all_type_list = [' '.join(name.split('_')) for name in all_type_list_vanilla]
    # print(len(all_type_list))
    return dataset, all_type_list, all_type_list_vanilla
def load_test_data(data_path):
    """Load a JSON-lines file into a list of parsed records."""
    with open(data_path) as f:
        return [json.loads(row) for row in f]
def parse_data(dataset, all_type_list):
    """Turn raw typing records into bracketed statements plus label bookkeeping.

    The entity mention is wrapped in '{...}' inside its context so its span
    can be located again after tokenization.

    Returns:
        data_list: np.ndarray of [statement, index] pairs (stringified).
        truth_label_list: gold types with '_' replaced by ' '.
        truth_label_list_vanilla: gold types verbatim.
        truth_label_index_list: np.ndarray of index lists into all_type_list.
    """
    truth_label_list = []
    truth_label_list_vanilla = []
    truth_label_index_list = []
    data_list = []  # [[statement1, statement_index], ...]
    for data_index, data in enumerate(dataset):
        left_tokens = data['left_context_token']
        entity = data['mention_span']
        right_tokens = data['right_context_token']
        gold_vanilla = data['y_str']
        gold = [' '.join(t.split('_')) for t in gold_vanilla]
        gold_idx = [all_type_list.index(t) for t in gold]
        left_str = ' '.join(left_tokens).lstrip()
        right_str = ' '.join(right_tokens).lstrip()
        wrapped = '{' + entity + '}'
        # Spacing mirrors the original tokenization: no space before the
        # entity when there is no left context, and no space after it when
        # the right context is a single token.
        if len(left_tokens) == 0:
            statement = left_str + wrapped + ' ' + right_str
        elif len(right_tokens) == 1:
            statement = left_str + ' ' + wrapped + right_str
        else:
            statement = left_str + ' ' + wrapped + ' ' + right_str
        data_list.append([statement, data_index])
        truth_label_list.append(gold)
        truth_label_list_vanilla.append(gold_vanilla)
        truth_label_index_list.append(gold_idx)
    return (np.array(data_list), truth_label_list,
            truth_label_list_vanilla, np.array(truth_label_index_list))
# Example shape of data_list:
# [
#     [statement1, statement_index],
#     [statement2, statement_index]
# ]
def tokenize_all_types(all_type_list, tokenizer):
    """Tokenize every type name as one batch, padded to the longest name."""
    return tokenizer(all_type_list, return_tensors="pt", padding = 'longest')
def tokenize_data(data, tokenizer):
    """
    Tokenize the statement, get the entity span index after tokenization
    so the embedding of entity can be extracted later
    """
    # data is the np.ndarray from parse_data: column 0 = statement,
    # column 1 = stringified index.
    statement_index = torch.tensor(data[:,1].astype(int))
    statement_token = tokenizer(list(data[:,0]), return_tensors="pt", padding = 'longest')
    # use {} to seperate entity span
    # NOTE(review): assumes the tokenizer encodes '{' and '}' as a single
    # token found at position [1] of the encoding -- confirm for the
    # tokenizer in use.
    entity_sep_left = tokenizer.encode('{')[1]
    entity_sep_right = tokenizer.encode('}')[1]
    input_ids = statement_token.input_ids
    # locate '{' and '}' to find the location of entity span
    entity_sep_left_index = torch.where(input_ids== entity_sep_left)[1]
    # -1 because removing '{' below shifts every later token left by one,
    # so the '}' position becomes the exclusive end of the entity span.
    entity_sep_right_index = torch.where(input_ids == entity_sep_right)[1] -1
    span_start_index = entity_sep_left_index
    span_end_index = entity_sep_right_index
    # (batch, 2): [start, end) span per statement, valid after brace removal.
    entity_span_index = torch.transpose(torch.stack((span_start_index, span_end_index)), 1, 0)
    def remove_entity_sep(inputs, entity_sep_left_index, entity_sep_right_index):
        """ remove '{' and '}' """
        # Zero-out each brace position in a ones mask, then keep only the
        # surviving columns; each pass shrinks the sequence by one token.
        mask = torch.ones_like(inputs).scatter_(1, entity_sep_left_index.unsqueeze(1), 0.)
        inputs = inputs[mask.bool()].view(inputs.shape[0], inputs.shape[1]-1)
        mask = torch.ones_like(inputs).scatter_(1, entity_sep_right_index.unsqueeze(1), 0.)
        inputs = inputs[mask.bool()].view(inputs.shape[0], inputs.shape[1]-1)
        return inputs
    statement_token.input_ids = remove_entity_sep(statement_token.input_ids, entity_sep_left_index, entity_sep_right_index)
    statement_token.attention_mask = remove_entity_sep(statement_token.attention_mask, entity_sep_left_index, entity_sep_right_index)
    # for idx, i in enumerate(entity_span_index):
    #     entity_decode = tokenizer.decode(statement_token.input_ids[idx][i[0]:i[1]])
    #     print(entity_decode)
    return statement_token, entity_span_index, statement_index
def examples_to_features(data, tokenizer, batch_size, dataloader_mode):
    """Tokenize `data` and wrap the resulting tensors in a DataLoader.

    Args:
        data: array whose column 0 holds statement strings and column 1 indices.
        tokenizer: HuggingFace tokenizer consumed by `tokenize_data`.
        batch_size: batch size of the returned DataLoader.
        dataloader_mode: 'sequential' for ordered iteration, anything else shuffles.

    Fix: the original rebuilt each tensor with `torch.stack([f for f in t])`,
    which copies a 2-D tensor row by row back into an identical tensor; the
    tensors from `tokenize_data` are used directly instead.
    """
    source_features, all_entity_span_index, all_statement_index = tokenize_data(data, tokenizer)
    data_tensor = TensorDataset(source_features.input_ids,
                                source_features.attention_mask,
                                all_entity_span_index,
                                all_statement_index)
    if dataloader_mode == 'sequential':
        sampler = SequentialSampler(data_tensor)
    else:
        sampler = RandomSampler(data_tensor)
    return DataLoader(data_tensor, sampler=sampler, batch_size=batch_size)
def get_entity_embedding(last_hidden_state, entity_span_index, flag = 'mean'):
    """Pool each entity span's token embeddings and average with the first
    ([CLS]) token embedding.

    Args:
        last_hidden_state: (batch, seq_len, dim) hidden states.
        entity_span_index: (batch, 2) [start, end) span per row.
        flag: 'mean' or 'sum' span pooling.

    Returns:
        (batch, dim) tensor of entity embeddings.

    Fix: an unknown `flag` previously fell through and raised a confusing
    UnboundLocalError; it now raises ValueError explicitly.
    """
    entity_embedding_list = []
    for hidden, span in zip(last_hidden_state, entity_span_index):
        if flag == 'mean':
            span_embedding = torch.mean(hidden[span[0]:span[1]], 0)
        elif flag == 'sum':
            span_embedding = torch.sum(hidden[span[0]:span[1]], 0)
        else:
            raise ValueError("flag must be 'mean' or 'sum', got %r" % (flag,))
        # average the pooled span with the [CLS] (first token) embedding
        entity_embedding_list.append(torch.mean(torch.stack((span_embedding, hidden[0])), 0))
    return torch.stack(entity_embedding_list)
def get_label_embedding(last_hidden_state, label_attention_mask, flag = 'mean'):
    """Pool each label's token embeddings, ignoring padding and the special
    first/last tokens ([CLS]/[SEP]).

    Args:
        last_hidden_state: (batch, seq_len, dim) hidden states.
        label_attention_mask: (batch, seq_len) 1 for real tokens, 0 for padding.
        flag: 'mean' or 'sum' pooling over the inner tokens.

    Fix: an unknown `flag` previously fell through and raised a confusing
    UnboundLocalError; it now raises ValueError explicitly.
    """
    label_embedding_list = []
    for hidden, mask in zip(last_hidden_state, label_attention_mask):
        real_tokens = hidden[mask.bool()]      # drop padding positions
        inner = real_tokens[1:-1]              # drop [CLS] and [SEP]
        if flag == 'mean':
            label_embedding_list.append(torch.mean(inner, 0))
        elif flag == 'sum':
            label_embedding_list.append(torch.sum(inner, 0))
        else:
            raise ValueError("flag must be 'mean' or 'sum', got %r" % (flag,))
    return torch.stack(label_embedding_list)
def get_optimizer(model, learning_rate):
    """Build an AdamW optimizer with weight decay on everything except biases
    and LayerNorm parameters (the standard BERT fine-tuning recipe)."""
    no_decay_markers = ('bias', 'LayerNorm.bias', 'LayerNorm.weight')

    def _uses_decay(param_name):
        return not any(marker in param_name for marker in no_decay_markers)

    named_params = list(model.named_parameters())
    grouped = [
        {'params': [p for n, p in named_params if _uses_decay(n)], 'weight_decay': 0.01},
        {'params': [p for n, p in named_params if not _uses_decay(n)], 'weight_decay': 0.0},
    ]
    return AdamW(grouped, lr=learning_rate)
def load_type_vector(types_vector_path):
    """Load type-name embeddings from a text file of "name v1 v2 ..." lines.

    Returns:
        (dict name -> np.float32 vector, list of names, (N, dim) float32 tensor)

    Fixes:
      * blank lines (e.g. a trailing newline) previously raised IndexError —
        they are skipped now;
      * the loop variable no longer shadows the builtin `type`;
      * the tensor is built via np.stack + torch.from_numpy instead of the
        slow `torch.tensor(list_of_arrays)`.
    """
    type_embedding_dict = {}
    all_types_list_loaded = []
    all_types_embedding_loaded = []
    with open(types_vector_path, 'r', encoding='utf-8') as f:
        for line in f:
            values = line.split()
            if not values:
                continue  # skip blank lines
            type_name = values[0]
            vector = np.asarray(values[1:], "float32")
            type_embedding_dict[type_name] = vector
            all_types_list_loaded.append(type_name)
            all_types_embedding_loaded.append(vector)
    if all_types_embedding_loaded:
        embedding_tensor = torch.from_numpy(np.stack(all_types_embedding_loaded))
    else:
        embedding_tensor = torch.empty(0)  # matches torch.tensor([]) on empty input
    return type_embedding_dict, all_types_list_loaded, embedding_tensor
def embedding_similarity(entity_embeddings, type_embeddings):
    """Cosine-similarity matrix (entities x types), computed on CPU."""
    detached_entities = entity_embeddings.detach().cpu()
    return pairwise.cosine_similarity(detached_entities, type_embeddings.cpu())
def compute_recall(top_k_list, ground_truth_list):
    """Per-example recall: |predicted ∩ truth| / |truth|.

    Args:
        top_k_list: list of predicted label collections, one per example.
        ground_truth_list: parallel list of gold label collections.

    Returns:
        list of floats, one recall per example.

    Fix: an example with an empty truth set previously raised
    ZeroDivisionError; recall is defined as 0.0 in that case.
    """
    recall_list = []
    for top_k, truth in zip(top_k_list, ground_truth_list):
        if not len(truth):
            recall_list.append(0.0)
            continue
        overlap = len(set(top_k) & set(truth))
        recall_list.append(overlap / len(truth))
    return recall_list
def sample_negative_type(negative_label_in_batch, all_type_token):
    """Gather token ids and attention masks of the sampled negative labels
    for every statement in the batch, concatenated along dim 0."""
    id_chunks = []
    mask_chunks = []
    for label_indices in negative_label_in_batch:
        id_chunks.append(all_type_token.input_ids[label_indices])
        mask_chunks.append(all_type_token.attention_mask[label_indices])
    return torch.cat(id_chunks), torch.cat(mask_chunks)
def sample_positive_type(truth_label_in_batch, all_type_token):
    """Gather token ids and attention masks of the gold labels for every
    statement in the batch, concatenated along dim 0."""
    rows = list(truth_label_in_batch)
    pos_label_input_ids = torch.cat([all_type_token.input_ids[r] for r in rows])
    pos_label_input_mask = torch.cat([all_type_token.attention_mask[r] for r in rows])
    return pos_label_input_ids, pos_label_input_mask
def align_pairs(label_in_batch, entity_embedding_list, device, flag = 'positive'):
    """
    Repeat each statement's entity embedding once per associated label so that
    pos/neg pairs line up: the number of pairs per statement equals its number
    of pos/neg labels.  Targets are +1 ('positive') or -1 ('negative') for the
    cosine-embedding loss.
    """
    repeats = torch.tensor([len(labels) for labels in label_in_batch]).to(device)
    repeated_embeddings = torch.repeat_interleave(entity_embedding_list, repeats, dim=0)
    targets = torch.ones(repeated_embeddings.shape[0], dtype=int).to(device)
    if flag == 'negative':
        targets = -targets
    return repeated_embeddings, targets
def get_top_negative_index(label_model, negative_label_in_batch, all_type_token, entity_embedding_list, N_neg_sample, embedding_method):
    """
    For each entity, sample its top-N_neg_sample most similar negative labels
    to construct the hardest negative pairs.

    Returns an array of GLOBAL type indices, one row per entity.

    Fix: the original returned `np.argsort(...)` positions *within* each
    entity's negative sub-list, not global type indices.  The caller feeds the
    result straight into `sample_negative_type` (exactly like the
    random-sampling branch, where `np.random.choice` yields global indices),
    so the positions are now mapped back through `negative_label_in_batch`.
    """
    # Embed all types once, without tracking gradients
    with torch.no_grad():
        all_type_outputs = label_model(all_type_token.input_ids, all_type_token.attention_mask)
        all_type_embeddings = get_label_embedding(all_type_outputs[0], all_type_token.attention_mask, embedding_method)
    # Similarity between every entity and every type
    similarity = embedding_similarity(entity_embedding_list, all_type_embeddings)
    top_rows = []
    for sim_row, neg_indices in zip(similarity, negative_label_in_batch):
        neg_indices = np.asarray(neg_indices)
        # positions of the N most similar negatives within the sub-list ...
        top_positions = np.argsort(sim_row[neg_indices])[-N_neg_sample:]
        # ... mapped back to global type indices
        top_rows.append(neg_indices[top_positions])
    return np.array(top_rows)
def save_model(statement_model, label_model, statement_model_save_dir, label_model_save_dir, args):
    """Persist both encoders' state dicts to their respective directories."""
    print('Saving models ...')
    targets = (
        (statement_model, statement_model_save_dir + args.statement_model_save_path),
        (label_model, label_model_save_dir + args.label_model_save_path),
    )
    for model, destination in targets:
        torch.save(model.state_dict(), destination)
def write_type_vector(statement_model, label_model, all_type_token, types_vector_path, all_type_list_vanilla, args):
    """Write every label's embedding to `types_vector_path`, one
    "name v1 v2 ..." line per label (the format `load_type_vector` reads)."""
    statement_model.eval()
    label_model.eval()
    with torch.no_grad():
        outputs = label_model(all_type_token.input_ids, all_type_token.attention_mask)
        embeddings = get_label_embedding(outputs[0], all_type_token.attention_mask, args.embedding_method)
    with open(types_vector_path, 'w') as f:
        pairs = zip(all_type_list_vanilla, embeddings)
        for type_name, vector in tqdm(pairs, total=len(embeddings), desc='writing vectors'):
            f.write(str(type_name) + ' ')
            f.write(' '.join([str(v) for v in vector.tolist()]) + '\n')
def evaluate(statement_model, types_vector_path, all_type_list, device, test_path, tokenizer, args, result_dir, flag):
    """Evaluate entity-typing recall@K on the split at `test_path`.

    Loads the label embeddings previously written by `write_type_vector`,
    embeds each test statement's entity span with `statement_model`, ranks all
    types by cosine similarity and averages recall for every K in args.K.

    Returns:
        list of ('Top <k>', avg_recall) tuples.

    NOTE(review): `result_dir` and `flag` are accepted but never used in the
    body.  The dataloader below is built with batch_size == len(test_data_list)
    so it yields exactly ONE batch — only the last batch's embeddings survive
    the loop, which is correct only under that single-batch assumption.
    """
    # load all types embeddings
    type_embedding_dict, all_types_list_loaded, all_types_embedding_loaded = load_type_vector(types_vector_path)
    test_data_list, _ , test_truth_label_list, _ = parse_data(load_test_data(test_path), all_type_list)
    # batch_size == dataset size: single sequential batch (see NOTE above)
    test_statement_dataloader = examples_to_features(test_data_list, tokenizer, len(test_data_list), 'sequential')
    # Get test entity embeddings
    for _, batch in enumerate(tqdm(test_statement_dataloader, desc='Evaluate')):
        batch = tuple(t.to(device) for t in batch)
        # Get input_ids(tokens) for statements
        statement_input_ids, statement_input_mask, entity_span_index, statement_index = batch
        # Get embeddings for entity span in each statement (no grads at eval time)
        with torch.no_grad():
            statement_outputs = statement_model(statement_input_ids, statement_input_mask)
            test_entity_embedding_list = get_entity_embedding(statement_outputs[0], entity_span_index, args.embedding_method)
    # For each entity, compute its similarity between all types, then select top K types
    similarity = embedding_similarity(test_entity_embedding_list, all_types_embedding_loaded)
    all_recall = []
    for top_k in args.K:
        top_K_indice = [np.argsort(i)[-top_k:] for i in similarity]
        top_K_candidates = [np.array(all_types_list_loaded)[i] for i in top_K_indice]
        # compute recall
        recall_list = compute_recall(top_K_candidates, test_truth_label_list)
        # for index, i in enumerate(recall_list):
        #     print(i, len(top_K_candidates[index]))
        avg_recall = sum(recall_list)/len(recall_list)
        all_recall.append(('Top ' + str(top_k), avg_recall))
    # print('all_recall', all_recall)
    return all_recall
def main(args_train_batch_size, args_num_train_epochs, args_learning_rate, args_ENABLE_WANDB, args_K, args_N_neg_sample, args_embedding_method, args_seed, args_statement_model_save_path, args_label_model_save_path, args_sample_top_neg, args_eval_each_epoch, args_result_name=''):
    """Train the dual-encoder entity-typing model.

    Two independent BERT encoders are trained jointly with a cosine-embedding
    loss: one encodes statements (entity mentions in context), the other
    encodes type names.  After each epoch the models can optionally be saved
    and evaluated on the dev/test splits.

    Fixes (backward-compatible):
      * `args_result_name` is now an explicit parameter (default '').  The
        original read a module-level global of the same name, which raised
        NameError whenever `main` was imported and called from another module.
      * Returns the per-epoch (dev_recall, test_recall) list (previously the
        list was collected but `main` returned None).

    NOTE(review): `args_seed` is forwarded to argparse but no RNG is seeded
    with it anywhere in this function — confirm intent.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--statement_model_save_path",
                        default=args_statement_model_save_path,
                        type=str)
    parser.add_argument("--label_model_save_path",
                        default=args_label_model_save_path,
                        type=str)
    parser.add_argument("--train_batch_size",
                        default=args_train_batch_size,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate",
                        default=args_learning_rate,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=args_num_train_epochs,
                        type=int,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--N_neg_sample",
                        default=args_N_neg_sample,
                        type=int,
                        help="Total number of negative pair sampled for each entity.")
    parser.add_argument("--sample_top_neg",
                        default=args_sample_top_neg,
                        action='store_true',
                        help="For each entity, sample its top similar negative labels to construct the negative pairs. If set to False, do random sample")
    parser.add_argument("--eval_each_epoch",
                        default=args_eval_each_epoch,
                        action='store_true',
                        help="Save and evaluate the models after every training epoch.")
    parser.add_argument("--embedding_method",
                        default=args_embedding_method,
                        type=str,
                        help="Use mean or sum to get embeddings")
    parser.add_argument("--K",
                        default=args_K,
                        type=str,
                        help="Total number of top candidates selected")
    parser.add_argument("--ENABLE_WANDB",
                        default=args_ENABLE_WANDB,
                        action='store_true',
                        help="Use wandb or not.")
    parser.add_argument('--seed',
                        type=int,
                        default=args_seed,
                        help="random seed for initialization")
    parser.add_argument('--result_name',
                        type=str,
                        default=args_result_name)
    parser.add_argument('-f')  # swallow the -f flag Jupyter passes
    args = parser.parse_args()
    # "100, 50, 20" -> [100, 50, 20]
    args.K = [int(i) for i in args.K.split(',')]
    # keep pristine copies; args.*_save_path gets an 'ep<N>-' prefix each epoch
    label_model_save_path_vanilla = args.label_model_save_path
    statement_model_save_path_vanilla = args.statement_model_save_path
    train_path = '../data/ultrafine_acl18/release/crowd/train.json'
    dev_path = '../data/ultrafine_acl18/release/crowd/dev.json'
    test_path = '../data/ultrafine_acl18/release/crowd/test.json'
    type_path = '../data/ultrafine_acl18/release/ontology/types.txt'
    types_vector_path = '../data/types_vector_768-test.txt'
    statement_model_save_dir = '../saved_model/statement_model/'
    label_model_save_dir = '../saved_model/label_model/'
    result_dir = '../results_top_k/'
    pretrain_model_dir = 'bert-base-cased'
    # pretrain_model_dir = 'roberta-large'
    device = torch.device("cuda")
    if args.ENABLE_WANDB:
        wandb.setup(wandb.Settings(program="dual_bert.py", program_relpath="dual_bert.py"))
        wandb.init(project="dual-bert", entity="jiangshd")
    # Two different BERT models: one encodes statements, one encodes labels
    statement_model = AutoModel.from_pretrained(pretrain_model_dir, local_files_only = True).to(device)
    label_model = AutoModel.from_pretrained(pretrain_model_dir, local_files_only = True).to(device)
    statement_optimizer = get_optimizer(statement_model, args.learning_rate)
    label_optimizer = get_optimizer(label_model, args.learning_rate)
    tokenizer = AutoTokenizer.from_pretrained(pretrain_model_dir, do_lower_case=True, local_files_only = True)
    dataset, all_type_list, all_type_list_vanilla = load_data(train_path, type_path)
    data_list, truth_label_list, _, truth_label_index_list = parse_data(dataset, all_type_list)
    all_type_token = tokenize_all_types(all_type_list, tokenizer).to(device)
    statement_dataloader = examples_to_features(data_list, tokenizer, args.train_batch_size, 'random')
    """ Training """
    recall_each_epoch = []
    for epoch in range(args.num_train_epochs):
        for _, batch in enumerate(tqdm(statement_dataloader, desc='train|epoch_'+str(epoch))):
            statement_model.train()
            label_model.train()
            batch = tuple(t.to(device) for t in batch)
            # Get input_ids(tokens) for statements
            statement_input_ids, statement_input_mask, entity_span_index, statement_index = batch
            # Get embeddings for entity span in each statement
            statement_outputs = statement_model(statement_input_ids, statement_input_mask)
            entity_embedding_list = get_entity_embedding(statement_outputs[0], entity_span_index, args.embedding_method)
            # Get pos/neg label index of each statement in the current batch
            truth_label_in_batch = truth_label_index_list[statement_index.to('cpu').numpy()]
            all_type_index = np.arange(len(all_type_list))
            negative_label_in_batch = np.array([np.setdiff1d(all_type_index, np.array(i)) for i in truth_label_in_batch])
            if args.sample_top_neg:
                # hardest negatives: top similar negative labels per entity
                negative_label_in_batch = get_top_negative_index(label_model, negative_label_in_batch, all_type_token, entity_embedding_list, args.N_neg_sample, args.embedding_method)
            else:
                # random negative sampling
                negative_label_in_batch = np.array([np.random.choice(i, args.N_neg_sample) for i in negative_label_in_batch])
            # Positive pairs: embed gold labels and align with repeated entities
            pos_label_input_ids, pos_label_input_mask = sample_positive_type(truth_label_in_batch, all_type_token)
            pos_label_outputs = label_model(pos_label_input_ids, pos_label_input_mask)
            pos_label_embedding = get_label_embedding(pos_label_outputs[0], pos_label_input_mask, args.embedding_method)
            pos_entity_embedding, pos_target = align_pairs(truth_label_in_batch, entity_embedding_list, device, 'positive')
            # Negative pairs: same procedure with target -1
            neg_label_input_ids, neg_label_input_mask = sample_negative_type(negative_label_in_batch, all_type_token)
            neg_label_outputs = label_model(neg_label_input_ids, neg_label_input_mask)
            neg_label_embedding = get_label_embedding(neg_label_outputs[0], neg_label_input_mask, args.embedding_method)
            neg_entity_embedding, neg_target = align_pairs(negative_label_in_batch, entity_embedding_list, device, 'negative')
            # Concatenate pos and neg pairs and apply the cosine-embedding loss
            all_entity_embedding = torch.cat((pos_entity_embedding, neg_entity_embedding), dim = 0)
            all_label_embedding = torch.cat((pos_label_embedding, neg_label_embedding), dim = 0)
            all_target = torch.cat((pos_target, neg_target), dim = 0)
            loss_fct = CosineEmbeddingLoss(reduction='sum')
            loss = loss_fct(all_entity_embedding, all_label_embedding, all_target)
            if args.ENABLE_WANDB == True:
                wandb.log({"loss": loss})
            # one backward pass updates both encoders
            loss.backward()
            statement_optimizer.step()
            statement_optimizer.zero_grad()
            label_optimizer.step()
            label_optimizer.zero_grad()
        if args.eval_each_epoch:
            """ Saving model at this epoch """
            args.statement_model_save_path = str('ep' + str(epoch) + '-') + statement_model_save_path_vanilla
            args.label_model_save_path = str('ep' + str(epoch) + '-') + label_model_save_path_vanilla
            save_model(statement_model, label_model, statement_model_save_dir, label_model_save_dir, args)
            write_type_vector(statement_model, label_model, all_type_token, types_vector_path, all_type_list_vanilla, args)
            dev_recall = evaluate(statement_model, types_vector_path, all_type_list, device, dev_path, tokenizer, args, result_dir, 'dev')
            test_recall = evaluate(statement_model, types_vector_path, all_type_list, device, test_path, tokenizer, args, result_dir, 'test')
            print('dev_recall:', dev_recall)
            print('test_recall:', test_recall)
            recall_each_epoch.append((dev_recall, test_recall))
    return recall_each_epoch
if __name__ == "__main__":
    # ---- experiment configuration, forwarded verbatim into main() ----
    args_train_batch_size = 16 # max: 128 when N_neg_sample is 1
    args_num_train_epochs = 5
    args_learning_rate = 1e-5
    args_ENABLE_WANDB = True
    args_K = '100, 50, 20'  # comma-separated top-K values for recall evaluation
    args_N_neg_sample = 200  # negative pairs sampled per entity
    args_sample_top_neg = False  # False -> random negatives instead of hardest negatives
    args_embedding_method = 'mean' # 'sum' or 'mean'
    args_seed = 32  # NOTE(review): passed to main() but no RNG appears to be seeded with it — confirm
    args_statement_model_save_path = 'model.pth'
    args_label_model_save_path = 'model.pth'
    args_result_name = ''  # read as a module-level global inside main()
    args_eval_each_epoch = True  # save + evaluate after every epoch
    main(args_train_batch_size, args_num_train_epochs, args_learning_rate, args_ENABLE_WANDB, args_K, args_N_neg_sample, args_embedding_method, args_seed, args_statement_model_save_path, args_label_model_save_path, args_sample_top_neg, args_eval_each_epoch)
LearningToSelect | LearningToSelect-main/BANKING77/context_TE_BANKING77.py |
import argparse
import csv
import logging
import json
import random
import sys
import codecs
import numpy as np
import torch
import torch.nn as nn
from collections import defaultdict
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from scipy.stats import beta
from torch.nn import CrossEntropyLoss, MSELoss
from scipy.special import softmax
# from scipy.stats import pearsonr, spearmanr
# from sklearn.metrics import matthews_corrcoef, f1_score
from sklearn.metrics import pairwise
from sentence_transformers import SentenceTransformer
from transformers import RobertaTokenizer
from transformers.optimization import AdamW
from transformers import RobertaModel#RobertaForSequenceClassification
# Console logging: "<time> - <level> - <module> - <message>" at INFO level.
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)
# from pytorch_transformers.modeling_bert import BertPreTrainedModel, BertModel
# import torch.nn as nn
# Hidden size fed to the classification head; 1024 matches roberta-large below.
bert_hidden_dim = 1024
pretrain_model_dir = 'roberta-large' #'roberta-large' , 'roberta-large-mnli', 'bert-large-uncased'
class RobertaForSequenceClassification(nn.Module):
    """RoBERTa encoder followed by a classification head over the pooled output.

    Attribute names (`roberta_single`, `single_hidden2tag`) are kept so that
    previously saved state dicts still load.
    """
    def __init__(self, tagset_size):
        super(RobertaForSequenceClassification, self).__init__()
        self.tagset_size = tagset_size
        self.roberta_single= RobertaModel.from_pretrained(pretrain_model_dir)
        self.single_hidden2tag = RobertaClassificationHead(bert_hidden_dim, tagset_size)

    def forward(self, input_ids, input_mask):
        # index 1 of the encoder output is the pooled (batch, hidden) representation
        encoder_outputs = self.roberta_single(input_ids, input_mask, None)
        pooled = encoder_outputs[1]
        return self.single_hidden2tag(pooled)  # (batch, tagset_size)
class RobertaClassificationHead(nn.Module):
    """Classification head: dropout -> dense -> tanh -> dropout -> projection.

    Submodule names (`dense`, `dropout`, `out_proj`) are kept so that saved
    state dicts remain loadable.
    """
    def __init__(self, bert_hidden_dim, num_labels):
        super(RobertaClassificationHead, self).__init__()
        self.dense = nn.Linear(bert_hidden_dim, bert_hidden_dim)
        self.dropout = nn.Dropout(0.1)
        self.out_proj = nn.Linear(bert_hidden_dim, num_labels)

    def forward(self, features):
        hidden = self.dense(self.dropout(features))
        hidden = self.dropout(torch.tanh(hidden))
        return self.out_proj(hidden)
def load_all_data(data_path):
    """Read "label<TAB>utterance" lines into a list of
    {'utterance': ..., 'class': ...} dicts, preserving file order."""
    with open(data_path, newline='') as f:
        return [
            {'utterance': row.split('\t')[1].strip(),
             'class': row.split('\t')[0].strip()}
            for row in f
        ]
def load_categories(json_fname):
    """Load the category list from a JSON file.

    Fix: the original opened the file without ever closing it; a context
    manager now guarantees the handle is released.
    """
    with open(json_fname, 'r') as f:
        return json.load(f)
class InputExample(object):
    """A single training/test example for simple sequence classification.

    Attributes (mirroring the constructor arguments):
        guid: unique id for the example.
        text_a: untokenized first sequence (required).
        text_b: optional untokenized second sequence (pair tasks only).
        label: optional gold label (train/dev examples).
        premise_class: optional source class name of the premise.
    """

    def __init__(self, guid, text_a, text_b=None, label=None, premise_class=None):
        # plain value holder; every argument is stored verbatim
        self.premise_class = premise_class
        self.label = label
        self.text_b = text_b
        self.text_a = text_a
        self.guid = guid
class InputFeatures(object):
    """Tokenized, padded model inputs for one (statement, hypothesis) pair."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id, premise_class_id):
        # plain value holder; downstream code reads these attributes directly
        self.premise_class_id = premise_class_id
        self.label_id = label_id
        self.segment_ids = segment_ids
        self.input_mask = input_mask
        self.input_ids = input_ids
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Read a tab-separated file into a list of row lists.

        Fix: the original contained a dead Python-2 branch that referenced the
        undefined name `unicode` (itself a NameError under Python 3); it has
        been removed.
        """
        with open(input_file, "r") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            return list(reader)
class RteProcessor(DataProcessor):
    """Processor for the RTE data set (GLUE version).

    Converts "label<TAB>utterance" intent-classification files into textual
    entailment pairs (utterance, space-joined class name).
    """
    def load_train(self, round_list, args, train_path):
        # Returns (examples_list, class_list_up_to_now, round_indicator_up_to_now):
        # one example list per round, the accumulated class names, and which
        # round each class first appeared in.
        # NOTE(review): class ordering comes from iterating a Python set, so it
        # is insertion-hash dependent — confirm downstream code does not rely
        # on a stable class order across runs.
        examples_list = []
        class_list_up_to_now = []
        round_indicator_up_to_now = []
        for round in round_list:  # NOTE: `round` shadows the builtin
            '''first collect the class set in this round'''
            examples_this_round = []
            class_set_in_this_round = set()
            filename = train_path
            # filename = '../data/banking_data/banking77/split/'+round+'/train.txt'
            readfile = codecs.open(filename, 'r', 'utf-8')
            for row in readfile:
                parts = row.strip().split('\t')
                assert len(parts)==2
                class_name = parts[0].strip()
                class_set_in_this_round.add(class_name)
            readfile.close()
            class_list_up_to_now += list(class_set_in_this_round)
            round_indicator_up_to_now+=[round]*len(class_set_in_this_round)
            '''transform each example into entailment pair'''
            filename = train_path
            # filename = '../data/banking_data/banking77/split/'+round+'/train.txt'
            readfile = codecs.open(filename, 'r', 'utf-8')
            for row in readfile:
                parts = row.strip().split('\t')
                assert len(parts)==2
                class_name = parts[0].strip()
                # class names use '_' as word separator; join into plain words
                class_str = ' '.join(class_name.split('_'))
                # print('class_str:', class_str)
                example_str = parts[1].strip()
                '''positive pair'''
                examples_this_round.append( InputExample(guid=round, text_a=example_str, text_b=class_str, label='entailment', premise_class=class_name))
                '''negative pairs'''
                # one negative pair per other class in this round
                negative_class_set = set(class_set_in_this_round)-set([class_name])
                for negative_class in negative_class_set:
                    class_str = ' '.join(negative_class.split('_'))
                    examples_this_round.append( InputExample(guid=round, text_a=example_str, text_b=class_str, label='non-entailment', premise_class=class_name))
            readfile.close()
            examples_list.append(examples_this_round)
        return examples_list, class_list_up_to_now, round_indicator_up_to_now

    def load_dev_or_test(self, round_list, seen_classes, flag, dev_path):
        # Builds one candidate pair per (example, seen class); every pair is
        # labeled 'entailment' because the gold class is carried separately in
        # premise_class ('ood' examples get premise_class 'ood').
        examples_rounds = []
        example_size_list = []
        for round in round_list:
            examples = []
            instance_size = 0
            filename = dev_path
            # filename = '../data/banking_data/banking77/split/'+round+'/'+flag+'.txt'
            readfile = codecs.open(filename, 'r', 'utf-8')
            for row in readfile:
                parts = row.strip().split('\t')
                assert len(parts)==2
                class_name = parts[0].strip()
                if round == 'ood':
                    class_name = 'ood'
                example_str = parts[1].strip()
                for seen_class in seen_classes:
                    '''each example compares with all seen classes'''
                    class_str = ' '.join(seen_class.split('_'))
                    examples.append(
                        InputExample(guid=flag, text_a=example_str, text_b=class_str, label='entailment', premise_class=class_name))
                    instance_size+=1
            readfile.close()
            examples_rounds+=examples
            example_size_list.append(instance_size)
        return examples_rounds#, example_size_list

    def get_labels(self):
        'here we keep the three-way in MNLI training '
        # NOTE(review): load_train() tags negatives with the string
        # 'non-entailment', while this list contains "not_entailment" — a
        # label_map built from get_labels() would KeyError on training
        # examples. Confirm which spelling the caller actually uses.
        return ["entailment", "not_entailment"]
        # return ["entailment", "neutral", "contradiction"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue  # skip the header row
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[1]
            text_b = line[2]
            label = line[-1]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
def convert_examples_to_features(examples, label_list, eval_class_list, max_seq_length,
                                 tokenizer, output_mode,
                                 cls_token_at_end=False,
                                 cls_token='[CLS]',
                                 cls_token_segment_id=1,
                                 sep_token='[SEP]',
                                 sep_token_extra=False,
                                 pad_on_left=False,
                                 pad_token=0,
                                 pad_token_segment_id=0,
                                 sequence_a_segment_id=0,
                                 sequence_b_segment_id=1,
                                 mask_padding_with_zero=True):
    """Convert `InputExample`s into padded `InputFeatures`.

    `cls_token_at_end` selects the CLS position:
      - False (BERT/XLM): [CLS] + A + [SEP] + B + [SEP]
      - True (XLNet/GPT): A + [SEP] + B + [SEP] + [CLS]
    With `sep_token_extra` (RoBERTa) a second separator follows sequence A,
    and one more slot is reserved during truncation.  `cls_token_segment_id`
    is 0 for BERT, 2 for XLNet.
    """
    label_map = {label: i for i, label in enumerate(label_list)}
    class_map = {label: i for i, label in enumerate(eval_class_list)}
    features = []
    for example in tqdm(examples, desc='writing example'):
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # reserve room for [CLS], [SEP], [SEP] (+1 extra sep for RoBERTa)
            reserved = 4 if sep_token_extra else 3
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - reserved)
        else:
            # reserve room for [CLS] and [SEP] (+1 extra sep for RoBERTa)
            reserved = 3 if sep_token_extra else 2
            tokens_a = tokens_a[:max_seq_length - reserved]

        # Sequence A plus its separator(s); segment ids mark A vs B so the
        # model can tell the two sequences apart (redundant but conventional).
        tokens = tokens_a + [sep_token]
        if sep_token_extra:
            # RoBERTa uses a double separator between the pair
            tokens.append(sep_token)
        segment_ids = [sequence_a_segment_id] * len(tokens)
        if tokens_b:
            tokens += tokens_b + [sep_token]
            segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)

        if cls_token_at_end:
            tokens.append(cls_token)
            segment_ids.append(cls_token_segment_id)
        else:
            tokens = [cls_token] + tokens
            segment_ids = [cls_token_segment_id] + segment_ids

        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # mask is 1 on real tokens, 0 on padding (inverted when requested)
        real_token_mask = 1 if mask_padding_with_zero else 0
        input_mask = [real_token_mask] * len(input_ids)

        pad_len = max_seq_length - len(input_ids)
        id_padding = [pad_token] * pad_len
        mask_padding = [1 - real_token_mask] * pad_len
        seg_padding = [pad_token_segment_id] * pad_len
        if pad_on_left:
            input_ids = id_padding + input_ids
            input_mask = mask_padding + input_mask
            segment_ids = seg_padding + segment_ids
        else:
            input_ids = input_ids + id_padding
            input_mask = input_mask + mask_padding
            segment_ids = segment_ids + seg_padding

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length

        if output_mode == "classification":
            label_id = label_map[example.label]
        elif output_mode == "regression":
            label_id = float(example.label)
        else:
            raise KeyError(output_mode)

        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_id=label_id,
                          premise_class_id=class_map[example.premise_class]))
    return features
def convert_examples_to_features_concatenate(examples, label_list, eval_class_list, max_seq_length,
                                             tokenizer, output_mode,
                                             cls_token_at_end=False,
                                             cls_token='[CLS]',
                                             cls_token_segment_id=1,
                                             sep_token='[SEP]',
                                             sep_token_extra=False,
                                             pad_on_left=False,
                                             pad_token=0,
                                             pad_token_segment_id=0,
                                             sequence_a_segment_id=0,
                                             sequence_b_segment_id=1,
                                             mask_padding_with_zero=True,
                                             top_k_candidates=None,
                                             group_size=77):
    """Build `InputFeatures` where each hypothesis (class name) is concatenated
    with two independently shuffled orderings of the example's other top-k
    candidate class names, each wrapped in separator tokens.

    `examples` must consist of consecutive groups of `group_size` entries
    (one per class); `top_k_candidates[idx]` holds the candidate class names
    for group `idx`.

    Fixes (backward-compatible):
      * `group_size` replaces the hard-coded 77 (the BANKING77 class count);
        the default keeps existing callers working;
      * the mutable default `top_k_candidates=[]` was replaced by None
        (Python mutable-default pitfall).
    """
    if top_k_candidates is None:
        top_k_candidates = []
    label_map = {label: i for i, label in enumerate(label_list)}
    class_map = {label: i for i, label in enumerate(eval_class_list)}
    features = []
    group_start_idlist = [group_size * i for i in range(len(examples) // group_size)]
    for idx, group_id in enumerate(tqdm(group_start_idlist, desc='writing concat example')):
        sub_examples = examples[group_id:group_id + group_size]
        for (ex_index, example) in enumerate(sub_examples):
            tokens_a = tokenizer.tokenize(example.text_a)
            tokens_b = tokenizer.tokenize(example.text_b)
            # candidates for this group, minus the current hypothesis itself
            top_k_for_this_example = [ex_i for ex_i in top_k_candidates[idx] if ex_i != example.text_b]
            tokens_b_concatenated = []
            # two independently shuffled candidate orderings per hypothesis
            for ii in range(2):
                random.shuffle(top_k_for_this_example)
                tail_seq = []
                for ex_i in top_k_for_this_example:
                    tail_seq += [sep_token] + tokenizer.tokenize(ex_i) + [sep_token]
                tokens_b_concatenated.append(tokens_b + [sep_token] + tail_seq)
            for tokens_b in tokens_b_concatenated:
                # Account for [CLS]/[SEP] special tokens (RoBERTa reserves more)
                special_tokens_count = 7 if sep_token_extra else 3
                _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)
                tokens = tokens_a + [sep_token]
                if sep_token_extra:
                    # roberta uses an extra separator b/w pairs of sentences
                    tokens += [sep_token]
                segment_ids = [sequence_a_segment_id] * len(tokens)
                tokens += tokens_b
                segment_ids += [sequence_b_segment_id] * (len(tokens_b))
                # CLS is always prepended here (cls_token_at_end is not honored
                # in this variant, matching the original behavior)
                tokens = [cls_token] + tokens
                segment_ids = [cls_token_segment_id] + segment_ids
                input_ids = tokenizer.convert_tokens_to_ids(tokens)
                # 1 for real tokens, 0 for padding (inverted when requested)
                input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
                padding_length = max_seq_length - len(input_ids)
                if pad_on_left:
                    input_ids = ([pad_token] * padding_length) + input_ids
                    input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                    segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                else:
                    input_ids = input_ids + ([pad_token] * padding_length)
                    input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
                    segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
                assert len(input_ids) == max_seq_length
                assert len(input_mask) == max_seq_length
                assert len(segment_ids) == max_seq_length
                if output_mode == "classification":
                    label_id = label_map[example.label]
                elif output_mode == "regression":
                    label_id = float(example.label)
                else:
                    raise KeyError(output_mode)
                features.append(
                    InputFeatures(input_ids=input_ids,
                                  input_mask=input_mask,
                                  segment_ids=segment_ids,
                                  label_id=label_id,
                                  premise_class_id=class_map[example.premise_class]))
    return features
def get_top_K_candidates(similarity_model, examples_dict, all_class_list, device, args, K=60):
    """Return, for every example utterance, the K class names most similar to it.

    Similarity is cosine similarity between Sentence-BERT embeddings of the
    utterance and of each class name.

    Args:
        similarity_model: SentenceTransformer used to encode both sides.
        examples_dict: list of dicts, each with an 'utterance' key.
        all_class_list: candidate class-name strings.
        device: torch device the encoder is moved to.
        args: unused here; kept for call-site compatibility.
        K: number of candidates kept per utterance (default 60, the value
           that was previously hard-coded).

    Returns:
        List of np.ndarray of K class names per utterance, ordered by
        ascending similarity (best match last).
    """
    def embedding_similarity(intent_embeddings, class_name_embeddings):
        # (num_utterances, num_classes) cosine-similarity matrix.
        return pairwise.cosine_similarity(intent_embeddings, class_name_embeddings)

    def sentence_bert_similarity(model, utterance_list, class_name_list, device):
        model.to(device)
        with torch.no_grad():
            class_name_embeddings = model.encode(class_name_list)
            intent_embeddings = model.encode(utterance_list)
        return embedding_similarity(torch.tensor(intent_embeddings), torch.tensor(class_name_embeddings))

    utterance_list = [ex['utterance'] for ex in examples_dict]
    class_name_list = list(all_class_list)
    class_similarity = sentence_bert_similarity(similarity_model, utterance_list, class_name_list, device)
    # argsort is ascending, so the K largest similarities are the last K indices.
    top_K_indice = [np.argsort(row)[-K:] for row in class_similarity]
    top_K_candidates = [np.array(class_name_list)[idx] for idx in top_K_indice]
    return top_K_candidates
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def examples_to_features(source_examples, label_list, eval_class_list, args, tokenizer, batch_size, output_mode, dataloader_mode='sequential'):
    """Tokenize examples with the concatenate converter and wrap them in a DataLoader.

    Uses RoBERTa-style special tokens (CLS at the front, doubled SEP between
    pair members) and zero segment/pad ids, matching the training-side setup.
    """
    feats = convert_examples_to_features_concatenate(
        source_examples, label_list, eval_class_list, args.max_seq_length, tokenizer, output_mode,
        cls_token_at_end=False,            # BERT/RoBERTa pattern: [CLS] first
        cls_token=tokenizer.cls_token,
        cls_token_segment_id=0,
        sep_token=tokenizer.sep_token,
        sep_token_extra=True,              # RoBERTa uses an extra separator between pair members
        pad_on_left=False,
        pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
        pad_token_segment_id=0)
    dataset = TensorDataset(
        torch.tensor([f.input_ids for f in feats], dtype=torch.long),
        torch.tensor([f.input_mask for f in feats], dtype=torch.long),
        torch.tensor([f.segment_ids for f in feats], dtype=torch.long),
        torch.tensor([f.label_id for f in feats], dtype=torch.long),
        torch.tensor([f.premise_class_id for f in feats], dtype=torch.long))
    sampler = SequentialSampler(dataset) if dataloader_mode == 'sequential' else RandomSampler(dataset)
    return DataLoader(dataset, sampler=sampler, batch_size=batch_size)
def main(args_task_name, args_cache_dir, args_round_name,
         args_max_seq_length, args_do_train, args_do_eval,
         args_do_lower_case, args_train_batch_size,
         args_eval_batch_size, args_learning_rate,
         args_num_train_epochs, args_warmup_proportion,
         args_no_cuda, args_local_rank, args_seed,
         args_gradient_accumulation_steps, args_fp16,
         args_loss_scale, args_server_ip, args_server_port, args_train_file):
    """Fine-tune a RoBERTa entailment classifier on BANKING77 few-shot splits.

    Every ``args_*`` parameter becomes the default of the matching argparse
    option, so the run can be driven programmatically (see the ``__main__``
    block) or overridden from the command line.

    Reads module-level globals ``root_dir`` and ``mnli_model``; also uses
    ``pretrain_model_dir``, ``RteProcessor``, ``RobertaForSequenceClassification``
    and the feature converters defined elsewhere in this file — TODO confirm
    ``pretrain_model_dir`` is defined before this is called.
    """
    # ---------------- command-line interface ----------------
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--task_name",
                        default=args_task_name,
                        type=str,
                        required=False,
                        help="The name of the task to train.")
    ## Other parameters
    parser.add_argument("--train_file",
                        default=args_train_file,
                        type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--cache_dir",
                        default=args_cache_dir,
                        type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--round_name",
                        default=args_round_name,
                        type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--result_name",
                        type=str,
                        help="result output file name")
    parser.add_argument("--max_seq_length",
                        default=args_max_seq_length,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        action='store_true',
                        default=args_do_train,
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        action='store_true',
                        default = args_do_eval,
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        default = args_do_lower_case,
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=args_train_batch_size,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=args_eval_batch_size,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=args_learning_rate,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=args_num_train_epochs,
                        type=int,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=args_warmup_proportion,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        default=args_no_cuda,
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=args_local_rank,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=args_seed,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=args_gradient_accumulation_steps,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        action='store_true',
                        default = args_fp16,
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float,
                        default=args_loss_scale,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--server_ip', type=str, default=args_server_ip, help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default=args_server_port, help="Can be used for distant debugging.")
    # parser.add_argument('--time_stamp',
    #                         help="Time stamp to store results")
    # '-f' is accepted and ignored — presumably to swallow the '-f <kernel.json>'
    # flag Jupyter passes when run inside a notebook; TODO confirm.
    parser.add_argument('-f')
    args = parser.parse_args()
    processors = {
        "rte": RteProcessor
    }
    output_modes = {
        "rte": "classification"
    }
    # ---------------- device / reproducibility setup ----------------
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))
    # Per-step batch size; the effective batch is restored via accumulation.
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")
    task_name = args.task_name.lower()
    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))
    # ---------------- data paths ----------------
    round_name_2_rounds={'r1':['n1']}
    # NOTE(review): train_path has an extra '/' after root_dir, unlike dev/test
    # below — harmless with root_dir='./' on POSIX, but inconsistent.
    train_path = root_dir + '/data/banking_data/banking77/split/n1/' +str(args.train_file) +'.txt'
    dev_path = root_dir + 'data/banking_data/banking77/split/n1/dev.txt'
    test_path = root_dir + 'data/banking_data/banking77/split/n1/test.txt'
    """ load data """
    category_path = root_dir + 'data/banking_data/categories.json'
    all_class_list = load_categories(category_path)
    all_class_list = [' '.join(i.split('_')) for i in all_class_list]  # snake_case -> words
    train_example_list = load_all_data(train_path)
    """ Top-K selection """
    """ load top-K selection model """
    similarity_model = SentenceTransformer('multi-qa-mpnet-base-dot-v1')
    train_top_K_candidates = get_top_K_candidates(similarity_model, train_example_list, all_class_list, device, args)
    # ---------------- model / optimizer ----------------
    model = RobertaForSequenceClassification(3)  # 3 logits (MNLI-style NLI head)
    tokenizer = RobertaTokenizer.from_pretrained(pretrain_model_dir, do_lower_case=args.do_lower_case)
    # strict=False tolerates head/encoder mismatches against the MNLI checkpoint.
    model.load_state_dict(torch.load(mnli_model), strict=False)
    # model.load_state_dict(torch.load('../data/MNLI_pretrained.pt', map_location='cpu'), strict=False)
    model.to(device)
    param_optimizer = list(model.named_parameters())
    # Standard BERT recipe: no weight decay for biases and LayerNorm parameters.
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=args.learning_rate)
    processor = processors[task_name]()
    output_mode = output_modes[task_name]
    round_list = round_name_2_rounds.get(args.round_name)
    '''load training in list'''
    train_examples_list, train_class_list, train_class_2_split_list = processor.load_train(round_list, args, train_path) # no odd training examples
    # print('train_class_list:', train_class_list)
    assert len(train_class_list) == len(train_class_2_split_list)
    # assert len(train_class_list) == 20+(len(round_list)-2)*10
    '''dev and test'''
    dev_examples = processor.load_dev_or_test(round_list, train_class_list, 'dev', dev_path)
    test_examples = processor.load_dev_or_test(round_list, train_class_list, 'test', test_path)
    # test_examples = processor.load_dev_or_test(round_list, train_class_list, 'test')
    # print('train size:', [len(train_i) for train_i in train_examples_list], ' dev size:', len(dev_examples), ' test size:', len(test_examples))
    entail_class_list = ['entailment', 'non-entailment']
    eval_class_list = train_class_list
    test_split_list = train_class_2_split_list
    # ---------------- build train dataloaders (one per round) ----------------
    train_dataloader_list = []
    for train_examples in train_examples_list:
        train_features = convert_examples_to_features(
            train_examples, entail_class_list, eval_class_list, args.max_seq_length, tokenizer, output_mode,
            cls_token_at_end=False,#bool(args.model_type in ['xlnet']), # xlnet has a cls token at the end
            cls_token=tokenizer.cls_token,
            cls_token_segment_id=0,#2 if args.model_type in ['xlnet'] else 0,
            sep_token=tokenizer.sep_token,
            sep_token_extra=True,#bool(args.model_type in ['roberta']), # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
            pad_on_left=False,#bool(args.model_type in ['xlnet']), # pad on the left for xlnet
            pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
            pad_token_segment_id=0)#4 if args.model_type in ['xlnet'] else 0,)
        # Additional features that concatenate each premise with its top-K
        # candidate labels; appended to the pairwise features above.
        train_features_concatenate = convert_examples_to_features_concatenate(
            train_examples, entail_class_list, eval_class_list, args.max_seq_length, tokenizer, output_mode,
            cls_token_at_end=False,#bool(args.model_type in ['xlnet']), # xlnet has a cls token at the end
            cls_token=tokenizer.cls_token,
            cls_token_segment_id=0,#2 if args.model_type in ['xlnet'] else 0,
            sep_token=tokenizer.sep_token,
            sep_token_extra=True,#bool(args.model_type in ['roberta']), # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
            pad_on_left=False,#bool(args.model_type in ['xlnet']), # pad on the left for xlnet
            pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
            pad_token_segment_id=0, top_k_candidates = train_top_K_candidates)#4 if args.model_type in ['xlnet'] else 0,)
        train_features+=train_features_concatenate
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        all_premise_class_ids = torch.tensor([f.premise_class_id for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_premise_class_ids)
        train_sampler = RandomSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
        train_dataloader_list.append(train_dataloader)
    # dev_dataloader = examples_to_features(dev_examples, entail_class_list, eval_class_list, args, tokenizer, args.eval_batch_size, "classification", dataloader_mode='sequential')
    # test_dataloader = examples_to_features(test_examples, entail_class_list, eval_class_list, args, tokenizer, args.eval_batch_size, "classification", dataloader_mode='sequential')
    '''load dev set'''
    dev_features = convert_examples_to_features(
        dev_examples, entail_class_list, eval_class_list, args.max_seq_length, tokenizer, output_mode,
        cls_token_at_end=False,#bool(args.model_type in ['xlnet']), # xlnet has a cls token at the end
        cls_token=tokenizer.cls_token,
        cls_token_segment_id=0,#2 if args.model_type in ['xlnet'] else 0,
        sep_token=tokenizer.sep_token,
        sep_token_extra=True,#bool(args.model_type in ['roberta']), # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
        pad_on_left=False,#bool(args.model_type in ['xlnet']), # pad on the left for xlnet
        pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
        pad_token_segment_id=0)#4 if args.model_type in ['xlnet'] else 0,)
    dev_all_input_ids = torch.tensor([f.input_ids for f in dev_features], dtype=torch.long)
    dev_all_input_mask = torch.tensor([f.input_mask for f in dev_features], dtype=torch.long)
    dev_all_segment_ids = torch.tensor([f.segment_ids for f in dev_features], dtype=torch.long)
    dev_all_label_ids = torch.tensor([f.label_id for f in dev_features], dtype=torch.long)
    dev_all_premise_class_ids = torch.tensor([f.premise_class_id for f in dev_features], dtype=torch.long)
    dev_data = TensorDataset(dev_all_input_ids, dev_all_input_mask, dev_all_segment_ids, dev_all_label_ids, dev_all_premise_class_ids)
    dev_sampler = SequentialSampler(dev_data)
    dev_dataloader = DataLoader(dev_data, sampler=dev_sampler, batch_size=args.eval_batch_size)
    '''load test set'''
    test_features = convert_examples_to_features(
        test_examples, entail_class_list, eval_class_list, args.max_seq_length, tokenizer, output_mode,
        cls_token_at_end=False,#bool(args.model_type in ['xlnet']), # xlnet has a cls token at the end
        cls_token=tokenizer.cls_token,
        cls_token_segment_id=0,#2 if args.model_type in ['xlnet'] else 0,
        sep_token=tokenizer.sep_token,
        sep_token_extra=True,#bool(args.model_type in ['roberta']), # roberta uses an extra separator b/w pairs of sentences
        pad_on_left=False,#bool(args.model_type in ['xlnet']), # pad on the left for xlnet
        pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
        pad_token_segment_id=0)#4 if args.model_type in ['xlnet'] else 0,)
    test_all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long)
    test_all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long)
    test_all_segment_ids = torch.tensor([f.segment_ids for f in test_features], dtype=torch.long)
    test_all_label_ids = torch.tensor([f.label_id for f in test_features], dtype=torch.long)
    test_all_premise_class_ids = torch.tensor([f.premise_class_id for f in test_features], dtype=torch.long)
    test_data = TensorDataset(test_all_input_ids, test_all_input_mask, test_all_segment_ids, test_all_label_ids, test_all_premise_class_ids)
    test_sampler = SequentialSampler(test_data)
    test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=args.eval_batch_size)
    '''training'''
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_features))
    logger.info("  Batch size = %d", args.train_batch_size)
    max_test_acc = 0.0
    max_dev_acc = 0.0
    # NOTE(review): if dev_acc never exceeds 0.0, test_acc is never bound and
    # the final print below raises NameError — confirm this is acceptable.
    for round_index, round in enumerate(round_list):
        '''for the new examples in each round, train multiple epochs'''
        train_dataloader = train_dataloader_list[round_index]
        for epoch_i in range(args.num_train_epochs):
            for _, batch in enumerate(tqdm(train_dataloader, desc="train|"+round+'|epoch_'+str(epoch_i))):
                model.train()
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, _, label_ids, premise_class_ids = batch
                logits = model(input_ids, input_mask)
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, 3), label_ids.view(-1))
                # print("\nloss:", loss)
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()
            '''evaluation'''
            # Evaluate after every epoch; run the test set only when dev improves.
            dev_acc= evaluate(model, device, round_list, test_split_list, train_class_list, args, dev_examples, dev_dataloader, 'dev')
            print(f'dev acc: {dev_acc}, max dev acc: {max_dev_acc}')
            if dev_acc > max_dev_acc:
                max_dev_acc = dev_acc
                test_acc = evaluate(model, device, round_list, test_split_list, train_class_list, args, test_examples, test_dataloader, 'test')
                print(f'test acc: {test_acc}')
    print(f'TRAIN FILE: {args.train_file}')
    print(f'!!!!!!!!!!final test acc: {test_acc} !!!!!!!!!!!!!!!!!\n')
def evaluate(model, device, round_list, test_split_list, train_class_list, args, dev_examples, dev_dataloader, flag):
    '''Run entailment inference and return accuracy on the first round.

    Assumes dev_examples enumerates every (utterance, candidate-class) pair in
    a fixed order, so len(dev_examples) is divisible by len(train_class_list)
    and each consecutive block of len(train_class_list) rows shares one gold
    class — TODO confirm against the processor's load_dev_or_test layout.
    '''
    model.eval()
    logger.info(f"***** Running {flag} *****")
    logger.info(" Num examples = %d", len(dev_examples))
    preds = []
    gold_class_ids = []
    for _, batch in enumerate(tqdm(dev_dataloader, desc="test")):
        input_ids, input_mask, segment_ids, label_ids, premise_class_ids = batch
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        gold_class_ids+=list(premise_class_ids.detach().cpu().numpy())
        with torch.no_grad():
            logits = model(input_ids, input_mask)
        # Accumulate all batch logits into one flat array held at preds[0].
        if len(preds) == 0:
            preds.append(logits.detach().cpu().numpy())
        else:
            preds[0] = np.append(preds[0], logits.detach().cpu().numpy(), axis=0)
    preds = softmax(preds[0],axis=1)
    pred_label_3way = np.argmax(preds, axis=1) #dev_examples, 0 means "entailment"
    pred_probs = list(preds[:,0]) #prob for "entailment" class: (#input, #seen_classe)
    assert len(pred_label_3way) == len(dev_examples)
    assert len(pred_probs) == len(dev_examples)
    assert len(gold_class_ids) == len(dev_examples)
    # Reshape to (num_utterances, num_classes): one row per utterance holding
    # its entailment probability against every seen class.
    pred_label_3way = np.array(pred_label_3way).reshape(len(dev_examples)//len(train_class_list),len(train_class_list))
    pred_probs = np.array(pred_probs).reshape(len(dev_examples)//len(train_class_list),len(train_class_list))
    gold_class_ids = np.array(gold_class_ids).reshape(len(dev_examples)//len(train_class_list),len(train_class_list))
    '''verify gold_class_ids per row'''
    rows, cols = gold_class_ids.shape
    for row in range(rows):
        assert len(set(gold_class_ids[row,:]))==1
    gold_label_ids = list(gold_class_ids[:,0])
    # Predicted class = candidate with the highest entailment probability.
    pred_label_ids_raw = list(np.argmax(pred_probs, axis=1))
    pred_max_prob = list(np.amax(pred_probs, axis=1))
    pred_label_ids = []
    # NOTE(review): this loop is currently an identity copy of
    # pred_label_ids_raw; presumably a placeholder for an OOD threshold
    # on pred_max_prob — confirm.
    for idd, seen_class_id in enumerate(pred_label_ids_raw):
        pred_label_ids.append(seen_class_id)
    assert len(pred_label_ids) == len(gold_label_ids)
    acc_each_round = []
    for round_name_id in round_list:
        #base, n1, n2, ood
        round_size = 0
        rount_hit = 0
        if round_name_id != 'ood':
            for ii, gold_label_id in enumerate(gold_label_ids):
                if test_split_list[gold_label_id] == round_name_id:
                    round_size+=1
                    if gold_label_id == pred_label_ids[ii]:
                        rount_hit+=1
            # NOTE(review): raises ZeroDivisionError if a round has no examples.
            acc_i = rount_hit/round_size
            acc_each_round.append(acc_i)
    final_test_performance = acc_each_round[0]
    # print('\nfinal_test_performance:', final_test_performance)
    return final_test_performance
if __name__ == "__main__":
    # ---- local configuration ----
    # root_dir / mnli_model are read as module globals inside main().
    root_dir = './'
    mnli_model = '../model/MNLI_pretrained.pt'
    # One training run; positional arguments mirror main()'s signature.
    main('rte',           # task_name
         '',              # cache_dir
         'r1',            # round_name
         450,             # max_seq_length
         True,            # do_train
         False,           # do_eval
         True,            # do_lower_case
         8,               # train_batch_size
         64,              # eval_batch_size
         1e-6,            # learning_rate
         5,               # num_train_epochs
         0.1,             # warmup_proportion
         False,           # no_cuda
         -1,              # local_rank (-1 = no distributed training)
         42,              # seed
         1,               # gradient_accumulation_steps
         False,           # fp16
         0,               # loss_scale
         '',              # server_ip
         '',              # server_port
         'one_shot_0')    # train_file
LearningToSelect | LearningToSelect-main/BANKING77/parallel_TE_BANKING77.py | """BERT finetuning runner."""
# from __future__ import absolute_import, division, print_function
import numpy as np
import torch
import random
import argparse
import csv
import json
from collections import defaultdict
from scipy.special import softmax
from scipy import stats
from sklearn.metrics import accuracy_score
import torch.nn as nn
from torch.nn import CrossEntropyLoss, BCEWithLogitsLoss
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModel
from transformers.optimization import AdamW
from transformers import RobertaTokenizer
from transformers import RobertaModel #RobertaForSequenceClassification
from sklearn.metrics import pairwise
from sentence_transformers import SentenceTransformer
class RobertaForTopKEntailment(nn.Module):
    """RoBERTa model that scores each of the top-K candidate labels for one input.

    The input sequence concatenates the utterance with all K candidate label
    names; ``segment_ids`` records which tokens belong to which candidate so
    one pooled embedding per label can be extracted and scored by a small MLP.
    Relies on module-level ``pretrain_model_dir`` and ``bert_hidden_dim``.
    """

    def __init__(self, K, len_tokenizer):
        super(RobertaForTopKEntailment, self).__init__()
        self.K = K
        self.roberta_single = RobertaModel.from_pretrained(pretrain_model_dir)
        # Tokenizer may have added special tokens; keep the embedding table in sync.
        self.roberta_single.resize_token_embeddings(len_tokenizer)
        # Per-label scoring head: hidden -> hidden -> 1 logit.
        self.mlp = nn.Sequential(nn.Linear(bert_hidden_dim, bert_hidden_dim),
                                 nn.ReLU(),
                                 nn.LayerNorm(bert_hidden_dim),
                                 nn.Linear(bert_hidden_dim, 1))

    def forward(self, input_ids, input_mask, segment_ids, embedding_method):
        """Return one score per candidate label, shape (batch, K, 1)."""
        outputs_single = self.roberta_single(input_ids, input_mask, None)
        hidden_states = outputs_single[0]  # last-layer states: (batch, seq_len, hidden)
        slice_position = self.get_label_index(segment_ids)
        # (batch, K, hidden): pooled embedding of each candidate label's tokens.
        top_K_label_hidden_states = self.get_label_embedding(hidden_states, slice_position, embedding_method)
        # NOTE: the head outputs a single logit per label, so the result is
        # (batch, K, 1) — the original comment claiming (batch, K, 2) was wrong.
        score_single = self.mlp(top_K_label_hidden_states)
        return score_single

    def get_label_index(self, segment_ids):
        """
        for each intent-top_K_label pair,
        get the start and end postions for each label's tokens in the sequence.
        This will help compute mean embeddings of a label
        segment_ids: used to slice each label in the whole concat sequence

        Returns a (batch, K, 2) array of [start, end) column indices.
        """
        slice_position = []
        for i_th_label in np.arange(self.K):
            # All (batch_row, column) positions carrying this label's segment id.
            row, column = np.where(segment_ids.cpu() == i_th_label)
            for j_th_batch in np.arange(segment_ids.size()[0]):
                position_in_column = np.where(row == j_th_batch)
                start = np.min(position_in_column)
                end = np.max(position_in_column)
                # start+1 skips the leading separator token that shares the
                # label's segment id — presumably intentional; TODO confirm
                # the trailing bound is meant to exclude the last position.
                i_th_label_start = column[start+1]
                i_th_label_end = column[end]
                slice_position.append((i_th_label_start, i_th_label_end))
        slice_position = np.array(slice_position)
        # Collected label-major; reorder to (batch, K, 2).
        slice_position = slice_position.reshape(self.K, segment_ids.size()[0], 2)
        slice_position = np.transpose(slice_position, (1, 0, 2))
        return slice_position

    def get_label_embedding(self, hidden_states, slice_position, flag):
        """
        For all the top-K labels,
        use their token embeddings' mean/sum to represent them

        Returns (batch, K, hidden) on the same device as hidden_states.
        """
        batch_label_states = []
        for i_th_batch, slices in enumerate(slice_position):
            sliced_embedding = []
            for j_th_slice in slices:
                label_embeddings = hidden_states[i_th_batch][j_th_slice[0]: j_th_slice[1], :]
                if flag == 'mean':
                    label_embedding = torch.mean(label_embeddings, 0)
                if flag == 'sum':
                    label_embedding = torch.sum(label_embeddings, 0)
                sliced_embedding.append(label_embedding)
            batch_label_states.append(torch.stack(sliced_embedding))
        # torch.stack keeps tensors on hidden_states' device. (The previous
        # implementation concatenated onto a hard-coded 'cuda' zeros row and
        # sliced it off, which both wasted work and crashed on CPU-only runs.)
        return torch.stack(batch_label_states)
class InputFeatures():
    """A single set of features of data.

    Plain container; consumers read the fields back as attributes.
    """

    def __init__(self, input_ids, input_mask, segment_ids, label_id, class_segment_ids):
        # Token ids of the full [CLS] utterance [SEP][SEP] label ... sequence.
        self.input_ids = input_ids
        # Attention mask: 1 for real tokens, 0 for padding.
        self.input_mask = input_mask
        # Per-token candidate index as produced by convert_examples_to_features
        # (-1 for premise tokens, -2 for padding).
        self.segment_ids = segment_ids
        # Position of the ground-truth candidate (-1 when unknown at inference).
        self.label_id = label_id
        # Per-token global class id, used to recover predicted class names.
        self.class_segment_ids = class_segment_ids
def load_all_data(data_path):
    """Read tab-separated '<label>\\t<utterance>' rows into example dicts.

    Returns a list of {'utterance': ..., 'class': ...} dicts, one per line,
    preserving file order. Lines must contain at least one tab.
    """
    with open(data_path, newline='') as handle:
        split_rows = [line.split('\t') for line in handle]
    return [{'utterance': parts[1].strip(), 'class': parts[0].strip()}
            for parts in split_rows]
def load_categories(json_fname):
    """Load the class-name list from a JSON file.

    Uses a context manager so the file handle is always closed (the previous
    version opened the file and never closed it).
    """
    with open(json_fname, 'r') as f:
        return json.load(f)
def convert_examples_to_features(flag, examples, top_K_candidates, ground_truth_indicator, all_class_list, max_seq_length,
                                 tokenizer,
                                 cls_token_at_end=False,
                                 cls_token='[CLS]',
                                 cls_token_segment_id=1,
                                 sep_token='[SEP]',
                                 sep_token_extra=False,
                                 pad_on_left=False,
                                 pad_token=0,
                                 pad_token_segment_id=-2,
                                 sequence_a_segment_id=0,
                                 sequence_b_segment_id=1,
                                 mask_padding_with_zero=True):
    """ Loads a data file into a list of `InputBatch`s
    `cls_token_at_end` define the location of the CLS token:
        - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
        - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
    `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
    """
    # NOTE(review): cls_token_at_end, sep_token_extra and the sequence_*_segment_id
    # parameters are accepted for API compatibility but unused below.
    class_map = {label : i for i, label in enumerate(all_class_list)}
    max_length_in_data = 0
    features = []
    for (ex_index, example) in enumerate(tqdm(examples, desc='constructing sequence')):
        # Sequence layout: [CLS] utterance ([SEP][SEP] candidate)*K [SEP]
        tokens = [cls_token]
        token_intent = tokenizer.tokenize(example)
        tokens += token_intent
        # -1 marks premise (utterance) tokens; candidates get ids 0..K-1.
        segment_id_indicator = -1
        # The +1 pre-counts the single trailing [SEP] appended after the loop.
        segment_ids = [segment_id_indicator] * (len(tokens) + 1)
        """
        class_segment_ids indicates a label's real id according to the class map
        for all tokens of a same label, their corresponding class_segment_ids are the same
        This is to help produce the prediction labels at inference stage
        """
        class_segment_ids = [-1] * (len(tokens) + 1)
        for candidate in top_K_candidates[ex_index]:
            segment_id_indicator += 1
            class_ids_indicator = class_map[candidate]
            tokens += [sep_token] * 2
            token_candidate = tokenizer.tokenize(candidate)
            tokens += token_candidate
            # +2 covers the two [SEP] tokens preceding this candidate.
            segment_ids += [segment_id_indicator] * (len(token_candidate) + 2)
            class_segment_ids += [class_ids_indicator] * (len(token_candidate) + 2)
        tokens += [sep_token]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
        max_length_in_data = max(max_length_in_data, len(input_ids))
        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
            class_segment_ids = ([pad_token_segment_id] * padding_length) + class_segment_ids
        else:
            input_ids = input_ids + ([pad_token] * padding_length)
            input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
            class_segment_ids = class_segment_ids + ([pad_token_segment_id] * padding_length)
        # NOTE(review): over-long sequences are never truncated — these asserts
        # fail instead. Confirm max_seq_length comfortably exceeds
        # max_length_in_data for the chosen K.
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(class_segment_ids) == max_seq_length
        if flag == 'train':
            label_id = ground_truth_indicator[ex_index]
        else:
            # Inference: the gold candidate position is unknown.
            label_id = -1
        features.append(
                InputFeatures(input_ids=input_ids,
                              input_mask=input_mask,
                              segment_ids=segment_ids,
                              label_id=label_id,
                              class_segment_ids=class_segment_ids))
    print('max_length_in_data:', max_length_in_data)
    return features
def examples_to_features(flag, source_examples, top_K_candidates, ground_truth_indicator, all_class_list, args, tokenizer, batch_size, dataloader_mode='sequential'):
    """Convert raw examples into top-K concatenated features and wrap them in a DataLoader.

    Features are built with RoBERTa-style special tokens and a padding segment
    id of -2; the resulting TensorDataset is sampled sequentially unless
    another mode is requested.
    """
    feats = convert_examples_to_features(flag,
        source_examples, top_K_candidates, ground_truth_indicator, all_class_list, args.max_seq_length, tokenizer,
        cls_token_at_end=False,            # BERT/RoBERTa pattern: [CLS] first
        cls_token=tokenizer.cls_token,
        cls_token_segment_id=0,
        sep_token=tokenizer.sep_token,
        sep_token_extra=True,              # RoBERTa uses an extra separator between pair members
        pad_on_left=False,
        pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
        pad_token_segment_id=-2)
    columns = (
        [f.input_ids for f in feats],
        [f.input_mask for f in feats],
        [f.segment_ids for f in feats],
        [f.label_id for f in feats],
        [f.class_segment_ids for f in feats],
    )
    data_tensor = TensorDataset(
        *(torch.tensor(np.array(col), dtype=torch.long) for col in columns))
    sampler = SequentialSampler(data_tensor) if dataloader_mode == 'sequential' else RandomSampler(data_tensor)
    return DataLoader(data_tensor, sampler=sampler, batch_size=batch_size)
def get_top_K_candidates(similarity_model, examples_dict, all_class_list, device, args, flag):
    """Rank all class names against each utterance and keep the top K.

    A sentence-embedding model scores every class name against every
    utterance; the K most similar become the candidate set handed to the
    entailment model.  Data is then duplicated (K*H copies for 'train',
    T copies for 'dev'/'test') so that, in training, the ground truth can
    occupy every candidate position.

    Returns (top_K_candidates, ground_truth_indicator, truth_class_list,
    utterance_list).  ``ground_truth_indicator`` (one-hot position of the
    truth among the K candidates) is only filled for flag == 'train'.
    """
    K = int(args.K)
    H = args.H
    T = args.T
    ground_truth_indicator = []

    def embedding_similarity(intent_embeddings, class_name_embeddings):
        # Cosine similarity matrix: rows = utterances, columns = class names.
        similarity = pairwise.cosine_similarity(intent_embeddings, class_name_embeddings)
        return similarity

    def sentence_bert_similarity(similarity_model, utterance_list, class_name_list, device):
        # Encode both sides with the sentence-transformer; no gradients needed.
        similarity_model.to(device)
        with torch.no_grad():
            class_name_embeddings = similarity_model.encode(class_name_list)
            intent_embeddings = similarity_model.encode(utterance_list)
        similarity = embedding_similarity(torch.tensor(intent_embeddings), torch.tensor(class_name_embeddings))
        return similarity

    def augment_data(data, K, H, T, flag):
        """
        K is the K in top-K
        H means how many duplicates when ground truth in each k_th position
        Duplicate each piece of training data to K*H
        """
        data = np.array(data)
        dim = len(data.shape)
        if flag == 'train':
            """ make sure each position has a positive class """
            N = K * H
            """ shuffle N times """
            # N = H
        elif flag == 'test' or flag == 'dev':
            N = T
        # order='F' flattens column-major so the N copies of a row stay adjacent.
        if dim == 1:
            augmented_data = np.vstack([data]*N)
            augmented_data = np.reshape(augmented_data, len(data) * N, order='F')
        if dim == 2:
            augmented_data = np.vstack([[data]]*N)
            augmented_data = np.reshape(augmented_data, (len(data) * N, -1), order='F')
        return augmented_data

    utterance_list = [i['utterance'] for i in examples_dict]
    # Class names are stored with underscores; comparisons use spaces.
    truth_class_list = [' '.join(i['class'].split('_')) for i in examples_dict]
    class_name_list = [i for i in all_class_list]
    class_similarity = sentence_bert_similarity(similarity_model, utterance_list, class_name_list, device)
    # argsort is ascending, so the last K indices are the K most similar classes.
    top_K_indice = [np.argsort(i)[-K:] for i in class_similarity]
    top_K_candidates = [np.array(class_name_list)[i] for i in top_K_indice]
    recall = compute_recall(utterance_list, truth_class_list, top_K_candidates)
    print('!!!! Checking top-K-recall: [' + str(flag) + ']!!!!!:', recall)
    if flag == 'train':
        """ if there is a ground truth in top-K candidates, if not, replace the
        the class with smallest similarity with the ground truth """
        miss = 0
        for index, truth in enumerate(truth_class_list):
            if truth in top_K_candidates[index]:
                continue
            else:
                miss += 1
                # Slot 0 holds the least-similar candidate; overwrite it with the truth.
                top_K_candidates[index][0] = truth
                # print('miss index:', index)
        print('miss:', miss)
        utterance_list = augment_data(utterance_list, K, H, T, 'train')
        truth_class_list = augment_data(truth_class_list, K, H, T, 'train')
        top_K_candidates = augment_data(top_K_candidates, K, H, T, 'train')
        """ make sure each position has a positive class """
        """ return a ground truth index indicator in candidates """
        for index, truth in enumerate(truth_class_list):
            ground_truth_index = np.where(top_K_candidates[index] == truth)
            candidates_without_truth = np.delete(top_K_candidates[index], ground_truth_index)
            np.random.shuffle(candidates_without_truth)
            # Cycle the truth through every candidate slot across the K*H copies.
            truth_position = index % (K*H) // H
            augmented_candidate = np.insert(candidates_without_truth, truth_position,truth)
            top_K_candidates[index] = augmented_candidate
            indicator = np.isin(np.asarray(top_K_candidates[index]), np.asarray(truth)).astype(int)
            ground_truth_indicator.append(indicator)
    else:
        utterance_list = augment_data(utterance_list, K, H, T, flag)
        truth_class_list = augment_data(truth_class_list, K, H, T, flag)
        top_K_candidates = augment_data(top_K_candidates, K, H, T, flag)
        # Shuffle candidate order per copy so evaluation is position-independent.
        for index, truth in enumerate(truth_class_list):
            temp_candidates = top_K_candidates[index]
            np.random.shuffle(temp_candidates)
            top_K_candidates[index] = temp_candidates
    return top_K_candidates, ground_truth_indicator, truth_class_list, utterance_list
def compute_recall(utterance_list, class_list, class_candidates):
    """Return the fraction of examples whose true class is in its candidate list.

    Args:
        utterance_list: one utterance per example (only its length is used).
        class_list: ground-truth class name per example, aligned by index.
        class_candidates: per-example iterable of candidate class names.

    Returns:
        float in [0, 1]; 0.0 for an empty input (the original raised
        ZeroDivisionError in that case).
    """
    total = len(utterance_list)
    if not total:
        # Guard the empty evaluation set instead of dividing by zero.
        return 0.0
    hit = sum(1 for index in range(total) if class_list[index] in class_candidates[index])
    return hit / total
def metric(preds, top_K_candidates, truth_label_list, class_map, T):
    """Majority-vote accuracy over the T augmented copies of each example.

    Args:
        preds: (N, K) probability/score matrix for the N augmented examples.
        top_K_candidates: (N, K) candidate class names, aligned with preds.
        truth_label_list: N ground-truth class names (space-separated form).
        class_map: class name -> integer id.
        T: number of augmented copies per original example.

    Returns:
        Accuracy formatted as a percentage string, e.g. '87.50%'.
    """
    # Highest-scoring candidate slot for every (augmented) example.
    pred_label_index = np.argmax(preds, axis=1)
    pred_results = np.array(top_K_candidates)[np.arange(len(top_K_candidates)), np.array(pred_label_index)]
    # Map class names to integer ids for comparison.
    pred_results = [class_map[i] for i in pred_results]
    test_ground_truth_class_id = [class_map[i] for i in truth_label_list]
    # Each example appears T times; majority vote across its copies.
    # (The original computed accuracy_score once here and immediately
    # overwrote it below — that dead computation is removed.)
    pred_results = np.array(pred_results).reshape(-1, T)
    pred_results = stats.mode(pred_results, axis=1).mode.flatten()
    test_ground_truth_class_id = np.array(test_ground_truth_class_id).reshape(-1, T)[:, 0]
    acc = accuracy_score(test_ground_truth_class_id, pred_results)
    acc = f"{acc:.2%}"
    return acc
def evaluate(model, dev_dataloader, test_dataloader, device, dev_top_K_candidates, test_top_K_candidates, class_map, dev_truth_label_list, test_truth_label_list, args):
    """Run the entailment model over dev and test and return both accuracies.

    Logits are gathered batch-by-batch on CPU, softmaxed over the K
    candidate slots, and scored with metric().  Returns the two formatted
    accuracy strings (dev_performance, test_performance).
    """
    model.eval()
    len_test = len(test_truth_label_list)  # NOTE(review): unused
    len_dev = len(dev_truth_label_list)  # NOTE(review): unused
    """ get pred for dev data """
    dev_preds = []
    for _, batch in enumerate(tqdm(dev_dataloader, desc="load dev data")):
        input_ids, input_mask, segment_ids, label_ids, class_segment_ids = batch
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        with torch.no_grad():
            logits = model(input_ids, input_mask, segment_ids, args.embedding_method)
        # Accumulate all batch logits into a single array at dev_preds[0].
        if len(dev_preds) == 0:
            dev_preds.append(logits.detach().cpu().numpy())
        else:
            dev_preds[0] = np.append(dev_preds[0], logits.detach().cpu().numpy(), axis=0)
    # (num_examples, K) candidate scores -> probabilities over the K slots.
    dev_preds = dev_preds[0].reshape(dev_preds[0].shape[0], -1)
    dev_preds = softmax(dev_preds,axis=1)
    dev_performance = metric(dev_preds, dev_top_K_candidates, dev_truth_label_list, class_map, args.T)
    """ get pred for test data """
    test_preds = []
    for _, batch in enumerate(tqdm(test_dataloader, desc="load test data")):
        input_ids, input_mask, segment_ids, label_ids, class_segment_ids = batch
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        with torch.no_grad():
            logits = model(input_ids, input_mask, segment_ids, args.embedding_method)
        if len(test_preds) == 0:
            test_preds.append(logits.detach().cpu().numpy())
        else:
            test_preds[0] = np.append(test_preds[0], logits.detach().cpu().numpy(), axis=0)
    test_preds = test_preds[0].reshape(test_preds[0].shape[0], -1)
    test_preds = softmax(test_preds,axis=1)
    test_performance = metric(test_preds, test_top_K_candidates, test_truth_label_list, class_map, args.T)
    print('-------------------')
    return dev_performance, test_performance
def main(args_train_batch_size, args_test_batch_size, args_num_train_epochs, args_learning_rate, args_ENABLE_WANDB, args_K, args_embedding_method, args_seed, args_eval_each_epoch, args_T, args_max_seq_length, args_train_file, args_save_epochs, args_H):
    """Train and evaluate the top-K entailment intent classifier (banking data).

    The ``args_*`` parameters only seed the argparse defaults so the same
    entry point works from scripts and notebooks (the dummy ``-f`` flag
    absorbs Jupyter's kernel-file argument).

    NOTE(review): depends on module-level globals ``args_result_name``,
    ``root_dir``, ``dev_path``, ``test_path``, ``category_path``,
    ``pretrain_model_dir`` — confirm they are set before calling.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_batch_size",
                        default=args_train_batch_size,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--test_batch_size",
                        default=args_test_batch_size,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate",
                        default=args_learning_rate,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=args_num_train_epochs,
                        type=int,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--eval_each_epoch",
                        default=args_eval_each_epoch,
                        action='store_true',
                        help="For each entity, sample its top similar negative labels to construct the negative pairs. If set to False, do random sample")
    parser.add_argument("--save_epochs",
                        default=args_save_epochs,
                        type=int,
                        help="Save checkpoint every X epochs of training")
    parser.add_argument("--embedding_method",
                        default=args_embedding_method,
                        type=str,
                        help="Use mean or sum to get embeddings")
    parser.add_argument("--K",
                        default=args_K,
                        type=int,
                        help="Total number of top candidates selected")
    parser.add_argument("--H",
                        default=args_H,
                        type=int,
                        help="Total number of top candidates selected")
    parser.add_argument("--T",
                        default=args_T,
                        type=int,
                        help="The number of augmentation for each piece of data")
    parser.add_argument("--ENABLE_WANDB",
                        default=args_ENABLE_WANDB,
                        action='store_true',
                        help="Use wandb or not.")
    parser.add_argument('--seed',
                        type=int,
                        default=args_seed,
                        help="random seed for initialization")
    parser.add_argument('--result_name',
                        type=str,
                        default=args_result_name)
    parser.add_argument("--max_seq_length",
                        default=args_max_seq_length,
                        type=int,
                        help="max_seq_length")
    parser.add_argument("--train_file",
                        default=args_train_file,
                        type=str,
                        help="the name of train file")
    parser.add_argument('-f')
    args = parser.parse_args()
    train_path = root_dir + 'data/banking_data/sampled_data/' + str(args.train_file) + '.txt'
    device = torch.device("cuda")
    """ set random seed """
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    """ load data """
    all_class_list = load_categories(category_path)
    # Class names are stored with underscores; use spaces everywhere below.
    all_class_list = [' '.join(i.split('_')) for i in all_class_list]
    train_example_list = load_all_data(train_path)
    dev_example_list = load_all_data(dev_path)
    test_example_list = load_all_data(test_path)
    class_map = {' '.join(label.split('_')) : i for i, label in enumerate(all_class_list)}
    """ load top-k Entailment model """
    tokenizer = RobertaTokenizer.from_pretrained(pretrain_model_dir, do_lower_case=True)
    model = RobertaForTopKEntailment(args.K, len(tokenizer))
    model.to(device)
    # Standard transformer fine-tuning: no weight decay on bias/LayerNorm.
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=args.learning_rate)
    """ Top-K selection """
    """ load top-K selection model """
    similarity_model = SentenceTransformer('multi-qa-mpnet-base-dot-v1')
    train_top_K_candidates, train_ground_truth_indicator, _, train_utterance_list = get_top_K_candidates(similarity_model, train_example_list, all_class_list, device, args, 'train')
    dev_top_K_candidates, _, dev_truth_label_list, dev_utterance_list = get_top_K_candidates(similarity_model, dev_example_list, all_class_list, device, args, 'dev')
    test_top_K_candidates, _, test_truth_label_list, test_utterance_list = get_top_K_candidates(similarity_model, test_example_list, all_class_list, device, args, 'test')
    test_ground_truth_class_id = [class_map[i] for i in test_truth_label_list]
    dev_ground_truth_class_id = [class_map[i] for i in dev_truth_label_list]
    print('Top-K selection')
    """ ------------------- """
    train_dataloader = examples_to_features('train', train_utterance_list, train_top_K_candidates, train_ground_truth_indicator, all_class_list, args, tokenizer, args.train_batch_size, dataloader_mode='random')
    dev_dataloader = examples_to_features('dev', dev_utterance_list, dev_top_K_candidates, dev_ground_truth_class_id, all_class_list, args, tokenizer, args.test_batch_size, dataloader_mode='sequential')
    test_dataloader = examples_to_features('test', test_utterance_list, test_top_K_candidates, test_ground_truth_class_id, all_class_list, args, tokenizer, args.test_batch_size, dataloader_mode='sequential')
    """ training """
    performence_each_epoch = []
    for epoch in range(args.num_train_epochs):
        for _, batch in enumerate(tqdm(train_dataloader, desc='train|epoch'+str(epoch))):
            model.train()
            batch = tuple(t.to(device) for t in batch)
            input_ids, input_mask, segment_ids, label_ids, class_segment_ids= batch
            logits = model(input_ids, input_mask, segment_ids, args.embedding_method)
            bcsz= input_ids.shape[0]
            # Multi-label BCE over the K candidate slots (indicator targets).
            loss_fct = BCEWithLogitsLoss()
            loss = loss_fct(logits.view(bcsz, -1), label_ids.to(device).float())
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        if args.eval_each_epoch and (epoch+1) % args.save_epochs == 0:
            dev_performance, test_performance = evaluate(model, dev_dataloader, test_dataloader, device, dev_top_K_candidates, test_top_K_candidates, class_map, dev_truth_label_list, test_truth_label_list, args)
            print(args.train_file, ' dev_performance:', dev_performance)
            print(args.train_file, ' test_performance:', test_performance)
            print('-------------------')
            training_details = f'ep{epoch}_{args.train_file}_'f'K{args.K}_H_{args.H}_SEED{args.seed}'
            performence_each_epoch.append((dev_performance, test_performance, training_details))
    # Select the epoch with the best dev accuracy ('12.34%' -> 12.34e-2).
    final_test_performance = sorted(performence_each_epoch, key=lambda x: -float(x[0].replace('%', 'e-2')))[0][1]
    final_dev_performance = sorted(performence_each_epoch, key=lambda x: -float(x[0].replace('%', 'e-2')))[0][0]
    # NOTE(review): the selected results are never returned or logged.
    final_model = sorted(performence_each_epoch, key=lambda x: -float(x[0].replace('%', 'e-2')))[0][2]
if __name__ == "__main__":
    # Default hyper-parameters; main() turns these into argparse defaults.
    # (The trailing dataset-extraction residue on the final line of the
    # original was removed — it made the file unparseable.)
    args_train_batch_size = 8
    args_test_batch_size = 128
    args_num_train_epochs = 2
    args_learning_rate = 5e-6
    args_ENABLE_WANDB = False
    args_K = 25
    args_H = 1
    args_T = 1
    args_embedding_method = 'mean' # 'sum' or 'mean'
    args_seed = 36
    args_result_name = ''
    args_eval_each_epoch = True
    args_max_seq_length = 400 # 320 if K is 35. # K = 60 max:442
    args_train_file = 'five_shot_0'
    args_save_epochs = 1
    """ LOCAL """
    root_dir = '../'
    """ --------- """
    bert_hidden_dim = 1024
    pretrain_model_dir = 'roberta-large' #'roberta-large' , 'roberta-large-mnli', 'bert-large-uncased'
    dev_path = root_dir + 'data/banking_data/sampled_data/dev.txt'
    test_path = root_dir + 'data/banking_data/sampled_data/test.txt'
    category_path = root_dir + 'data/banking_data/categories.json'
    main(args_train_batch_size, args_test_batch_size, args_num_train_epochs, args_learning_rate, args_ENABLE_WANDB, args_K, args_embedding_method, args_seed, args_eval_each_epoch, args_T, args_max_seq_length, args_train_file, args_save_epochs, args_H)
"""BERT finetuning runner."""
# from __future__ import absolute_import, division, print_function
import codecs
import numpy as np
import torch
import random
import argparse
import json
from scipy.special import softmax
from sklearn.metrics import accuracy_score
from collections import defaultdict
import torch.nn as nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModel
from transformers.optimization import AdamW
class RoBERTaForTopKEntailment(nn.Module):
    """RoBERTa encoder that scores each of K candidate answers in one pass.

    The input sequence concatenates the article and the K candidates;
    ``segment_ids`` tags every token of the i-th candidate with ``i``
    (non-candidate tokens carry negative ids).  Each candidate is pooled
    into one vector and scored by a small MLP.
    """
    def __init__(self, K):
        super(RoBERTaForTopKEntailment, self).__init__()
        self.K = K
        self.roberta_single= AutoModel.from_pretrained(pretrain_model_dir)
        # Scores one pooled candidate embedding -> one logit.
        self.mlp = nn.Sequential(nn.Linear(bert_hidden_dim, bert_hidden_dim),
                                 nn.ReLU(),
                                 nn.LayerNorm(bert_hidden_dim),
                                 nn.Linear(bert_hidden_dim, 1))

    def forward(self, input_ids, input_mask, segment_ids, embedding_method):
        """Return per-candidate scores of shape (batch, K, 1)."""
        outputs_single = self.roberta_single(input_ids, input_mask, None)
        hidden_states = outputs_single[0]  # (batch, seq_len, hidden)
        slice_position = self.get_label_index(segment_ids)
        # top_K_label_hidden_states shape: (batch_size, K, hidden)
        top_K_label_hidden_states = self.get_label_embedding(hidden_states, slice_position, embedding_method)
        score_single = self.mlp(top_K_label_hidden_states)
        return score_single

    def get_label_index(self, segment_ids):
        """
        for each article-candidate pair, get the start and end positions of
        each candidate's tokens in the sequence (shape (batch, K, 2)).
        This will help compute mean embeddings of a candidate.
        segment_ids: used to slice each candidate in the whole concat sequence
        """
        slice_position = []
        for i_th_label in np.arange(self.K):
            row, column = np.where(segment_ids.cpu() == i_th_label)
            for j_th_batch in np.arange(segment_ids.size()[0]):
                position_in_column = np.where(row == j_th_batch)
                start = np.min(position_in_column)
                end = np.max(position_in_column)
                # +1 skips the leading separator token tagged with this id.
                i_th_label_start = column[start+1]
                i_th_label_end = column[end]
                slice_position.append((i_th_label_start, i_th_label_end))
        slice_position = np.array(slice_position)
        slice_position = slice_position.reshape(self.K, segment_ids.size()[0], 2)
        slice_position = np.transpose(slice_position, (1, 0, 2))
        return slice_position

    def get_label_embedding(self, hidden_states, slice_position, flag):
        """
        For all the top-K candidates, use their token embeddings'
        mean/sum to represent them.  Returns a (batch, K, hidden) tensor
        on the same device as ``hidden_states``.

        Fixes vs. the original: no hard-coded 'cuda' (which crashed on
        CPU-only runs) and no torch.cat inside the loop (which grew the
        result quadratically and carried a throwaway zeros row).
        """
        batch_embeddings = []
        for i_th_batch, slices in enumerate(slice_position):
            sliced_embedding = []
            for j_th_slice in slices:
                label_embeddings = hidden_states[i_th_batch][j_th_slice[0]: j_th_slice[1], :]
                if flag == 'mean':
                    label_embedding = torch.mean(label_embeddings, 0)
                if flag == 'sum':
                    label_embedding = torch.sum(label_embeddings, 0)
                sliced_embedding.append(label_embedding)
            batch_embeddings.append(torch.stack(sliced_embedding))
        # Single stack keeps the tensor on hidden_states' device.
        return torch.stack(batch_embeddings)
class InputFeatures():
    """A single set of features of data.

    Holds the padded token ids, attention mask, candidate segment ids and
    the label (indicator vector for training, -1 otherwise) of one example.
    """
    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_id = label_id

    def __repr__(self):
        # Compact debug representation; the id/mask lists can be very long.
        return f"InputFeatures(len={len(self.input_ids)}, label_id={self.label_id!r})"
def augment_data(data, K, H, T, flag):
    """Duplicate every row of ``data`` so the copies of a row stay adjacent.

    K is the K in top-K; H means how many duplicates when the ground truth
    sits in each k_th position (currently unused here — training uses N=K,
    one copy per candidate slot); T is the copy count for dev/test.

    Args:
        data: 1-D or 2-D array-like.
        flag: 'train' (N=K copies) or 'dev'/'test' (N=T copies).

    Returns:
        numpy array with each row repeated N times consecutively.

    Raises:
        ValueError: on an unknown flag or non-1-D/2-D data (the original
        failed later with an opaque NameError/UnboundLocalError).
    """
    data = np.array(data)
    dim = len(data.shape)
    if flag == 'train':
        """ make sure each position has a positive class """
        N = K
    elif flag == 'test' or flag == 'dev':
        N = T
    else:
        raise ValueError(f"unknown flag: {flag!r}")
    # order='F' flattens column-major, keeping the N copies of a row adjacent.
    if dim == 1:
        augmented_data = np.vstack([data] * N)
        return np.reshape(augmented_data, len(data) * N, order='F')
    if dim == 2:
        augmented_data = np.vstack([[data]] * N)
        return np.reshape(augmented_data, (len(data) * N, -1), order='F')
    raise ValueError(f"expected 1-D or 2-D data, got {dim}-D")
def load_all_data(data_path, args, flag):
    """Load MCTest data from its entailment-formatted TSV file.

    Each line is ``label<TAB>premise<TAB>hypothesis``; the hypotheses of a
    question share one premise (article), and the correct one is labelled
    'entailment'.  Assumes exactly 4 answer options per question (MCTest).
    Examples are repeated once per option, then augmented by augment_data
    (K copies for 'train', T for 'dev'/'test').  For training, candidates
    are rotated so the truth cycles through every slot, with a one-hot
    indicator marking its position.

    Returns (ans_candidates, ground_truth_indicator, truth_label_list,
    example_list); ``ground_truth_indicator`` is empty unless flag=='train'.
    """
    print('loading MCTest...', data_path)
    readfile = codecs.open(data_path, 'r', 'utf-8')
    pos_size = 0  # NOTE(review): never updated or read; candidate for removal.
    examples_dict = defaultdict(list)
    for idx, line in enumerate(readfile):
        parts = line.strip().split('\t')
        if len(parts) ==3:
            premise = parts[1]
            hypothesis = parts[2]
            label = 'entailment' if parts[0] == 'entailment' else 'not_entailment'
            # Group hypotheses under their shared article; tag the correct one 'yes'.
            if label == 'entailment':
                examples_dict[premise].append((hypothesis, 'yes'))
            else:
                examples_dict[premise].append((hypothesis, 'no'))
    ans_candidates = []
    ground_truth_indicator = []
    truth_label_list = []
    example_list = []
    for key, val in examples_dict.items():
        example_list.append({'article' :key})
        ans_candidates.append([i[0] for i in val])
        truth_label_list.append([i[0] for i in val if i[1] == 'yes'])
    # Reshape relies on exactly 4 options (and one 'yes') per question.
    ans_candidates = np.array(ans_candidates).reshape(-1, 4)
    truth_label_list = np.array(truth_label_list).reshape(-1, 1)
    # Repeat each article 4x so rows align with the flattened candidates.
    example_list = np.repeat(np.array(example_list) ,[4 for i in range(len(example_list))])
    print('MCTest size:', len(example_list))
    example_list = augment_data(example_list,args.K, args.H, args.T, flag)
    truth_label_list = augment_data(truth_label_list,args.K, args.H, args.T, flag)
    ans_candidates = augment_data(ans_candidates,args.K, args.H, args.T, flag)
    if flag == 'train':
        """ make sure each position has a positive class """
        """ return a ground truth index indicator in candidates """
        for index, truth in enumerate(truth_label_list):
            ground_truth_index = np.where(ans_candidates[index] == truth)
            candidates_without_truth = np.delete(ans_candidates[index], ground_truth_index)
            np.random.shuffle(candidates_without_truth)
            # truth_position = index % (args.K*args.H) // args.H
            # Cycle the truth through each of the K candidate slots.
            truth_position = index % args.K
            augmented_candidate = np.insert(candidates_without_truth, truth_position,truth)
            ans_candidates[index] = augmented_candidate
            indicator = np.isin(np.asarray(ans_candidates[index]), np.asarray(truth)).astype(int)
            ground_truth_indicator.append(indicator)
    return ans_candidates, ground_truth_indicator, truth_label_list, example_list
def truncate_article(tokens_article, tokens_candidate_list, max_length):
    """Drop tokens from the end of the article, in place, until the article
    plus all candidate answers fit within ``max_length`` tokens.

    The candidate lists are never modified, so their combined length is
    computed once up front.
    """
    candidates_length = sum(len(candidate) for candidate in tokens_candidate_list)
    while len(tokens_article) + candidates_length > max_length:
        tokens_article.pop()
def convert_examples_to_features(flag, examples, top_K_candidates, ground_truth_indicator, max_seq_length,
                                 tokenizer,
                                 cls_token_at_end=False,
                                 cls_token='[CLS]',
                                 cls_token_segment_id=1,
                                 sep_token='[SEP]',
                                 sep_token_extra=False,
                                 pad_on_left=False,
                                 pad_token=0,
                                 pad_token_segment_id=-2,
                                 sequence_a_segment_id=0,
                                 sequence_b_segment_id=1,
                                 mask_padding_with_zero=True):
    """ Loads a data file into a list of `InputBatch`s
        `cls_token_at_end` define the location of the CLS token:
            - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
            - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
    """
    # Module-level counter: how many examples required article truncation.
    global exceed_num
    exceed_num = 0
    max_length_in_data = 0
    features = []
    for (ex_index, example) in enumerate(tqdm(examples, desc='constructing sequence')):
        tokens_article = tokenizer.tokenize(example['article'])
        tokens_candidate_list = []
        for candidate in top_K_candidates[ex_index]:
            tokens_candidate = tokenizer.tokenize(candidate)
            tokens_candidate_list.append(tokens_candidate)
        # 4 candidates x 2 separator tokens, plus [CLS] and the final [SEP].
        special_tokens_count = 4 * 2 + 2
        """ how many data need to be truncated """
        if (len(tokens_article) + sum([len(i) for i in tokens_candidate_list])) > max_seq_length - special_tokens_count:
            exceed_num += 1
            truncate_article(tokens_article, tokens_candidate_list, max_seq_length - special_tokens_count)
        tokens = [cls_token]
        """ tokenize article """
        tokens += tokens_article
        # -1 marks CLS/article tokens; candidate i's tokens get segment id i.
        segment_id_indicator = -1
        segment_ids = [segment_id_indicator] * (len(tokens) + 1)
        """
        class_segment_ids indicates a label's real id according to the class map
        for all tokens of a same label, their corresponding class_segment_ids are the same
        This is to help produce the prediction labels at inference stage
        """
        for tokens_candidate in tokens_candidate_list:
            segment_id_indicator += 1
            tokens += [sep_token] * 2
            tokens += tokens_candidate
            segment_ids += [segment_id_indicator] * (len(tokens_candidate) + 2)
        tokens += [sep_token]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
        max_length_in_data = max(max_length_in_data, len(input_ids))
        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
        else:
            input_ids = input_ids + ([pad_token] * padding_length)
            input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        # Only training examples carry indicator labels; eval uses -1.
        if flag == 'train':
            label_id = ground_truth_indicator[ex_index]
        else:
            label_id = -1
        features.append(
                InputFeatures(input_ids=input_ids,
                              input_mask=input_mask,
                              segment_ids=segment_ids,
                              label_id=label_id))
    print('max_length_in_data:', max_length_in_data)
    return features
def examples_to_features(flag, source_examples, top_K_candidates, ground_truth_indicator, args, tokenizer, batch_size, dataloader_mode='sequential'):
    """Tokenize the examples and wrap the resulting tensors in a DataLoader.

    ``dataloader_mode='sequential'`` preserves example order (evaluation);
    any other value shuffles (training).
    """
    pad_id = tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0]
    feature_list = convert_examples_to_features(
        flag, source_examples, top_K_candidates, ground_truth_indicator,
        args.max_seq_length, tokenizer,
        cls_token_at_end=False,        # BERT/RoBERTa pattern: [CLS] first
        cls_token=tokenizer.cls_token,
        cls_token_segment_id=0,
        sep_token=tokenizer.sep_token,
        sep_token_extra=True,          # RoBERTa uses an extra separator between pairs
        pad_on_left=False,
        pad_token=pad_id,
        pad_token_segment_id=-2)

    def _as_long_tensor(rows):
        # Stack one field across all features into a single LongTensor.
        return torch.tensor(np.array(rows), dtype=torch.long)

    dataset = TensorDataset(
        _as_long_tensor([f.input_ids for f in feature_list]),
        _as_long_tensor([f.input_mask for f in feature_list]),
        _as_long_tensor([f.segment_ids for f in feature_list]),
        _as_long_tensor([f.label_id for f in feature_list]))
    sampler = SequentialSampler(dataset) if dataloader_mode == 'sequential' else RandomSampler(dataset)
    return DataLoader(dataset, sampler=sampler, batch_size=batch_size)
def metric(preds, top_K_candidates, truth_label_list, T):
    """Accuracy of picking the highest-scoring candidate for each example.

    ``T`` is accepted for signature parity with the banking-data variant
    but is not used here.  Returns the accuracy as a percentage string.
    """
    best_idx = np.argmax(preds, axis=1)
    candidates = np.array(top_K_candidates)
    chosen = candidates[np.arange(len(top_K_candidates)), np.array(best_idx)]
    score = accuracy_score(truth_label_list, chosen)
    return f"{score:.2%}"
def evaluate(model, dev_dataloader, test_dataloader, device, dev_top_K_candidates, test_top_K_candidates, dev_truth_label_list, test_truth_label_list, args):
    """Run the model over dev and test and return both accuracy strings.

    Logits are collected per batch on CPU and concatenated once at the end
    (the original grew an array with np.append on every batch, which is
    quadratic in the number of batches).  The duplicated dev/test loops are
    factored into one helper; the unused len_dev/len_test locals are gone.
    """
    model.eval()

    def _predict(dataloader, desc):
        # One forward pass per batch, no gradients; returns softmaxed
        # (num_examples, K) candidate probabilities.
        batch_logits = []
        for _, batch in enumerate(tqdm(dataloader, desc=desc)):
            input_ids, input_mask, segment_ids, label_ids = batch
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            with torch.no_grad():
                logits = model(input_ids, input_mask, segment_ids, args.embedding_method)
            batch_logits.append(logits.detach().cpu().numpy())
        preds = np.concatenate(batch_logits, axis=0)
        preds = preds.reshape(preds.shape[0], -1)
        return softmax(preds, axis=1)

    dev_preds = _predict(dev_dataloader, "load dev data")
    dev_performance = metric(dev_preds, dev_top_K_candidates, dev_truth_label_list, args.T)
    test_preds = _predict(test_dataloader, "load test data")
    test_performance = metric(test_preds, test_top_K_candidates, test_truth_label_list, args.T)
    print('-------------------')
    return dev_performance, test_performance
def main(args_train_batch_size, args_test_batch_size, args_num_train_epochs, args_learning_rate, args_ENABLE_WANDB, args_K, args_embedding_method, args_seed, args_eval_each_epoch, args_T, args_max_seq_length, args_train_file, args_dev_file, args_test_file, args_save_epochs, args_H):
    """Train and evaluate the top-K entailment model on MCTest.

    The ``args_*`` parameters only seed the argparse defaults, so the same
    entry point works from scripts and notebooks (the dummy ``-f`` flag
    absorbs Jupyter's kernel-file argument).

    NOTE(review): depends on module-level globals ``args_result_name``,
    ``root_dir`` and ``pretrain_model_dir`` — confirm they are set first.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_batch_size",
                        default=args_train_batch_size,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--test_batch_size",
                        default=args_test_batch_size,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate",
                        default=args_learning_rate,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=args_num_train_epochs,
                        type=int,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--eval_each_epoch",
                        default=args_eval_each_epoch,
                        action='store_true',
                        help="For each entity, sample its top similar negative labels to construct the negative pairs. If set to False, do random sample")
    parser.add_argument("--save_epochs",
                        default=args_save_epochs,
                        type=int,
                        help="Save checkpoint every X epochs of training")
    parser.add_argument("--embedding_method",
                        default=args_embedding_method,
                        type=str,
                        help="Use mean or sum to get embeddings")
    parser.add_argument("--K",
                        default=args_K,
                        type=int,
                        help="Total number of top candidates selected")
    parser.add_argument("--H",
                        default=args_H,
                        type=int,
                        help="Total number of top candidates selected")
    parser.add_argument("--T",
                        default=args_T,
                        type=int,
                        help="The number of augmentation for each piece of data")
    parser.add_argument("--ENABLE_WANDB",
                        default=args_ENABLE_WANDB,
                        action='store_true',
                        help="Use wandb or not.")
    parser.add_argument('--seed',
                        type=int,
                        default=args_seed,
                        help="random seed for initialization")
    parser.add_argument('--result_name',
                        type=str,
                        default=args_result_name)
    parser.add_argument("--max_seq_length",
                        default=args_max_seq_length,
                        type=int,
                        help="max_seq_length")
    parser.add_argument("--train_file",
                        default=args_train_file,
                        type=str,
                        help="the name of train file")
    parser.add_argument("--dev_file",
                        default=args_dev_file,
                        type=str,
                        help="the name of dev file")
    parser.add_argument("--test_file",
                        default=args_test_file,
                        type=str,
                        help="the name of test file")
    parser.add_argument('-f')
    args = parser.parse_args()
    train_path = root_dir + 'data/MCTest/' + str(args.train_file) + '_in_entail.txt'
    dev_path = root_dir + 'data/MCTest/' + str(args.dev_file) + '_in_entail.txt'
    test_path = root_dir + 'data/MCTest/' + str(args.test_file) + '_in_entail.txt'
    device = torch.device("cuda")
    """ set random seed """
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    """ load data """
    train_ans_candidates, train_ground_truth_indicator, train_truth_label_list, train_example_list = load_all_data(train_path, args, 'train')
    dev_ans_candidates, dev_ground_truth_indicator, dev_truth_label_list, dev_example_list = load_all_data(dev_path, args, 'dev')
    test_ans_candidates, test_ground_truth_indicator, test_truth_label_list, test_example_list = load_all_data(test_path, args, 'test')
    """ load top-k Entailment model """
    tokenizer = AutoTokenizer.from_pretrained(pretrain_model_dir, do_lower_case=True)
    model = RoBERTaForTopKEntailment(args.K)
    model.to(device)
    # Standard transformer fine-tuning: no weight decay on bias/LayerNorm.
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=args.learning_rate)
    """ ------------------- """
    train_dataloader = examples_to_features('train', train_example_list, train_ans_candidates, train_ground_truth_indicator, args, tokenizer, args.train_batch_size, dataloader_mode='random')
    print('train exceed_num:', exceed_num)
    dev_dataloader = examples_to_features('dev', dev_example_list, dev_ans_candidates, dev_ground_truth_indicator, args, tokenizer, args.test_batch_size, dataloader_mode='sequential')
    print('dev exceed_num:', exceed_num)
    test_dataloader = examples_to_features('test', test_example_list, test_ans_candidates, test_ground_truth_indicator, args, tokenizer, args.test_batch_size, dataloader_mode='sequential')
    print('test exceed_num:', exceed_num)
    """ training """
    performence_each_epoch = []
    for epoch in range(args.num_train_epochs):
        for _, batch in enumerate(tqdm(train_dataloader, desc='train|epoch'+str(epoch))):
            model.train()
            batch = tuple(t.to(device) for t in batch)
            input_ids, input_mask, segment_ids, label_ids = batch
            logits = model(input_ids, input_mask, segment_ids, args.embedding_method)
            bcsz= input_ids.shape[0]
            # Single-label CE over the K candidate slots: the indicator
            # vector is converted to the index of its '1' entry.
            loss_fct = CrossEntropyLoss()
            label_ids = torch.tensor([i.index(1) for i in label_ids.cpu().numpy().tolist()]).to(device)
            loss = loss_fct(logits.view(bcsz, -1), label_ids.to(device))
            # loss_fct = BCEWithLogitsLoss()
            # loss = loss_fct(logits.view(bcsz, -1), label_ids.to(device).float())
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        if args.eval_each_epoch and (epoch+1) % args.save_epochs == 0:
            dev_performance, test_performance = evaluate(model, dev_dataloader, test_dataloader, device, dev_ans_candidates, test_ans_candidates, dev_truth_label_list, test_truth_label_list, args)
            print(args.train_file, ' dev_performance:', dev_performance)
            print(args.train_file, ' test_performance:', test_performance)
            print('-------------------')
            training_details = f'ep{epoch}_{args.train_file}_'f'K{args.K}_H_{args.H}_SEED{args.seed}'
            performence_each_epoch.append((dev_performance, test_performance, training_details))
    # Select the epoch with the best dev accuracy ('12.34%' -> 12.34e-2).
    final_test_performance = sorted(performence_each_epoch, key=lambda x: -float(x[0].replace('%', 'e-2')))[0][1]
    final_dev_performance = sorted(performence_each_epoch, key=lambda x: -float(x[0].replace('%', 'e-2')))[0][0]
    # NOTE(review): the selected results are never returned or logged.
    final_model = sorted(performence_each_epoch, key=lambda x: -float(x[0].replace('%', 'e-2')))[0][2]
if __name__ == "__main__":
    # Default hyper-parameters; main() turns these into argparse defaults.
    # (The trailing dataset-extraction residue on the final line of the
    # original was removed — it made the file unparseable.)
    args_train_batch_size = 4
    args_test_batch_size = 256 #32
    args_num_train_epochs = 5
    args_learning_rate = 1e-5
    args_ENABLE_WANDB = False
    args_K = 4
    args_H = 4
    args_T = 1
    args_embedding_method = 'mean' # 'sum' or 'mean'
    args_seed = 42
    args_result_name = ''
    args_eval_each_epoch = True
    args_max_seq_length = 512 # 320 if K is 35. # K = 60 max:442
    args_train_file = 'mc500.train'
    args_dev_file = 'mc500.dev'
    args_test_file = 'mc500.test'
    args_save_epochs = 1
    """ LOCAL """
    root_dir = '../'
    """ --------- """
    bert_hidden_dim = 1024
    pretrain_model_dir = 'roberta-large' #'roberta-large' , 'roberta-large-mnli', 'bert-large-uncased'
    main(args_train_batch_size, args_test_batch_size, args_num_train_epochs, args_learning_rate, args_ENABLE_WANDB, args_K, args_embedding_method, args_seed, args_eval_each_epoch, args_T, args_max_seq_length, args_train_file, args_dev_file, args_test_file, args_save_epochs, args_H)
# Copyright (c) 2018, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""BERT finetuning runner."""
from __future__ import absolute_import, division, print_function
import wandb
from tqdm import tqdm
import argparse
import csv
import logging
import os
import random
import sys
import codecs
import numpy as np
import torch
import torch.nn as nn
from collections import defaultdict
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from scipy.stats import beta
from torch.nn import CrossEntropyLoss, MSELoss
from scipy.special import softmax
from transformers.models.roberta.tokenization_roberta import RobertaTokenizer
from transformers.optimization import AdamW
from transformers.models.roberta.modeling_roberta import RobertaModel#RobertaForSequenceClassification
from sklearn.metrics import ndcg_score
p = os.path.abspath('../')
if p not in sys.path:
sys.path.append(p)
from load_data import load_harsh_data, get_MCTest_examples
# Module-wide logging configuration and model constants.
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)
# Hidden width of roberta-large; consumed by RobertaClassificationHead below.
bert_hidden_dim = 1024
pretrain_model_dir = 'roberta-large'  # 'roberta-large', 'roberta-large-mnli', 'bert-large-uncased'
def store_transformers_models(model, tokenizer, output_dir, flag_str):
    """Persist the model's ``state_dict`` to ``output_dir/flag_str``.

    ``tokenizer`` is accepted for interface compatibility but is not saved
    here (the original save call is commented out).
    """
    target_path = output_dir + '/' + flag_str
    # if not os.path.exists(output_dir):
    #     os.makedirs(output_dir)
    print('starting model storing....')
    # model.save_pretrained(target_path)
    torch.save(model.state_dict(), target_path)
    # tokenizer.save_pretrained(target_path)
    print('store succeed')
class RobertaForSequenceClassification(nn.Module):
    """Pretrained RoBERTa encoder followed by a small classification head."""

    def __init__(self, tagset_size):
        super(RobertaForSequenceClassification, self).__init__()
        self.tagset_size = tagset_size
        # Attribute names are kept stable so saved state_dicts still load.
        self.roberta_single = RobertaModel.from_pretrained(pretrain_model_dir)
        self.single_hidden2tag = RobertaClassificationHead(bert_hidden_dim, tagset_size)

    def forward(self, input_ids, input_mask):
        encoder_out = self.roberta_single(input_ids, input_mask, None)
        # Pooled representation of the <s> token -> (batch, hidden)
        pooled = encoder_out[1]
        # Project onto the label set -> (batch, tagset_size)
        return self.single_hidden2tag(pooled)
class RobertaClassificationHead(nn.Module):
    """Two-layer MLP head: dropout -> dense -> tanh -> dropout -> projection."""

    def __init__(self, bert_hidden_dim, num_labels):
        super(RobertaClassificationHead, self).__init__()
        self.dense = nn.Linear(bert_hidden_dim, bert_hidden_dim)
        self.dropout = nn.Dropout(0.1)
        self.out_proj = nn.Linear(bert_hidden_dim, num_labels)

    def forward(self, features):
        # `features` is the pooled sentence vector (equiv. to [CLS]).
        hidden = self.dropout(features)
        hidden = torch.tanh(self.dense(hidden))
        hidden = self.dropout(hidden)
        return self.out_proj(hidden)
class InputExample(object):
    """A single training/test example for simple sequence classification.

    Attributes:
        guid: unique id for the example.
        text_a: untokenized first sequence (the only one for single-sequence
            tasks).
        text_b: optional untokenized second sequence, for pair tasks.
        label: optional gold label; set for train/dev examples and left
            ``None`` for test examples.
    """

    def __init__(self, guid, text_a, text_b=None, label=None):
        self.guid, self.text_a = guid, text_a
        self.text_b, self.label = text_b, label
class InputFeatures(object):
    """A single set of features of data (padded ids/mask/segments + label)."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        (self.input_ids, self.input_mask,
         self.segment_ids, self.label_id) = input_ids, input_mask, segment_ids, label_id
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file and returns it as a list of rows.

        Args:
            input_file: path to the TSV file.
            quotechar: optional quote character forwarded to ``csv.reader``
                (``None`` disables quoting).

        Returns:
            list[list[str]]: one inner list of cell strings per line.
        """
        # newline='' lets the csv module handle line endings / embedded
        # newlines itself, as the csv docs require.
        with open(input_file, "r", newline="") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            # NOTE: the original also re-decoded each cell via `unicode` under
            # Python 2; that branch is dead on Python 3 and was removed.
            return list(reader)
class RteProcessor(DataProcessor):
    """Processor for the RTE data set (GLUE version)."""

    def get_MNLI_train_and_dev(self, train_filename, dev_filename_list):
        """Load MNLI TSV files.

        classes: ["entailment", "neutral", "contradiction"]

        Args:
            train_filename: path of the training TSV (header on line 0).
            dev_filename_list: list of dev TSV paths; their examples are
                concatenated into a single dev set.

        Returns:
            (train_examples, dev_examples) as lists of InputExample.
        """
        examples_per_file = []
        for filename in [train_filename] + dev_filename_list:
            examples = []
            # `with` guarantees the handle is closed even if a row is
            # malformed (the original left the file open on exceptions).
            with codecs.open(filename, 'r', 'utf-8') as readfile:
                for line_co, row in enumerate(readfile):
                    if line_co == 0:
                        continue  # skip the TSV header
                    line = row.strip().split('\t')
                    # guid numbering matches the original: row index minus header.
                    guid = "train-" + str(line_co - 1)
                    text_a = line[8].strip()
                    text_b = line[9].strip()
                    label = line[-1].strip()  # ["entailment", "neutral", "contradiction"]
                    examples.append(
                        InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
            print('loaded MNLI size:', len(examples))
            examples_per_file.append(examples)
        dev_examples = []
        for listt in examples_per_file[1:]:
            dev_examples += listt
        return examples_per_file[0], dev_examples  # train, dev

    def get_labels(self):
        'here we keep the three-way in MNLI training '
        return ["entailment", "not_entailment"]
        # return ["entailment", "neutral", "contradiction"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue  # header row
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[1]
            text_b = line[2]
            label = line[-1]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer, output_mode,
                                 cls_token_at_end=False,
                                 cls_token='[CLS]',
                                 cls_token_segment_id=1,
                                 sep_token='[SEP]',
                                 sep_token_extra=False,
                                 pad_on_left=False,
                                 pad_token=0,
                                 pad_token_segment_id=0,
                                 sequence_a_segment_id=0,
                                 sequence_b_segment_id=1,
                                 mask_padding_with_zero=True):
    """Convert ``InputExample``s into padded ``InputFeatures``.

    `cls_token_at_end` defines the location of the CLS token:
        - False (default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
        - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
    `cls_token_segment_id` defines the segment id associated with the CLS
    token (0 for BERT, 2 for XLNet).
    """
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d" % (ex_index, len(examples)))
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length fits. Account for [CLS], [SEP], [SEP] with "- 3";
            # "- 4" for RoBERTa (extra separator).
            special_tokens_count = 4 if sep_token_extra else 3
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)
        else:
            # Account for [CLS] and [SEP] with "- 2"; "- 3" for RoBERTa.
            special_tokens_count = 3 if sep_token_extra else 2
            if len(tokens_a) > max_seq_length - special_tokens_count:
                tokens_a = tokens_a[:(max_seq_length - special_tokens_count)]
        # BERT convention: segment ids mark whether a token belongs to the
        # first or second sequence; the [CLS] vector is later used as the
        # "sentence vector" for classification.
        tokens = tokens_a + [sep_token]
        if sep_token_extra:
            # roberta uses an extra separator b/w pairs of sentences
            tokens += [sep_token]
        segment_ids = [sequence_a_segment_id] * len(tokens)
        if tokens_b:
            tokens += tokens_b + [sep_token]
            segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
        if cls_token_at_end:
            tokens = tokens + [cls_token]
            segment_ids = segment_ids + [cls_token_segment_id]
        else:
            tokens = [cls_token] + tokens
            segment_ids = [cls_token_segment_id] + segment_ids
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
        else:
            input_ids = input_ids + ([pad_token] * padding_length)
            input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        if output_mode == "classification":
            label_id = label_map[example.label]
        elif output_mode == "regression":
            label_id = float(example.label)
        else:
            raise KeyError(output_mode)
        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_id=label_id))
    return features
def convert_examples_to_features_concatenate(examples, label_list, max_seq_length,
                                             tokenizer, output_mode,
                                             cls_token_at_end=False,
                                             cls_token='[CLS]',
                                             cls_token_segment_id=1,
                                             sep_token='[SEP]',
                                             sep_token_extra=False,
                                             pad_on_left=False,
                                             pad_token=0,
                                             pad_token_segment_id=0,
                                             sequence_a_segment_id=0,
                                             sequence_b_segment_id=1,
                                             mask_padding_with_zero=True):
    """Augmentation variant of ``convert_examples_to_features``.

    Assumes examples come in groups of 4 (one multiple-choice question per
    group). For each example, its hypothesis is concatenated with shuffled
    copies of the other 3 hypotheses in the group (twice, with different
    shuffles), producing two augmented features per example.

    `cls_token_at_end` / `cls_token_segment_id` semantics are as in
    ``convert_examples_to_features``.
    """
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    # Start index of each 4-example question group; a trailing partial group
    # (len(examples) % 4 != 0) is dropped.
    group_start_idlist = [4 * i for i in range(len(examples) // 4)]
    for group_id in group_start_idlist:
        sub_examples = examples[group_id:group_id + 4]
        for (ex_index, example) in enumerate(sub_examples):
            tokens_a = tokenizer.tokenize(example.text_a)
            tokens_b = tokenizer.tokenize(example.text_b)
            '''something added'''
            # The other choices of this question (matched by hypothesis text,
            # so duplicated hypotheses would be excluded too).
            other_3_examples_in_the_group = [ex_i for ex_i in sub_examples if ex_i.text_b != example.text_b]
            tokens_b_concatenated = []
            # Two independently shuffled concatenations of the other choices.
            for ii in range(2):
                random.shuffle(other_3_examples_in_the_group)
                tail_seq = []
                for ex_i in other_3_examples_in_the_group:
                    tail_seq += [sep_token] + tokenizer.tokenize(ex_i.text_b) + [sep_token]
                tokens_b_concatenated.append(tokens_b + [sep_token] + tail_seq)
            for tokens_b in tokens_b_concatenated:
                # Account for [CLS] plus the separators; "- 7" for RoBERTa.
                special_tokens_count = 7 if sep_token_extra else 3
                # NOTE(review): _truncate_seq_pair mutates tokens_a in place,
                # so the second concatenated variant sees a tokens_a already
                # truncated for the first one — presumably intentional
                # augmentation noise, but worth confirming.
                _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)
                tokens = tokens_a + [sep_token]
                if sep_token_extra:
                    # roberta uses an extra separator b/w pairs of sentences
                    tokens += [sep_token]
                segment_ids = [sequence_a_segment_id] * len(tokens)
                tokens += tokens_b
                segment_ids += [sequence_b_segment_id] * (len(tokens_b))
                tokens = [cls_token] + tokens
                segment_ids = [cls_token_segment_id] + segment_ids
                input_ids = tokenizer.convert_tokens_to_ids(tokens)
                # The mask has 1 for real tokens and 0 for padding tokens.
                input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
                # Zero-pad up to the sequence length.
                padding_length = max_seq_length - len(input_ids)
                if pad_on_left:
                    input_ids = ([pad_token] * padding_length) + input_ids
                    input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                    segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                else:
                    input_ids = input_ids + ([pad_token] * padding_length)
                    input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
                    segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
                assert len(input_ids) == max_seq_length
                assert len(input_mask) == max_seq_length
                assert len(segment_ids) == max_seq_length
                if output_mode == "classification":
                    label_id = label_map[example.label]
                elif output_mode == "regression":
                    label_id = float(example.label)
                else:
                    raise KeyError(output_mode)
                features.append(
                    InputFeatures(input_ids=input_ids,
                                  input_mask=input_mask,
                                  segment_ids=segment_ids,
                                  label_id=label_id))
    return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def main(args_task_name, args_do_train, args_do_lower_case, args_data_label, args_num_train_epochs,
         args_train_batch_size, args_eval_batch_size, args_learning_rate, args_max_seq_length,
         args_seed, args_train_file):
    """Fine-tune the DocNLI-pretrained RoBERTa entailment model on MCTest.

    The ``args_*`` parameters are installed as argparse defaults, so the run
    can be driven either programmatically (see ``__main__``) or via the CLI.
    Trains with plain + concatenation-augmented features, evaluates every
    1200 optimizer steps, and prints the best accuracy seen.
    """
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--task_name",
                        default=args_task_name,
                        type=str,
                        help="The name of the task to train.")
    parser.add_argument("--train_file",
                        default=args_train_file,
                        type=str,
                        help="The name of the task to train.")
    ## Other parameters
    parser.add_argument("--cache_dir",
                        default="",
                        type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--data_label",
                        default=args_data_label,
                        type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length",
                        default=args_max_seq_length,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=args_train_batch_size,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=args_eval_batch_size,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=args_learning_rate,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=args_num_train_epochs,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=args_seed,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
    args = parser.parse_args()
    # The explicit function arguments override whatever the CLI parsed.
    args.do_train = args_do_train
    args.do_lower_case = args_do_lower_case
    processors = {
        "rte": RteProcessor
    }
    output_modes = {
        "rte": "classification"
    }
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    # Per-step micro-batch size after gradient accumulation.
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")
    task_name = args.task_name.lower()
    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))
    processor = processors[task_name]()
    output_mode = output_modes[task_name]
    # MCTest splits: 4 entailment examples per multiple-choice question.
    train_examples, _ = get_MCTest_examples(f'{args.train_file}.train', hypo_only=False)
    dev_examples, _ = get_MCTest_examples(f'{args.train_file}.dev', hypo_only=False)
    test_examples, _ = get_MCTest_examples(f'{args.train_file}.test', hypo_only=False)
    label_list = ["entailment", "not_entailment"]  # , "contradiction"]
    num_labels = len(label_list)
    print('num_labels:', num_labels, 'training size:', len(train_examples), 'dev size:', len(dev_examples), ' test size:', len(test_examples))
    num_train_optimization_steps = None
    num_train_optimization_steps = int(
        len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
    if args.local_rank != -1:
        num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
    model = RobertaForSequenceClassification(num_labels)
    tokenizer = RobertaTokenizer.from_pretrained(pretrain_model_dir, do_lower_case=args.do_lower_case)
    # NOTE: hard-coded local checkpoint path for the DocNLI-pretrained weights.
    model.load_state_dict(torch.load('E:/saved_model/MCTest/DocNLI.pretrained.RoBERTA.model.pt', map_location=device))
    model.to(device)
    # AdamW with weight decay on everything except biases and LayerNorm.
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters,
                      lr=args.learning_rate)
    global_step = 0
    nb_tr_steps = 0
    tr_loss = 0
    max_test_acc = 0.0
    max_dev_acc = 0.0
    if args.do_train:
        # Plain features plus the shuffled-concatenation augmentation.
        train_features = convert_examples_to_features(
            train_examples, label_list, args.max_seq_length, tokenizer, output_mode,
            cls_token_at_end=False,
            cls_token=tokenizer.cls_token,
            cls_token_segment_id=0,
            sep_token=tokenizer.sep_token,
            sep_token_extra=True,  # roberta uses an extra separator b/w pairs of sentences
            pad_on_left=False,
            pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
            pad_token_segment_id=0)
        train_features_concatenate = convert_examples_to_features_concatenate(
            train_examples, label_list, args.max_seq_length, tokenizer, output_mode,
            cls_token_at_end=False,
            cls_token=tokenizer.cls_token,
            cls_token_segment_id=0,
            sep_token=tokenizer.sep_token,
            sep_token_extra=True,
            pad_on_left=False,
            pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
            pad_token_segment_id=0)
        train_features += train_features_concatenate
        '''load dev set'''
        dev_features = convert_examples_to_features(
            dev_examples, label_list, args.max_seq_length, tokenizer, output_mode,
            cls_token_at_end=False,
            cls_token=tokenizer.cls_token,
            cls_token_segment_id=0,
            sep_token=tokenizer.sep_token,
            sep_token_extra=True,
            pad_on_left=False,
            pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
            pad_token_segment_id=0)
        dev_all_input_ids = torch.tensor([f.input_ids for f in dev_features], dtype=torch.long)
        dev_all_input_mask = torch.tensor([f.input_mask for f in dev_features], dtype=torch.long)
        dev_all_segment_ids = torch.tensor([f.segment_ids for f in dev_features], dtype=torch.long)
        dev_all_label_ids = torch.tensor([f.label_id for f in dev_features], dtype=torch.long)
        dev_data = TensorDataset(dev_all_input_ids, dev_all_input_mask, dev_all_segment_ids, dev_all_label_ids)
        dev_sampler = SequentialSampler(dev_data)
        dev_dataloader = DataLoader(dev_data, sampler=dev_sampler, batch_size=args.eval_batch_size)
        '''load test set'''
        test_features = convert_examples_to_features(
            test_examples, label_list, args.max_seq_length, tokenizer, output_mode,
            cls_token_at_end=False,
            cls_token=tokenizer.cls_token,
            cls_token_segment_id=0,
            sep_token=tokenizer.sep_token,
            sep_token_extra=True,
            pad_on_left=False,
            pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
            pad_token_segment_id=0)
        test_all_input_ids = torch.tensor([f.input_ids for f in test_features], dtype=torch.long)
        test_all_input_mask = torch.tensor([f.input_mask for f in test_features], dtype=torch.long)
        test_all_segment_ids = torch.tensor([f.segment_ids for f in test_features], dtype=torch.long)
        test_all_label_ids = torch.tensor([f.label_id for f in test_features], dtype=torch.long)
        test_data = TensorDataset(test_all_input_ids, test_all_input_mask, test_all_segment_ids, test_all_label_ids)
        test_sampler = SequentialSampler(test_data)
        test_dataloader = DataLoader(test_data, sampler=test_sampler, batch_size=args.eval_batch_size)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_features))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        train_sampler = RandomSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
        iter_co = 0
        final_test_performance = 0.0
        global_step = 0
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                model.train()
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch
                logits = model(input_ids, input_mask)
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                optimizer.step()
                optimizer.zero_grad()
                global_step += 1
                iter_co += 1
                '''
                start evaluate on dev set after this epoch
                '''
                model.eval()
                # dev_acc,_ = evaluation(dev_dataloader, device, model)
                # Periodic evaluation; NOTE(review): this scores the *test*
                # loader for both "dev" and final numbers — confirm intended.
                if global_step % 1200 == 0:
                    dev_acc, _ = evaluation(test_dataloader, device, model)
                    if dev_acc > max_dev_acc:
                        max_dev_acc = dev_acc
                        print('\ndev acc:', dev_acc, ' max_dev_acc:', max_dev_acc, '\n')
                        '''evaluate on the test set with the best dev model'''
                        final_test_performance, _ = evaluation(test_dataloader, device, model)
                        print('\ntest acc:', final_test_performance, '\n')
                    else:
                        print('\ndev acc:', dev_acc, ' max_dev_acc:', max_dev_acc, '\n')
        print('final_test_performance:', final_test_performance)
def evaluation(dev_dataloader, device, model):
    """Score a 4-choice entailment dataloader.

    Batches are (input_ids, input_mask, segment_ids, label_ids); every 4
    consecutive rows form one question with exactly one gold answer.

    Returns:
        (accuracy, NDCG) over the questions.
    """
    logit_chunks = []
    gold_label_ids = []
    for input_ids, input_mask, segment_ids, label_ids in tqdm(dev_dataloader):
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        label_ids = label_ids.to(device)
        gold_label_ids += list(label_ids.detach().cpu().numpy())
        with torch.no_grad():
            logits = model(input_ids, input_mask)
        logit_chunks.append(logits.detach().cpu().numpy())
    preds = np.concatenate(logit_chunks, axis=0)
    pred_probs = softmax(preds, axis=1)
    prob_of_entail = list(pred_probs[:, 0])  # entail is the first label
    gold_label_ids = [1 - x for x in gold_label_ids]  # 1 means entail hereafter
    assert len(prob_of_entail) == len(gold_label_ids)
    assert len(gold_label_ids) % 4 == 0
    '''accuracy for multi-choice QA'''
    question_size = len(gold_label_ids) // 4
    prob_of_entail = np.array(prob_of_entail).reshape(question_size, 4)
    gold_label_ids = np.array(gold_label_ids).reshape(question_size, 4)
    hit = 0
    for scores, golds in zip(prob_of_entail, gold_label_ids):
        score_sublist = list(scores)
        gold_labellist = list(golds)
        assert sum(gold_labellist) == 1
        # Count a hit when the gold choice attains the maximum score
        # (ties in the gold choice's favour, as in the original).
        if max(score_sublist) == score_sublist[gold_labellist.index(1)]:
            hit += 1
    acc = hit / question_size
    '''NDCG4'''
    ndcg = ndcg_score(gold_label_ids, prob_of_entail)
    return acc, ndcg
if __name__ == "__main__":
    # Fixed hyper-parameters for a single MCTest fine-tuning run; these are
    # forwarded to main() and installed as the argparse defaults.
    args_task_name = 'rte'
    args_do_train = True
    args_do_lower_case = True
    args_data_label = 'DUC'
    args_num_train_epochs = 5
    args_train_batch_size = 8
    args_eval_batch_size = 64
    args_learning_rate = 1e-6
    args_max_seq_length = 512
    args_seed = 42
    args_train_file = 'mc500'  # prefix; '.train'/'.dev'/'.test' are appended in main()
    main(args_task_name, args_do_train, args_do_lower_case, args_data_label, args_num_train_epochs,
         args_train_batch_size, args_eval_batch_size, args_learning_rate, args_max_seq_length,
         args_seed, args_train_file)
| 39,264 | 44.977752 | 218 | py |
MaskedDenoising | MaskedDenoising-main/main_test_swinir_x8.py | import argparse
import cv2
import glob
import numpy as np
from collections import OrderedDict
import os
import torch
import requests
from models.network_swinir import SwinIR as net
from utils import utils_image as util
from utils import utils_option as option
import lpips
import torch
def transform(v, op):
    """Apply one geometric self-ensemble transform to a batch tensor.

    Args:
        v: 4-D image tensor (N, C, H, W); moved through NumPy on the CPU.
        op: 'v' flips vertically (last axis), 'h' flips horizontally
            (third axis), 't' transposes the last two axes.

    Returns:
        The transformed tensor on CUDA (this pipeline assumes GPU inference).

    Raises:
        ValueError: if ``op`` is not one of 'v', 'h', 't'. (Previously an
        unknown op fell through to a NameError on the unbound `tfnp`.)
    """
    # if self.precision != 'single': v = v.float()
    v2np = v.data.cpu().numpy()
    if op == 'v':
        tfnp = v2np[:, :, :, ::-1].copy()
    elif op == 'h':
        tfnp = v2np[:, :, ::-1, :].copy()
    elif op == 't':
        tfnp = v2np.transpose((0, 1, 3, 2)).copy()
    else:
        raise ValueError("unknown self-ensemble op: %r (expected 'v', 'h' or 't')" % (op,))
    ret = torch.Tensor(tfnp).cuda()
    # if self.precision == 'half': ret = ret.half()
    return ret
def main():
    """Run SwinIR inference with x8 geometric self-ensemble and report metrics.

    Parses CLI flags plus a JSON option file (the netG section is stashed in
    the global ``opt_net`` for define_model), restores or downloads the
    checkpoint, then for every image in the test folder averages the network
    output over all 8 flip/transpose variants before computing
    PSNR/SSIM/LPIPS against the ground truth and saving the result images.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--task', type=str, default='lightweight_sr', help='classical_sr, lightweight_sr, real_sr, '
                                                                           'gray_dn, color_dn, jpeg_car')
    parser.add_argument('--scale', type=int, default=1, help='scale factor: 1, 2, 3, 4, 8') # 1 for dn and jpeg car
    parser.add_argument('--noise', type=int, default=15, help='noise level: 15, 25, 50')
    parser.add_argument('--jpeg', type=int, default=40, help='scale factor: 10, 20, 30, 40')
    parser.add_argument('--training_patch_size', type=int, default=128, help='patch size used in training SwinIR. '
                                       'Just used to differentiate two different settings in Table 2 of the paper. '
                                       'Images are NOT tested patch by patch.')
    parser.add_argument('--large_model', action='store_true', help='use large model, only provided for real image sr')
    parser.add_argument('--model_path', type=str,
                        default='model_zoo/swinir/001_classicalSR_DIV2K_s48w8_SwinIR-M_x2.pth')
    parser.add_argument('--folder_lq', type=str, default=None, help='input low-quality test image folder')
    parser.add_argument('--folder_gt', type=str, default=None, help='input ground-truth test image folder')
    parser.add_argument('--tile', type=int, default=None, help='Tile size, None for no tile during testing (testing as a whole)')
    parser.add_argument('--tile_overlap', type=int, default=32, help='Overlapping of different tiles')
    parser.add_argument('--opt', type=str, help='Path to option JSON file.')
    parser.add_argument('--name', type=str, default="test", help='Path to option JSON file.')
    opt = option.parse(parser.parse_args().opt, is_train=False)
    # Expose the generator config globally so define_model() can read it.
    global opt_net
    opt_net = opt['netG']
    args = parser.parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # set up model: download the official checkpoint when not present locally
    if os.path.exists(args.model_path):
        print(f'loading model from {args.model_path}')
    else:
        os.makedirs(os.path.dirname(args.model_path), exist_ok=True)
        url = 'https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/{}'.format(os.path.basename(args.model_path))
        r = requests.get(url, allow_redirects=True)
        print(f'downloading model {args.model_path}')
        open(args.model_path, 'wb').write(r.content)
    model = define_model(args)
    model.eval()
    model = model.to(device)
    # setup folder and path
    folder, save_dir, border, window_size = setup(args)
    # print(folder)
    print(args.folder_lq)
    os.makedirs(save_dir, exist_ok=True)
    test_results = OrderedDict()
    test_results['psnr'] = []
    test_results['ssim'] = []
    test_results['psnr_y'] = []
    test_results['ssim_y'] = []
    test_results['psnr_b'] = []
    test_results['lpips'] = []
    psnr, ssim, psnr_y, ssim_y, psnr_b, lpips_ = 0, 0, 0, 0, 0, 0
    loss_fn_alex = lpips.LPIPS(net='alex').cuda()  # best forward scores
    for idx, path in enumerate(sorted(glob.glob(os.path.join(folder, '*')))):
        # print(1)
        # read image
        imgname, img_lq, img_gt = get_image_pair(args, path)  # image to HWC-BGR, float32
        img_lq = np.transpose(img_lq if img_lq.shape[2] == 1 else img_lq[:, :, [2, 1, 0]], (2, 0, 1))  # HCW-BGR to CHW-RGB
        img_lq = torch.from_numpy(img_lq).float().unsqueeze(0).to(device)  # CHW-RGB to NCHW-RGB
        # inference
        with torch.no_grad():
            # pad input image to be a multiple of window_size
            _, _, h_old, w_old = img_lq.size()
            h_pad = (h_old // window_size + 1) * window_size - h_old
            w_pad = (w_old // window_size + 1) * window_size - w_old
            img_lq = torch.cat([img_lq, torch.flip(img_lq, [2])], 2)[:, :, :h_old + h_pad, :]
            img_lq = torch.cat([img_lq, torch.flip(img_lq, [3])], 3)[:, :, :, :w_old + w_pad]
            # x8 self-ensemble: build all 8 flip/transpose variants of the input
            list_x = []
            x = [img_lq]
            for tf in 'v', 'h', 't': x.extend([transform(_x, tf) for _x in x])
            list_x.append(x)
            list_y = []
            for x in zip(*list_x):
                # print(len(x))
                # y = forward_function(*x)
                y = test(x[0], model, args, window_size)
                if not isinstance(y, list): y = [y]
                if not list_y:
                    list_y = [[_y] for _y in y]
                else:
                    for _list_y, _y in zip(list_y, y): _list_y.append(_y)
            # undo each variant's transform so all predictions are aligned,
            # then average them
            for _list_y in list_y:
                for i in range(len(_list_y)):
                    if i > 3:
                        _list_y[i] = transform(_list_y[i], 't')
                    if i % 4 > 1:
                        _list_y[i] = transform(_list_y[i], 'h')
                    if (i % 4) % 2 == 1:
                        _list_y[i] = transform(_list_y[i], 'v')
            y = [torch.cat(_y, dim=0).mean(dim=0, keepdim=True) for _y in list_y]
            if len(y) == 1: y = y[0]
            output = y
            # output = test(img_lq, model, args, window_size)
            output = output[..., :h_old * args.scale, :w_old * args.scale]
        # save image
        output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
        if output.ndim == 3:
            output = np.transpose(output[[2, 1, 0], :, :], (1, 2, 0))  # CHW-RGB to HCW-BGR
        output = (output * 255.0).round().astype(np.uint8)  # float32 to uint8
        cv2.imwrite(f'{save_dir}/{imgname}_SwinIR.png', output)
        # evaluate psnr/ssim/psnr_b
        if img_gt is not None:
            img_gt = (img_gt * 255.0).round().astype(np.uint8)  # float32 to uint8
            img_gt = img_gt[:h_old * args.scale, :w_old * args.scale, ...]  # crop gt
            img_gt = np.squeeze(img_gt)
            psnr = util.calculate_psnr(output, img_gt, border=border)
            ssim = util.calculate_ssim(output, img_gt, border=border)
            lpips_ = loss_fn_alex(im2tensor(output).cuda(), im2tensor(img_gt).cuda()).item()
            test_results['psnr'].append(psnr)
            test_results['ssim'].append(ssim)
            test_results['lpips'].append(lpips_)
            if img_gt.ndim == 3:  # RGB image
                output_y = util.bgr2ycbcr(output.astype(np.float32) / 255.) * 255.
                img_gt_y = util.bgr2ycbcr(img_gt.astype(np.float32) / 255.) * 255.
                psnr_y = util.calculate_psnr(output_y, img_gt_y, border=border)
                ssim_y = util.calculate_ssim(output_y, img_gt_y, border=border)
                test_results['psnr_y'].append(psnr_y)
                test_results['ssim_y'].append(ssim_y)
                if args.task in ['jpeg_car']:
                    psnr_b = util.calculate_psnrb(output, img_gt, border=border)
                    test_results['psnr_b'].append(psnr_b)
                # print('Testing {:d} {:20s} - PSNR: {:.2f} dB; SSIM: {:.4f}; '
                #       'PSNR_Y: {:.2f} dB; SSIM_Y: {:.4f}; '
                #       'PSNR_B: {:.2f} dB; LPIPS: {:.4f}'.
                #       format(idx, imgname, psnr, ssim, psnr_y, ssim_y, psnr_b, lpips_))
        else:
            print('Testing {:d} {:20s}'.format(idx, imgname))
    # summarize psnr/ssim over the whole folder
    if img_gt is not None:
        ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
        ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
        ave_lpips = sum(test_results['lpips']) / len(test_results['lpips'])
        print('\n{} \n-- Average PSNR/SSIM(RGB): {:.2f} dB; {:.4f}'.format(save_dir, ave_psnr, ave_ssim))
        if img_gt.ndim == 3:
            ave_psnr_y = sum(test_results['psnr_y']) / len(test_results['psnr_y'])
            ave_ssim_y = sum(test_results['ssim_y']) / len(test_results['ssim_y'])
            print('-- Average PSNR_Y/SSIM_Y/LPIPS: {:.2f}/{:.4f}/{:.4f}'.format(ave_psnr_y, ave_ssim_y, ave_lpips))
        if args.task in ['jpeg_car']:
            ave_psnr_b = sum(test_results['psnr_b']) / len(test_results['psnr_b'])
            print('-- Average PSNR_B: {:.2f} dB'.format(ave_psnr_b))
def define_model(args):
    """Build the SwinIR network for ``args.task`` and load its checkpoint.

    The ``lightweight_sr`` branch builds the network from the JSON option
    file (read via the global ``opt_net``); the other branches use fixed
    configurations. Weights are loaded strictly from ``args.model_path``
    under the key ``param_key_g`` when present, else from the raw state dict.
    """
    # 001 classical image sr
    if args.task == 'classical_sr':
        model = net(upscale=args.scale, in_chans=3, img_size=args.training_patch_size, window_size=8,
                    img_range=1., depths=[6, 6, 6, 6, 6, 6], embed_dim=180, num_heads=[6, 6, 6, 6, 6, 6],
                    mlp_ratio=2, upsampler='pixelshuffle', resi_connection='1conv')
        param_key_g = 'params'
    # 002 lightweight image sr
    # use 'pixelshuffledirect' to save parameters
    elif args.task == 'lightweight_sr':
        # model = net(upscale=args.scale, in_chans=3, img_size=64, window_size=8,
        #             img_range=1., depths=[6, 6, 6, 6], embed_dim=60, num_heads=[6, 6, 6, 6],
        #             mlp_ratio=2, upsampler='pixelshuffledirect', resi_connection='1conv')
        # network hyper-parameters come from the option file's netG section
        global opt_net
        model = net(upscale=opt_net['upscale'],
                    in_chans=opt_net['in_chans'],
                    img_size=opt_net['img_size'],
                    window_size=opt_net['window_size'],
                    img_range=opt_net['img_range'],
                    depths=opt_net['depths'],
                    embed_dim=opt_net['embed_dim'],
                    num_heads=opt_net['num_heads'],
                    mlp_ratio=opt_net['mlp_ratio'],
                    upsampler=opt_net['upsampler'],
                    resi_connection=opt_net['resi_connection'],
                    talking_heads=opt_net['talking_heads'],
                    use_attn_fn=opt_net['attn_fn'],
                    head_scale=opt_net['head_scale'],
                    on_attn=opt_net['on_attn'],
                    use_mask=opt_net['use_mask'],
                    mask_ratio1=opt_net['mask_ratio1'],
                    mask_ratio2=opt_net['mask_ratio2'],
                    mask_is_diff=opt_net['mask_is_diff'],
                    type=opt_net['type'],
                    opt=opt_net,
                    )
        param_key_g = 'params'
    # 003 real-world image sr
    elif args.task == 'real_sr':
        if not args.large_model:
            # use 'nearest+conv' to avoid block artifacts
            model = net(upscale=4, in_chans=3, img_size=64, window_size=8,
                        img_range=1., depths=[6, 6, 6, 6, 6, 6], embed_dim=180, num_heads=[6, 6, 6, 6, 6, 6],
                        mlp_ratio=2, upsampler='nearest+conv', resi_connection='1conv')
        else:
            # larger model size; use '3conv' to save parameters and memory; use ema for GAN training
            model = net(upscale=4, in_chans=3, img_size=64, window_size=8,
                        img_range=1., depths=[6, 6, 6, 6, 6, 6, 6, 6, 6], embed_dim=240,
                        num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8],
                        mlp_ratio=2, upsampler='nearest+conv', resi_connection='3conv')
        param_key_g = 'params_ema'
    # 004 grayscale image denoising
    elif args.task == 'gray_dn':
        model = net(upscale=1, in_chans=1, img_size=128, window_size=8,
                    img_range=1., depths=[6, 6, 6, 6, 6, 6], embed_dim=180, num_heads=[6, 6, 6, 6, 6, 6],
                    mlp_ratio=2, upsampler='', resi_connection='1conv')
        param_key_g = 'params'
    # 005 color image denoising
    elif args.task == 'color_dn':
        model = net(upscale=1, in_chans=3, img_size=128, window_size=8,
                    img_range=1., depths=[6, 6, 6, 6, 6, 6], embed_dim=180, num_heads=[6, 6, 6, 6, 6, 6],
                    mlp_ratio=2, upsampler='', resi_connection='1conv')
        param_key_g = 'params'
    # 006 JPEG compression artifact reduction
    # use window_size=7 because JPEG encoding uses 8x8; use img_range=255 because it's sligtly better than 1
    elif args.task == 'jpeg_car':
        model = net(upscale=1, in_chans=1, img_size=126, window_size=7,
                    img_range=255., depths=[6, 6, 6, 6, 6, 6], embed_dim=180, num_heads=[6, 6, 6, 6, 6, 6],
                    mlp_ratio=2, upsampler='', resi_connection='1conv')
        param_key_g = 'params'
    pretrained_model = torch.load(args.model_path)
    model.load_state_dict(pretrained_model[param_key_g] if param_key_g in pretrained_model.keys() else pretrained_model, strict=True)
    return model
def setup(args):
# 001 classical image sr/ 002 lightweight image sr
if args.task in ['classical_sr', 'lightweight_sr']:
save_dir = f'results/{args.name}'
folder = args.folder_gt
border = args.scale
window_size = 8
# 003 real-world image sr
elif args.task in ['real_sr']:
save_dir = f'results/swinir_{args.task}_x{args.scale}'
if args.large_model:
save_dir += '_large'
folder = args.folder_lq
border = 0
window_size = 8
# 004 grayscale image denoising/ 005 color image denoising
elif args.task in ['gray_dn', 'color_dn']:
save_dir = f'results/swinir_{args.task}_noise{args.noise}'
folder = args.folder_gt
border = 0
window_size = 8
# 006 JPEG compression artifact reduction
elif args.task in ['jpeg_car']:
save_dir = f'results/swinir_{args.task}_jpeg{args.jpeg}'
folder = args.folder_gt
border = 0
window_size = 7
return folder, save_dir, border, window_size
def get_image_pair(args, path):
    """Load ``(imgname, img_lq, img_gt)`` for one test image according to the task.

    Images are float32 HWC-BGR arrays in [0, 1]. ``img_gt`` is ``None`` for
    ``real_sr``; for the denoising and JPEG tasks the degraded input is
    synthesized on the fly from the ground truth with a fixed random seed.
    """
    (imgname, imgext) = os.path.splitext(os.path.basename(path))
    # 001 classical image sr/ 002 lightweight image sr (load lq-gt image pairs)
    if args.task in ['classical_sr', 'lightweight_sr']:
        img_gt = cv2.imread(path, cv2.IMREAD_COLOR).astype(np.float32) / 255.
        # img_lq = cv2.imread(f'{args.folder_lq}/{imgname}_x{args.scale}{imgext}', cv2.IMREAD_COLOR).astype(np.float32) / 255.
        img_lq = cv2.imread(f'{args.folder_lq}/{imgname}{imgext}', cv2.IMREAD_COLOR).astype(np.float32) / 255.
    # 003 real-world image sr (load lq image only)
    elif args.task in ['real_sr']:
        img_gt = None
        img_lq = cv2.imread(path, cv2.IMREAD_COLOR).astype(np.float32) / 255.
    # 004 grayscale image denoising (load gt image and generate lq image on-the-fly)
    elif args.task in ['gray_dn']:
        img_gt = cv2.imread(path, cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255.
        np.random.seed(seed=0)  # fixed seed => reproducible synthetic noise
        img_lq = img_gt + np.random.normal(0, args.noise / 255., img_gt.shape)
        img_gt = np.expand_dims(img_gt, axis=2)
        img_lq = np.expand_dims(img_lq, axis=2)
    # 005 color image denoising (load gt image and generate lq image on-the-fly)
    elif args.task in ['color_dn']:
        img_gt = cv2.imread(path, cv2.IMREAD_COLOR).astype(np.float32) / 255.
        np.random.seed(seed=0)
        img_lq = img_gt + np.random.normal(0, args.noise / 255., img_gt.shape)
    # 006 JPEG compression artifact reduction (load gt image and generate lq image on-the-fly)
    elif args.task in ['jpeg_car']:
        img_gt = cv2.imread(path, 0)
        result, encimg = cv2.imencode('.jpg', img_gt, [int(cv2.IMWRITE_JPEG_QUALITY), args.jpeg])
        img_lq = cv2.imdecode(encimg, 0)
        img_gt = np.expand_dims(img_gt, axis=2).astype(np.float32) / 255.
        img_lq = np.expand_dims(img_lq, axis=2).astype(np.float32) / 255.
    return imgname, img_lq, img_gt
def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):
    """Convert an HxWxC image array in [0, 255] to a 1xCxHxW tensor in [-1, 1].

    ``imtype`` is accepted for API compatibility but unused.
    """
    scaled = image / factor - cent
    batched = scaled[:, :, :, np.newaxis]
    return torch.Tensor(batched.transpose((3, 2, 0, 1)))
def test(img_lq, model, args, window_size):
if args.tile is None:
# test the image as a whole
output = model(img_lq)
else:
# test the image tile by tile
b, c, h, w = img_lq.size()
tile = min(args.tile, h, w)
assert tile % window_size == 0, "tile size should be a multiple of window_size"
tile_overlap = args.tile_overlap
sf = args.scale
stride = tile - tile_overlap
h_idx_list = list(range(0, h-tile, stride)) + [h-tile]
w_idx_list = list(range(0, w-tile, stride)) + [w-tile]
E = torch.zeros(b, c, h*sf, w*sf).type_as(img_lq)
W = torch.zeros_like(E)
for h_idx in h_idx_list:
for w_idx in w_idx_list:
in_patch = img_lq[..., h_idx:h_idx+tile, w_idx:w_idx+tile]
out_patch = model(in_patch)
out_patch_mask = torch.ones_like(out_patch)
E[..., h_idx*sf:(h_idx+tile)*sf, w_idx*sf:(w_idx+tile)*sf].add_(out_patch)
W[..., h_idx*sf:(h_idx+tile)*sf, w_idx*sf:(w_idx+tile)*sf].add_(out_patch_mask)
output = E.div_(W)
return output
if __name__ == '__main__':
    # Entry point: run the full self-ensemble evaluation pipeline.
    main()
| 17,485 | 44.774869 | 133 | py |
MaskedDenoising | MaskedDenoising-main/main_test_swinir.py | import argparse
import cv2
import glob
import numpy as np
from collections import OrderedDict
import os
import torch
import requests
from models.network_swinir import SwinIR as net
from utils import utils_image as util
from utils import utils_option as option
import lpips
import torch
def main():
    """Run single-pass SwinIR inference over a folder and report metrics.

    Parses CLI flags plus a JSON option file (the netG section is stashed in
    the global ``opt_net`` for define_model), restores or downloads the
    checkpoint, runs each test image through the network once (no
    self-ensemble), saves the outputs and reports PSNR/SSIM/LPIPS against
    the ground truth when available.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--task', type=str, default='masked_denoising')
    parser.add_argument('--scale', type=int, default=1, help='scale factor: 1, 2, 3, 4, 8') # 1 for dn and jpeg car
    parser.add_argument('--noise', type=int, default=15, help='noise level: 15, 25, 50')
    parser.add_argument('--jpeg', type=int, default=40, help='scale factor: 10, 20, 30, 40')
    parser.add_argument('--training_patch_size', type=int, default=128, help='patch size used in training SwinIR. '
                                       'Just used to differentiate two different settings in Table 2 of the paper. '
                                       'Images are NOT tested patch by patch.')
    parser.add_argument('--large_model', action='store_true', help='use large model, only provided for real image sr')
    parser.add_argument('--model_path', type=str,
                        default='model_zoo/swinir/001_classicalSR_DIV2K_s48w8_SwinIR-M_x2.pth')
    parser.add_argument('--folder_lq', type=str, default=None, help='input low-quality test image folder')
    parser.add_argument('--folder_gt', type=str, default=None, help='input ground-truth test image folder')
    parser.add_argument('--tile', type=int, default=None, help='Tile size, None for no tile during testing (testing as a whole)')
    parser.add_argument('--tile_overlap', type=int, default=32, help='Overlapping of different tiles')
    parser.add_argument('--opt', type=str, help='Path to option JSON file.')
    parser.add_argument('--name', type=str, default="test", help='Path to option JSON file.')
    opt = option.parse(parser.parse_args().opt, is_train=False)
    # Expose the generator config globally so define_model() can read it.
    global opt_net
    opt_net = opt['netG']
    args = parser.parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # set up model: download the official checkpoint when not present locally
    if os.path.exists(args.model_path):
        print(f'loading model from {args.model_path}')
    else:
        os.makedirs(os.path.dirname(args.model_path), exist_ok=True)
        url = 'https://github.com/JingyunLiang/SwinIR/releases/download/v0.0/{}'.format(os.path.basename(args.model_path))
        r = requests.get(url, allow_redirects=True)
        print(f'downloading model {args.model_path}')
        open(args.model_path, 'wb').write(r.content)
    model = define_model(args)
    model.eval()
    model = model.to(device)
    # setup folder and path
    folder, save_dir, border, window_size = setup(args)
    print(folder)
    os.makedirs(save_dir, exist_ok=True)
    test_results = OrderedDict()
    test_results['psnr'] = []
    test_results['ssim'] = []
    test_results['psnr_y'] = []
    test_results['ssim_y'] = []
    test_results['psnr_b'] = []
    test_results['lpips'] = []
    psnr, ssim, psnr_y, ssim_y, psnr_b, lpips_ = 0, 0, 0, 0, 0, 0
    loss_fn_alex = lpips.LPIPS(net='alex').cuda()  # best forward scores
    for idx, path in enumerate(sorted(glob.glob(os.path.join(folder, '*')))):
        # print(1)
        # read image
        imgname, img_lq, img_gt = get_image_pair(args, path)  # image to HWC-BGR, float32
        img_lq = np.transpose(img_lq if img_lq.shape[2] == 1 else img_lq[:, :, [2, 1, 0]], (2, 0, 1))  # HCW-BGR to CHW-RGB
        img_lq = torch.from_numpy(img_lq).float().unsqueeze(0).to(device)  # CHW-RGB to NCHW-RGB
        # inference
        with torch.no_grad():
            # pad input image to be a multiple of window_size
            _, _, h_old, w_old = img_lq.size()
            h_pad = (h_old // window_size + 1) * window_size - h_old
            w_pad = (w_old // window_size + 1) * window_size - w_old
            img_lq = torch.cat([img_lq, torch.flip(img_lq, [2])], 2)[:, :, :h_old + h_pad, :]
            img_lq = torch.cat([img_lq, torch.flip(img_lq, [3])], 3)[:, :, :, :w_old + w_pad]
            output = test(img_lq, model, args, window_size)
            output = output[..., :h_old * args.scale, :w_old * args.scale]
        # save image
        output = output.data.squeeze().float().cpu().clamp_(0, 1).numpy()
        if output.ndim == 3:
            output = np.transpose(output[[2, 1, 0], :, :], (1, 2, 0))  # CHW-RGB to HCW-BGR
        output = (output * 255.0).round().astype(np.uint8)  # float32 to uint8
        cv2.imwrite(f'{save_dir}/{imgname}_SwinIR.png', output)
        # evaluate psnr/ssim/psnr_b
        if img_gt is not None:
            img_gt = (img_gt * 255.0).round().astype(np.uint8)  # float32 to uint8
            img_gt = img_gt[:h_old * args.scale, :w_old * args.scale, ...]  # crop gt
            img_gt = np.squeeze(img_gt)
            psnr = util.calculate_psnr(output, img_gt, border=border)
            ssim = util.calculate_ssim(output, img_gt, border=border)
            lpips_ = loss_fn_alex(im2tensor(output).cuda(), im2tensor(img_gt).cuda()).item()
            test_results['psnr'].append(psnr)
            test_results['ssim'].append(ssim)
            test_results['lpips'].append(lpips_)
            if img_gt.ndim == 3:  # RGB image
                output_y = util.bgr2ycbcr(output.astype(np.float32) / 255.) * 255.
                img_gt_y = util.bgr2ycbcr(img_gt.astype(np.float32) / 255.) * 255.
                psnr_y = util.calculate_psnr(output_y, img_gt_y, border=border)
                ssim_y = util.calculate_ssim(output_y, img_gt_y, border=border)
                test_results['psnr_y'].append(psnr_y)
                test_results['ssim_y'].append(ssim_y)
                print('Testing {:d} {:20s} - PSNR: {:.2f} dB; SSIM: {:.4f}; '
                      'PSNR_Y: {:.2f} dB; SSIM_Y: {:.4f}; '
                      'LPIPS: {:.4f}'.
                      format(idx, imgname, psnr, ssim, psnr_y, ssim_y, lpips_))
        else:
            print('Testing {:d} {:20s}'.format(idx, imgname))
    # summarize psnr/ssim over the whole folder
    if img_gt is not None:
        ave_psnr = sum(test_results['psnr']) / len(test_results['psnr'])
        ave_ssim = sum(test_results['ssim']) / len(test_results['ssim'])
        ave_lpips = sum(test_results['lpips']) / len(test_results['lpips'])
        print('\n{} \n-- Average PSNR/SSIM(RGB): {:.2f} dB; {:.4f}'.format(save_dir, ave_psnr, ave_ssim))
        if img_gt.ndim == 3:
            ave_psnr_y = sum(test_results['psnr_y']) / len(test_results['psnr_y'])
            ave_ssim_y = sum(test_results['ssim_y']) / len(test_results['ssim_y'])
            print('-- Average PSNR_Y/SSIM_Y/LPIPS: {:.2f}/{:.4f}/{:.4f}'.format(ave_psnr_y, ave_ssim_y, ave_lpips))
def define_model(args):
    """Build the SwinIR network for ``args.task`` and load its checkpoint.

    The ``masked_denoising`` branch builds the network from the JSON option
    file (read via the global ``opt_net``); the other branches use fixed
    configurations. Weights are loaded strictly from ``args.model_path``
    under the key ``param_key_g`` when present, else from the raw state dict.
    """
    if args.task == 'masked_denoising':
        # network hyper-parameters come from the option file's netG section
        global opt_net
        model = net(upscale=opt_net['upscale'],
                    in_chans=opt_net['in_chans'],
                    img_size=opt_net['img_size'],
                    window_size=opt_net['window_size'],
                    img_range=opt_net['img_range'],
                    depths=opt_net['depths'],
                    embed_dim=opt_net['embed_dim'],
                    num_heads=opt_net['num_heads'],
                    mlp_ratio=opt_net['mlp_ratio'],
                    upsampler=opt_net['upsampler'],
                    resi_connection=opt_net['resi_connection'],
                    talking_heads=opt_net['talking_heads'],
                    use_attn_fn=opt_net['attn_fn'],
                    head_scale=opt_net['head_scale'],
                    on_attn=opt_net['on_attn'],
                    use_mask=opt_net['use_mask'],
                    mask_ratio1=opt_net['mask_ratio1'],
                    mask_ratio2=opt_net['mask_ratio2'],
                    mask_is_diff=opt_net['mask_is_diff'],
                    type=opt_net['type'],
                    opt=opt_net,
                    )
        param_key_g = 'params'
    # real-world image sr
    elif args.task == 'real_sr':
        if not args.large_model:
            # use 'nearest+conv' to avoid block artifacts
            model = net(upscale=4, in_chans=3, img_size=64, window_size=8,
                        img_range=1., depths=[6, 6, 6, 6, 6, 6], embed_dim=180, num_heads=[6, 6, 6, 6, 6, 6],
                        mlp_ratio=2, upsampler='nearest+conv', resi_connection='1conv')
        else:
            # larger model size; use '3conv' to save parameters and memory; use ema for GAN training
            model = net(upscale=4, in_chans=3, img_size=64, window_size=8,
                        img_range=1., depths=[6, 6, 6, 6, 6, 6, 6, 6, 6], embed_dim=240,
                        num_heads=[8, 8, 8, 8, 8, 8, 8, 8, 8],
                        mlp_ratio=2, upsampler='nearest+conv', resi_connection='3conv')
        param_key_g = 'params_ema'
    # grayscale image denoising
    elif args.task == 'gray_dn':
        model = net(upscale=1, in_chans=1, img_size=128, window_size=8,
                    img_range=1., depths=[6, 6, 6, 6, 6, 6], embed_dim=180, num_heads=[6, 6, 6, 6, 6, 6],
                    mlp_ratio=2, upsampler='', resi_connection='1conv')
        param_key_g = 'params'
    # color image denoising
    elif args.task == 'color_dn':
        model = net(upscale=1, in_chans=3, img_size=128, window_size=8,
                    img_range=1., depths=[6, 6, 6, 6, 6, 6], embed_dim=180, num_heads=[6, 6, 6, 6, 6, 6],
                    mlp_ratio=2, upsampler='', resi_connection='1conv')
        param_key_g = 'params'
    # JPEG compression artifact reduction
    # use window_size=7 because JPEG encoding uses 8x8; use img_range=255 because it's sligtly better than 1
    elif args.task == 'jpeg_car':
        model = net(upscale=1, in_chans=1, img_size=126, window_size=7,
                    img_range=255., depths=[6, 6, 6, 6, 6, 6], embed_dim=180, num_heads=[6, 6, 6, 6, 6, 6],
                    mlp_ratio=2, upsampler='', resi_connection='1conv')
        param_key_g = 'params'
    pretrained_model = torch.load(args.model_path)
    model.load_state_dict(pretrained_model[param_key_g] if param_key_g in pretrained_model.keys() else pretrained_model, strict=True)
    return model
def setup(args):
# 001 classical image sr/ 002 lightweight image sr
if args.task in ['masked_denoising', 'classical_sr', 'lightweight_sr']:
save_dir = f'results/{args.name}'
folder = args.folder_gt
border = args.scale
window_size = 8
# 003 real-world image sr
elif args.task in ['real_sr']:
save_dir = f'results/swinir_{args.task}_x{args.scale}'
if args.large_model:
save_dir += '_large'
folder = args.folder_lq
border = 0
window_size = 8
# 004 grayscale image denoising/ 005 color image denoising
elif args.task in ['gray_dn', 'color_dn']:
save_dir = f'results/swinir_{args.task}_noise{args.noise}'
folder = args.folder_gt
border = 0
window_size = 8
# 006 JPEG compression artifact reduction
elif args.task in ['jpeg_car']:
save_dir = f'results/swinir_{args.task}_jpeg{args.jpeg}'
folder = args.folder_gt
border = 0
window_size = 7
return folder, save_dir, border, window_size
def get_image_pair(args, path):
    """Load ``(imgname, img_lq, img_gt)`` for one test image according to the task.

    Images are float32 HWC-BGR arrays in [0, 1]. ``img_gt`` is ``None`` for
    ``real_sr``; for the denoising and JPEG tasks the degraded input is
    synthesized on the fly from the ground truth with a fixed random seed.
    """
    (imgname, imgext) = os.path.splitext(os.path.basename(path))
    # masked denoising / classical sr / lightweight sr (load lq-gt image pairs)
    if args.task in ['masked_denoising', 'classical_sr', 'lightweight_sr']:
        img_gt = cv2.imread(path, cv2.IMREAD_COLOR).astype(np.float32) / 255.
        # img_lq = cv2.imread(f'{args.folder_lq}/{imgname}_x{args.scale}{imgext}', cv2.IMREAD_COLOR).astype(np.float32) / 255.
        # The LQ counterpart may be stored as .png or .tif. cv2.imread returns
        # None for a missing file, so .astype raises AttributeError and we
        # fall back to the other extension. (Was a bare `except:`, which also
        # swallowed KeyboardInterrupt/SystemExit and genuine bugs.)
        try:
            imgext = '.png'
            img_lq = cv2.imread(f'{args.folder_lq}/{imgname}{imgext}', cv2.IMREAD_COLOR).astype(np.float32) / 255.
        except AttributeError:
            imgext = '.tif'
            img_lq = cv2.imread(f'{args.folder_lq}/{imgname}{imgext}', cv2.IMREAD_COLOR).astype(np.float32) / 255.
    # real-world image sr (load lq image only)
    elif args.task in ['real_sr']:
        img_gt = None
        img_lq = cv2.imread(path, cv2.IMREAD_COLOR).astype(np.float32) / 255.
    # grayscale image denoising (load gt image and generate lq image on-the-fly)
    elif args.task in ['gray_dn']:
        img_gt = cv2.imread(path, cv2.IMREAD_GRAYSCALE).astype(np.float32) / 255.
        np.random.seed(seed=0)  # fixed seed => reproducible synthetic noise
        img_lq = img_gt + np.random.normal(0, args.noise / 255., img_gt.shape)
        img_gt = np.expand_dims(img_gt, axis=2)
        img_lq = np.expand_dims(img_lq, axis=2)
    # color image denoising (load gt image and generate lq image on-the-fly)
    elif args.task in ['color_dn']:
        img_gt = cv2.imread(path, cv2.IMREAD_COLOR).astype(np.float32) / 255.
        np.random.seed(seed=0)
        img_lq = img_gt + np.random.normal(0, args.noise / 255., img_gt.shape)
    # JPEG compression artifact reduction (load gt image and generate lq image on-the-fly)
    elif args.task in ['jpeg_car']:
        img_gt = cv2.imread(path, 0)
        result, encimg = cv2.imencode('.jpg', img_gt, [int(cv2.IMWRITE_JPEG_QUALITY), args.jpeg])
        img_lq = cv2.imdecode(encimg, 0)
        img_gt = np.expand_dims(img_gt, axis=2).astype(np.float32) / 255.
        img_lq = np.expand_dims(img_lq, axis=2).astype(np.float32) / 255.
    return imgname, img_lq, img_gt
def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):
    """Convert an HxWxC image array in [0, 255] to a 1xCxHxW tensor in [-1, 1].

    ``imtype`` is accepted for API compatibility but unused.
    """
    shifted = image / factor - cent
    with_batch_axis = shifted[:, :, :, np.newaxis]
    return torch.Tensor(with_batch_axis.transpose((3, 2, 0, 1)))
def test(img_lq, model, args, window_size):
if args.tile is None:
# test the image as a whole
output = model(img_lq)
else:
# test the image tile by tile
b, c, h, w = img_lq.size()
tile = min(args.tile, h, w)
assert tile % window_size == 0, "tile size should be a multiple of window_size"
tile_overlap = args.tile_overlap
sf = args.scale
stride = tile - tile_overlap
h_idx_list = list(range(0, h-tile, stride)) + [h-tile]
w_idx_list = list(range(0, w-tile, stride)) + [w-tile]
E = torch.zeros(b, c, h*sf, w*sf).type_as(img_lq)
W = torch.zeros_like(E)
for h_idx in h_idx_list:
for w_idx in w_idx_list:
in_patch = img_lq[..., h_idx:h_idx+tile, w_idx:w_idx+tile]
out_patch = model(in_patch)
out_patch_mask = torch.ones_like(out_patch)
E[..., h_idx*sf:(h_idx+tile)*sf, w_idx*sf:(w_idx+tile)*sf].add_(out_patch)
W[..., h_idx*sf:(h_idx+tile)*sf, w_idx*sf:(w_idx+tile)*sf].add_(out_patch_mask)
output = E.div_(W)
return output
if __name__ == '__main__':
    # Entry point: run the full evaluation pipeline.
    main()
| 15,007 | 45.9 | 133 | py |
MaskedDenoising | MaskedDenoising-main/main_train_psnr.py | import os.path
import math
import argparse
import time
import random
import numpy as np
from collections import OrderedDict
import logging
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import torch
from utils import utils_logger
from utils import utils_image as util
from utils import utils_option as option
from utils.utils_dist import get_dist_info, init_dist
from data.select_dataset import define_Dataset
from models.select_model import define_Model
import lpips
from tensorboardX import SummaryWriter
from torchvision.utils import make_grid
'''
# --------------------------------------------
# training code for MSRResNet
# --------------------------------------------
# Kai Zhang (cskaizhang@gmail.com)
# github: https://github.com/cszn/KAIR
# --------------------------------------------
# https://github.com/xinntao/BasicSR
# --------------------------------------------
'''
# Disable cuDNN globally for this training script.
# NOTE(review): presumably to avoid cuDNN kernel issues or non-determinism
# during training — confirm before re-enabling.
torch.backends.cudnn.enabled = False
def im2tensor(image, imtype=np.uint8, cent=1., factor=255./2.):
    """Convert an HxWxC image array in [0, 255] to a 1xCxHxW tensor in [-1, 1].

    ``imtype`` is accepted for API compatibility but unused.
    """
    normalized = image / factor - cent
    expanded = normalized[:, :, :, np.newaxis]
    return torch.Tensor(expanded.transpose((3, 2, 0, 1)))
def main(json_path='options/train_msrresnet_psnr.json'):
'''
# ----------------------------------------
# Step--1 (prepare opt)
# ----------------------------------------
'''
parser = argparse.ArgumentParser()
parser.add_argument('--opt', type=str, default=json_path, help='Path to option JSON file.')
parser.add_argument('--launcher', default='pytorch', help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--dist', default=False)
opt = option.parse(parser.parse_args().opt, is_train=True)
opt['dist'] = parser.parse_args().dist
writer = SummaryWriter('./runs/' + opt['task'])
# ----------------------------------------
# distributed settings
# ----------------------------------------
if opt['dist']:
init_dist('pytorch')
opt['rank'], opt['world_size'] = get_dist_info()
if opt['rank'] == 0:
util.mkdirs((path for key, path in opt['path'].items() if 'pretrained' not in key))
# ----------------------------------------
# update opt
# ----------------------------------------
# -->-->-->-->-->-->-->-->-->-->-->-->-->-
init_iter_G, init_path_G = option.find_last_checkpoint(opt['path']['models'], net_type='G')
init_iter_E, init_path_E = option.find_last_checkpoint(opt['path']['models'], net_type='E')
opt['path']['pretrained_netG'] = init_path_G
opt['path']['pretrained_netE'] = init_path_E
init_iter_optimizerG, init_path_optimizerG = option.find_last_checkpoint(opt['path']['models'], net_type='optimizerG')
opt['path']['pretrained_optimizerG'] = init_path_optimizerG
current_step = max(init_iter_G, init_iter_E, init_iter_optimizerG)
# current_step = 0
border = opt['scale']
# --<--<--<--<--<--<--<--<--<--<--<--<--<-
# ----------------------------------------
# save opt to a '../option.json' file
# ----------------------------------------
if opt['rank'] == 0:
option.save(opt)
# ----------------------------------------
# return None for missing key
# ----------------------------------------
opt = option.dict_to_nonedict(opt)
# ----------------------------------------
# configure logger
# ----------------------------------------
if opt['rank'] == 0:
logger_name = 'train'
utils_logger.logger_info(logger_name, os.path.join(opt['path']['log'], logger_name+'.log'))
logger = logging.getLogger(logger_name)
logger.info(option.dict2str(opt))
# ----------------------------------------
# seed
# ----------------------------------------
seed = opt['train']['manual_seed']
if seed is None:
seed = random.randint(1, 10000)
print('Random seed: {}'.format(seed))
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
'''
# ----------------------------------------
# Step--2 (creat dataloader)
# ----------------------------------------
'''
# ----------------------------------------
# 1) create_dataset
# 2) creat_dataloader for train and test
# ----------------------------------------
for phase, dataset_opt in opt['datasets'].items():
if phase == 'train':
train_set = define_Dataset(dataset_opt)
train_size = int(math.ceil(len(train_set) / dataset_opt['dataloader_batch_size']))
if opt['rank'] == 0:
logger.info('Number of train images: {:,d}, iters: {:,d}'.format(len(train_set), train_size))
if opt['dist']:
# train_sampler = DistributedSampler(train_set, shuffle=dataset_opt['dataloader_shuffle'], drop_last=True, seed=seed)
train_sampler = DistributedSampler(train_set, shuffle=dataset_opt['dataloader_shuffle'])
train_loader = DataLoader(train_set,
batch_size=dataset_opt['dataloader_batch_size']//opt['num_gpu'],
shuffle=False,
num_workers=dataset_opt['dataloader_num_workers']//opt['num_gpu'],
drop_last=True,
pin_memory=True,
sampler=train_sampler)
else:
train_loader = DataLoader(train_set,
batch_size=dataset_opt['dataloader_batch_size'],
shuffle=dataset_opt['dataloader_shuffle'],
num_workers=dataset_opt['dataloader_num_workers'],
drop_last=True,
pin_memory=True)
elif phase == 'test':
test_set = define_Dataset(dataset_opt)
test_loader = DataLoader(test_set, batch_size=1,
shuffle=False, num_workers=1,
drop_last=False, pin_memory=True)
else:
raise NotImplementedError("Phase [%s] is not recognized." % phase)
'''
# ----------------------------------------
# Step--3 (initialize model)
# ----------------------------------------
'''
model = define_Model(opt)
model.init_train()
if opt['rank'] == 0:
logger.info(model.info_network())
logger.info(model.info_params())
# ==================================================================
loss_fn_alex = lpips.LPIPS(net='alex').cuda()
best_PSNRY = 0
best_step = 0
# ==================================================================
'''
# ----------------------------------------
# Step--4 (main training)
# ----------------------------------------
'''
for epoch in range(1000000): # keep running
if opt['dist']:
train_sampler.set_epoch(epoch)
for i, train_data in enumerate(train_loader):
current_step += 1
# -------------------------------
# 1) update learning rate
# -------------------------------
model.update_learning_rate(current_step)
# -------------------------------
# 2) feed patch pairs
# -------------------------------
model.feed_data(train_data)
# -------------------------------
# 3) optimize parameters
# -------------------------------
model.optimize_parameters(current_step)
# -------------------------------
# 4) training information
# -------------------------------
if current_step % opt['train']['checkpoint_print'] == 0 and opt['rank'] == 0:
logs = model.current_log() # such as loss
message = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> '.format(epoch, current_step, model.current_learning_rate())
for k, v in logs.items(): # merge log information into message
message += '{:s}: {:.3e} '.format(k, v)
# ----------------------------------------
writer.add_scalar('loss', v, global_step=current_step)
# ----------------------------------------
logger.info(message)
# -------------------------------
# 5) save model
# -------------------------------
if current_step % opt['train']['checkpoint_save'] == 0 and opt['rank'] == 0:
logger.info('Saving the model.')
model.save(current_step)
# -------------------------------
# 6) testing
# -------------------------------
if current_step % opt['train']['checkpoint_test'] == 0 and opt['rank'] == 0:
avg_psnr = 0.0
avg_ssim = 0.0
avg_psnrY = 0.0
avg_ssimY = 0.0
avg_lpips = 0.0
idx = 0
save_list = []
for test_data in test_loader:
idx += 1
image_name_ext = os.path.basename(test_data['L_path'][0])
img_name, ext = os.path.splitext(image_name_ext)
img_dir = os.path.join(opt['path']['images'], img_name)
util.mkdir(img_dir)
model.feed_data(test_data)
model.test()
visuals = model.current_visuals()
E_img = util.tensor2uint(visuals['E'])
H_img = util.tensor2uint(visuals['H'])
# -----------------------
# save estimated image E
# -----------------------
save_img_path = os.path.join(img_dir, '{:s}_{:d}.png'.format(img_name, current_step))
util.imsave(E_img, save_img_path)
# -----------------------
# calculate PSNR
# -----------------------
current_psnr = util.calculate_psnr(E_img, H_img, border=border)
# ==================================================================
current_ssim = util.calculate_ssim(E_img, H_img, border=border)
current_lpips = loss_fn_alex(im2tensor(E_img).cuda(), im2tensor(H_img).cuda()).item()
output_y = util.bgr2ycbcr(E_img.astype(np.float32) / 255.) * 255.
img_gt_y = util.bgr2ycbcr(H_img.astype(np.float32) / 255.) * 255.
psnr_y = util.calculate_psnr(output_y, img_gt_y, border=border)
ssim_y = util.calculate_ssim(output_y, img_gt_y, border=border)
# ==================================================================
logger.info('{:->4d}--> {:>20s} | PSNR: {:<4.2f}, SSIM: {:<5.4f}, PSNRY: {:<4.2f}, SSIMY: {:<5.4f}, LPIPS: {:<5.4f},'.format(idx, image_name_ext, current_psnr, current_ssim, psnr_y, ssim_y, current_lpips))
# logger.info('{:->4d}--> {:>10s} | {:<4.2f}dB'.format(idx, image_name_ext, current_psnr))
avg_psnr += current_psnr
avg_ssim += current_ssim
avg_psnrY += psnr_y
avg_ssimY += ssim_y
avg_lpips += current_lpips
if img_name in opt['train']['save_image']:
print(img_name)
save_list.append(util.uint2tensor3(E_img)[:, :512, :512])
avg_psnr = avg_psnr / idx
avg_ssim = avg_ssim / idx
avg_psnrY = avg_psnrY / idx
avg_ssimY = avg_ssimY / idx
avg_lpips = avg_lpips / idx
if len(save_list) > 0 and current_step % opt['train']['checkpoint_save'] == 0 and opt['rank'] == 0:
save_images = make_grid(save_list, nrow=len(save_list))
writer.add_image("test", save_images, global_step=current_step)
# avg_psnr += current_psnr
# avg_psnr = avg_psnr / idx
if avg_psnrY >= best_PSNRY:
best_step = current_step
best_PSNRY = avg_psnrY
# testing log
# logger.info('<epoch:{:3d}, iter:{:8,d}, Average PSNR : {:<.2f}dB\n'.format(epoch, current_step, avg_psnr))
logger.info('<epoch:{:3d}, iter:{:8,d}, Average: PSNR: {:<.2f}, SSIM: {:<.4f}, PSNRY: {:<.2f}, SSIMY: {:<.4f}, LPIPS: {:<.4f}'.format(epoch, current_step, avg_psnr, avg_ssim, avg_psnrY, avg_ssimY, avg_lpips))
logger.info('--- best PSNRY ---> iter:{:8,d}, Average: PSNR: {:<.2f}\n'.format(best_step, best_PSNRY))
writer.add_scalar('PSNRY', avg_psnrY, global_step=current_step)
writer.add_scalar('SSIMY', avg_ssimY, global_step=current_step)
writer.add_scalar('LPIPS', avg_lpips, global_step=current_step)
# Standard script guard: run training only when this file is executed
# directly (not when it is imported as a module).
if __name__ == '__main__':
    main()
| 13,438 | 39.236527 | 225 | py |
MaskedDenoising | MaskedDenoising-main/models/network_cnn.py |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class MeanShift(nn.Conv2d):
    """Fixed 1x1 convolution that subtracts (sign=-1) or adds (sign=+1) a
    per-channel RGB mean, scaled by ``rgb_range`` and divided by ``rgb_std``.

    The kernel is an identity matrix divided by the std, so the layer is a
    frozen per-channel affine transform: y = x / std + sign * range * mean / std.
    """
    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        # Identity kernel scaled by 1/std.
        self.weight.data = torch.eye(3).view(3, 3, 1, 1)
        self.weight.data.div_(std.view(3, 1, 1, 1))
        # Bias carries the (scaled, signed) channel means.
        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)
        self.bias.data.div_(std)
        # Bug fix: `self.requires_grad = False` only created a plain Python
        # attribute on the Module and left weight/bias trainable.  Freeze the
        # actual parameters so the mean-shift stays fixed during training.
        for p in self.parameters():
            p.requires_grad = False
def init_weights(modules):
    """No-op placeholder for custom weight initialisation.

    The default PyTorch initialisation is kept.  NOTE(review): callers pass
    the *bound method* ``self.modules`` (not ``self.modules()``), so a real
    implementation here would need to call ``modules()`` first.
    """
    pass
class BasicBlock(nn.Module):
    """A single Conv2d followed by an in-place ReLU, applied as one unit."""

    def __init__(self, in_channels, out_channels, ksize=3, stride=1, pad=1):
        super(BasicBlock, self).__init__()
        conv = nn.Conv2d(in_channels, out_channels, ksize, stride, pad)
        act = nn.ReLU(inplace=True)
        self.body = nn.Sequential(conv, act)
        # init_weights is currently a no-op (and receives the bound method,
        # matching the original call site).
        init_weights(self.modules)

    def forward(self, x):
        return self.body(x)
class ResidualBlock(nn.Module):
    """Two 3x3 convs with an identity skip; ReLU is applied *after* the add."""

    def __init__(self, in_channels, out_channels):
        super(ResidualBlock, self).__init__()
        self.body = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1),
        )
        # init_weights is currently a no-op (kept for parity with BasicBlock).
        init_weights(self.modules)

    def forward(self, x):
        return F.relu(self.body(x) + x)
class CNN5Layer(nn.Module):
    """Small residual CNN: mean-shift head, four conv+ReLU blocks, conv tail,
    with a global skip connection from input to output (rgb_range = 1)."""

    def __init__(self):
        super(CNN5Layer, self).__init__()
        n_feats = 32
        kernel_size = 3
        rgb_mean = (0.4488, 0.4371, 0.4040)
        rgb_std = (1.0, 1.0, 1.0)
        # Subtract the dataset mean on entry and add it back on exit.
        self.sub_mean = MeanShift(1, rgb_mean, rgb_std)
        self.add_mean = MeanShift(1, rgb_mean, rgb_std, 1)
        self.head = BasicBlock(3, n_feats, kernel_size, 1, 1)
        self.b1 = BasicBlock(n_feats, n_feats, kernel_size, 1, 1)
        self.b2 = BasicBlock(n_feats, n_feats, kernel_size, 1, 1)
        self.b3 = BasicBlock(n_feats, n_feats, kernel_size, 1, 1)
        self.b4 = BasicBlock(n_feats, n_feats, kernel_size, 1, 1)
        # The trailing positional 1 is dilation=1 (stride=1, padding=1).
        self.tail = nn.Conv2d(n_feats, 3, kernel_size, 1, 1, 1)

    def forward(self, x):
        feat = self.head(self.sub_mean(x))
        for block in (self.b1, self.b2, self.b3, self.b4):
            feat = block(feat)
        res = self.add_mean(self.tail(feat))
        # Global residual: the trunk predicts a correction to the input.
        return res + x
MaskedDenoising | MaskedDenoising-main/models/model_base.py | import os
import torch
import torch.nn as nn
from utils.utils_bnorm import merge_bn, tidy_sequential
from torch.nn.parallel import DataParallel, DistributedDataParallel
class ModelBase():
    """Base class for model wrappers (training/testing orchestration).

    Stores the option dict, checkpoint directory, target device and LR
    schedulers, and provides generic helpers: (D)DP wrapping/unwrapping,
    checkpoint save/load, network description, EMA update and batch-norm
    merging.  The training hooks (init_train, feed_data, ...) are no-op
    placeholders meant to be overridden by subclasses.
    """
    def __init__(self, opt):
        self.opt = opt                         # parsed option dictionary
        self.save_dir = opt['path']['models']  # directory for model checkpoints
        self.device = torch.device('cuda' if opt['gpu_ids'] is not None else 'cpu')
        self.is_train = opt['is_train']        # training or not
        self.schedulers = []                   # LR schedulers, filled by subclasses
    """
    # ----------------------------------------
    # Preparation before training with data
    # Save model during training
    # ----------------------------------------
    """
    def init_train(self):
        """Hook: build networks/losses/optimizers for training (subclass)."""
        pass
    def load(self):
        """Hook: load pretrained weights (subclass)."""
        pass
    def save(self, label):
        """Hook: save checkpoints tagged with `label` (subclass)."""
        pass
    def define_loss(self):
        """Hook: instantiate the loss function(s) (subclass)."""
        pass
    def define_optimizer(self):
        """Hook: instantiate the optimizer(s) (subclass)."""
        pass
    def define_scheduler(self):
        """Hook: instantiate the LR scheduler(s) (subclass)."""
        pass
    """
    # ----------------------------------------
    # Optimization during training with data
    # Testing/evaluation
    # ----------------------------------------
    """
    def feed_data(self, data):
        """Hook: receive one batch from the dataloader (subclass)."""
        pass
    def optimize_parameters(self):
        """Hook: perform one optimization step (subclass)."""
        pass
    def current_visuals(self):
        """Hook: return images for visualization (subclass)."""
        pass
    def current_losses(self):
        """Hook: return current loss values (subclass)."""
        pass
    def update_learning_rate(self, n):
        """Step every scheduler to iteration/epoch index `n`.

        NOTE(review): passing an index to ``scheduler.step(n)`` is deprecated
        in newer PyTorch; kept as-is for compatibility with the schedulers
        this project constructs — confirm before upgrading torch.
        """
        for scheduler in self.schedulers:
            scheduler.step(n)
    def current_learning_rate(self):
        """Return the current LR of the first scheduler.

        NOTE(review): ``get_lr()`` is deprecated in favour of
        ``get_last_lr()`` in newer PyTorch; kept to match the installed API.
        """
        return self.schedulers[0].get_lr()[0]
    def requires_grad(self, model, flag=True):
        """Enable/disable gradient tracking for every parameter of `model`."""
        for p in model.parameters():
            p.requires_grad = flag
    """
    # ----------------------------------------
    # Information of net
    # ----------------------------------------
    """
    def print_network(self):
        """Hook: print the network structure (subclass)."""
        pass
    def info_network(self):
        """Hook: return a string describing the network (subclass)."""
        pass
    def print_params(self):
        """Hook: print parameter statistics (subclass)."""
        pass
    def info_params(self):
        """Hook: return a string of parameter statistics (subclass)."""
        pass
    def get_bare_model(self, network):
        """Get bare model, especially under wrapping with
        DistributedDataParallel or DataParallel.
        """
        if isinstance(network, (DataParallel, DistributedDataParallel)):
            network = network.module
        return network
    def model_to_device(self, network):
        """Move `network` to self.device and wrap it with
        DistributedDataParallel (if opt['dist']) or DataParallel.

        Args:
            network (nn.Module)
        """
        network = network.to(self.device)
        if self.opt['dist']:
            # DDP options are read from opt with permissive defaults.
            find_unused_parameters = self.opt.get('find_unused_parameters', True)
            use_static_graph = self.opt.get('use_static_graph', False)
            network = DistributedDataParallel(network, device_ids=[torch.cuda.current_device()], find_unused_parameters=find_unused_parameters)
            if use_static_graph:
                # _set_static_graph is a private DDP API; requires the set of
                # used parameters to stay constant across iterations.
                print('Using static graph. Make sure that "unused parameters" will not change during training loop.')
                network._set_static_graph()
        else:
            network = DataParallel(network)
        return network
    # ----------------------------------------
    # network name and number of parameters
    # ----------------------------------------
    def describe_network(self, network):
        """Return a multi-line string: class name, parameter count, structure."""
        network = self.get_bare_model(network)
        msg = '\n'
        msg += 'Networks name: {}'.format(network.__class__.__name__) + '\n'
        msg += 'Params number: {}'.format(sum(map(lambda x: x.numel(), network.parameters()))) + '\n'
        msg += 'Net structure:\n{}'.format(str(network)) + '\n'
        return msg
    # ----------------------------------------
    # parameters description
    # ----------------------------------------
    def describe_params(self, network):
        """Return a table of per-parameter statistics (mean/min/max/std/shape).

        NOTE(review): the header format has one placeholder fewer than the
        arguments supplied ('param_name' is silently dropped by str.format).
        """
        network = self.get_bare_model(network)
        msg = '\n'
        msg += ' | {:^6s} | {:^6s} | {:^6s} | {:^6s} || {:<20s}'.format('mean', 'min', 'max', 'std', 'shape', 'param_name') + '\n'
        for name, param in network.state_dict().items():
            if not 'num_batches_tracked' in name:
                v = param.data.clone().float()
                msg += ' | {:>6.3f} | {:>6.3f} | {:>6.3f} | {:>6.3f} | {} || {:s}'.format(v.mean(), v.min(), v.max(), v.std(), v.shape, name) + '\n'
        return msg
    """
    # ----------------------------------------
    # Save prameters
    # Load prameters
    # ----------------------------------------
    """
    # ----------------------------------------
    # save the state_dict of the network
    # ----------------------------------------
    def save_network(self, save_dir, network, network_label, iter_label):
        """Save the (bare) network state_dict as '<iter>_<label>.pth' on CPU."""
        save_filename = '{}_{}.pth'.format(iter_label, network_label)
        save_path = os.path.join(save_dir, save_filename)
        network = self.get_bare_model(network)
        state_dict = network.state_dict()
        for key, param in state_dict.items():
            # Move tensors to CPU so the checkpoint is device-independent.
            state_dict[key] = param.cpu()
        torch.save(state_dict, save_path)
    # ----------------------------------------
    # load the state_dict of the network
    # ----------------------------------------
    def load_network(self, load_path, network, strict=True, param_key='params'):
        """Load weights into `network` from `load_path`.

        With strict=True the checkpoint keys must match exactly.  With
        strict=False, parameters are copied *positionally* by zipping the two
        state dicts — this assumes both dicts have the same ordering and
        compatible shapes; no shape check is performed (confirm before using
        across architecture changes).
        """
        network = self.get_bare_model(network)
        if strict:
            state_dict = torch.load(load_path)
            # Checkpoints may nest the weights under a param_key (e.g. 'params').
            if param_key in state_dict.keys():
                state_dict = state_dict[param_key]
            network.load_state_dict(state_dict, strict=strict)
        else:
            state_dict_old = torch.load(load_path)
            if param_key in state_dict_old.keys():
                state_dict_old = state_dict_old[param_key]
            state_dict = network.state_dict()
            # Positional copy: relies on dict insertion order matching.
            for ((key_old, param_old),(key, param)) in zip(state_dict_old.items(), state_dict.items()):
                state_dict[key] = param_old
            network.load_state_dict(state_dict, strict=True)
            del state_dict_old, state_dict
    # ----------------------------------------
    # save the state_dict of the optimizer
    # ----------------------------------------
    def save_optimizer(self, save_dir, optimizer, optimizer_label, iter_label):
        """Save the optimizer state_dict as '<iter>_<label>.pth'."""
        save_filename = '{}_{}.pth'.format(iter_label, optimizer_label)
        save_path = os.path.join(save_dir, save_filename)
        torch.save(optimizer.state_dict(), save_path)
    # ----------------------------------------
    # load the state_dict of the optimizer
    # ----------------------------------------
    def load_optimizer(self, load_path, optimizer):
        """Load optimizer state, remapping tensors onto the current CUDA device."""
        optimizer.load_state_dict(torch.load(load_path, map_location=lambda storage, loc: storage.cuda(torch.cuda.current_device())))
    def update_E(self, decay=0.999):
        """Exponential-moving-average update of netE from netG:
        E <- decay * E + (1 - decay) * G.

        NOTE(review): assumes the subclass defines self.netG and self.netE
        with identical parameter names — confirm in the concrete model.
        """
        netG = self.get_bare_model(self.netG)
        netG_params = dict(netG.named_parameters())
        netE_params = dict(self.netE.named_parameters())
        for k in netG_params.keys():
            netE_params[k].data.mul_(decay).add_(netG_params[k].data, alpha=1-decay)
    """
    # ----------------------------------------
    # Merge Batch Normalization for training
    # Merge Batch Normalization for testing
    # ----------------------------------------
    """
    # ----------------------------------------
    # merge bn during training
    # ----------------------------------------
    def merge_bnorm_train(self):
        """Fold BN layers into preceding convs, then rebuild optimizer and
        scheduler (the parameter set changed)."""
        merge_bn(self.netG)
        tidy_sequential(self.netG)
        self.define_optimizer()
        self.define_scheduler()
    # ----------------------------------------
    # merge bn before testing
    # ----------------------------------------
    def merge_bnorm_test(self):
        """Fold BN layers into preceding convs for inference only."""
        merge_bn(self.netG)
        tidy_sequential(self.netG)
| 7,712 | 33.900452 | 148 | py |
MaskedDenoising | MaskedDenoising-main/models/network_rnan.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
# def make_model(args, parent=False):
# return RNAN(args)
### RNAN
def default_conv(in_channels, out_channels, kernel_size, bias=True):
    """Factory for a 'same'-padding Conv2d (padding = kernel_size // 2)."""
    pad = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size, padding=pad, bias=bias)
class MeanShift(nn.Conv2d):
    """Fixed 1x1 convolution that subtracts (sign=-1) or adds (sign=+1) a
    per-channel RGB mean, scaled by ``rgb_range`` and divided by ``rgb_std``.

    Implemented as an identity kernel divided by std plus a bias carrying the
    signed, range-scaled means: y = x / std + sign * range * mean / std.
    """
    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        self.weight.data = torch.eye(3).view(3, 3, 1, 1)
        self.weight.data.div_(std.view(3, 1, 1, 1))
        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)
        self.bias.data.div_(std)
        # Bug fix: `self.requires_grad = False` only set a plain attribute on
        # the Module and left weight/bias trainable.  Freeze the parameters
        # themselves so the mean-shift stays fixed during training.
        for p in self.parameters():
            p.requires_grad = False
class BasicBlock(nn.Sequential):
    """Conv -> [BatchNorm] -> [activation], packed as one Sequential.

    NOTE(review): the default ``act=nn.ReLU(True)`` is a single module
    instance shared by all blocks constructed with the default — harmless
    for a stateless ReLU, but worth knowing.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 bias=False, bn=True, act=nn.ReLU(True)):
        layers = [
            nn.Conv2d(in_channels, out_channels, kernel_size,
                      padding=(kernel_size // 2), stride=stride, bias=bias),
        ]
        if bn:
            layers.append(nn.BatchNorm2d(out_channels))
        if act is not None:
            layers.append(act)
        super(BasicBlock, self).__init__(*layers)
class ResBlock(nn.Module):
    """EDSR-style residual block: y = x + res_scale * conv-act-conv(x)."""

    def __init__(self, conv, n_feat, kernel_size,
                 bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(ResBlock, self).__init__()
        layers = []
        for i in range(2):
            layers.append(conv(n_feat, n_feat, kernel_size, bias=bias))
            if bn:
                layers.append(nn.BatchNorm2d(n_feat))
            if i == 0:
                # Activation only between the two convs.
                layers.append(act)
        self.body = nn.Sequential(*layers)
        self.res_scale = res_scale

    def forward(self, x):
        out = self.body(x).mul(self.res_scale)
        out = out + x
        return out
class Upsampler(nn.Sequential):
    """Sub-pixel (PixelShuffle) upsampler supporting scale = 2^n or 3.

    Any other scale raises NotImplementedError.  ``act`` is a module *class*
    (instantiated per stage when truthy), unlike BasicBlock's instance.
    """

    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):
        layers = []
        if (scale & (scale - 1)) == 0:  # power of two: stack x2 stages
            for _ in range(int(math.log(scale, 2))):
                layers.append(conv(n_feat, 4 * n_feat, 3, bias))
                layers.append(nn.PixelShuffle(2))
                if bn:
                    layers.append(nn.BatchNorm2d(n_feat))
                if act:
                    layers.append(act())
        elif scale == 3:
            layers.append(conv(n_feat, 9 * n_feat, 3, bias))
            layers.append(nn.PixelShuffle(3))
            if bn:
                layers.append(nn.BatchNorm2d(n_feat))
            if act:
                layers.append(act())
        else:
            raise NotImplementedError
        super(Upsampler, self).__init__(*layers)
# add NonLocalBlock2D
# reference: https://github.com/AlexHex7/Non-local_pytorch/blob/master/lib/non_local_simple_version.py
class NonLocalBlock2D(nn.Module):
    """Embedded-Gaussian non-local block (Wang et al., CVPR 2018).

    theta/phi/g are 1x1 embeddings; the output projection W is
    zero-initialised so the block starts as an exact identity (z = x) and
    learns the non-local residual gradually.
    """
    def __init__(self, in_channels, inter_channels):
        super(NonLocalBlock2D, self).__init__()
        self.in_channels = in_channels
        self.inter_channels = inter_channels
        self.g = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
        self.W = nn.Conv2d(in_channels=self.inter_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0)
        # Fix: nn.init.constant is the deprecated out-of-place name (removed
        # in modern PyTorch); use the in-place variant nn.init.constant_.
        nn.init.constant_(self.W.weight, 0)
        nn.init.constant_(self.W.bias, 0)
        self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
        self.phi = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
    def forward(self, x):
        batch_size = x.size(0)
        # g, theta, phi embeddings flattened to (B, N, C') / (B, C', N).
        g_x = self.g(x).view(batch_size, self.inter_channels, -1)
        g_x = g_x.permute(0,2,1)
        theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
        theta_x = theta_x.permute(0,2,1)
        phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
        # Pairwise affinities f: (B, N, N).
        f = torch.matmul(theta_x, phi_x)
        # NOTE(review): softmax over dim=1 normalises across *query* positions
        # (the reference non-local implementation uses the last dim); kept
        # as-is to preserve trained behaviour — confirm intent.
        f_div_C = F.softmax(f, dim=1)
        y = torch.matmul(f_div_C, g_x)
        y = y.permute(0,2,1).contiguous()
        y = y.view(batch_size, self.inter_channels, *x.size()[2:])
        W_y = self.W(y)
        # Residual connection; identity at init because W is zeroed.
        z = W_y + x
        return z
## define trunk branch
class TrunkBranch(nn.Module):
    """Trunk branch: two residual blocks applied in sequence.

    NOTE(review): the bias/bn/act/res_scale arguments are accepted but not
    forwarded — the inner ResBlocks use fixed settings, mirroring the
    original implementation.
    """

    def __init__(self, conv, n_feat, kernel_size,
                 bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(TrunkBranch, self).__init__()
        self.body = nn.Sequential(
            ResBlock(conv, n_feat, kernel_size, bias=True, bn=False,
                     act=nn.ReLU(True), res_scale=1),
            ResBlock(conv, n_feat, kernel_size, bias=True, bn=False,
                     act=nn.ReLU(True), res_scale=1),
        )

    def forward(self, x):
        return self.body(x)
## define mask branch
class MaskBranchDownUp(nn.Module):
    """Mask branch: RB -> down(x2) -> 2xRB -> up(x2) -> RB -> 1x1 conv -> sigmoid.

    Produces a soft attention map in [0, 1] at the input resolution; the
    output of MB_RB1 is added back (skip) before MB_RB3.
    NOTE(review): bias/bn/act/res_scale arguments are accepted but not
    forwarded — the inner ResBlocks use fixed settings, as in the original.
    """

    def __init__(self, conv, n_feat, kernel_size,
                 bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(MaskBranchDownUp, self).__init__()

        def _res_block():
            return ResBlock(conv, n_feat, kernel_size, bias=True, bn=False,
                            act=nn.ReLU(True), res_scale=1)

        self.MB_RB1 = nn.Sequential(_res_block())
        self.MB_Down = nn.Sequential(nn.Conv2d(n_feat, n_feat, 3, stride=2, padding=1))
        self.MB_RB2 = nn.Sequential(_res_block(), _res_block())
        self.MB_Up = nn.Sequential(nn.ConvTranspose2d(n_feat, n_feat, 6, stride=2, padding=2))
        self.MB_RB3 = nn.Sequential(_res_block())
        self.MB_1x1conv = nn.Sequential(nn.Conv2d(n_feat, n_feat, 1, padding=0, bias=True))
        self.MB_sigmoid = nn.Sequential(nn.Sigmoid())

    def forward(self, x):
        head = self.MB_RB1(x)
        bottom = self.MB_RB2(self.MB_Down(head))
        merged = self.MB_RB3(head + self.MB_Up(bottom))
        return self.MB_sigmoid(self.MB_1x1conv(merged))
## define nonlocal mask branch
class NLMaskBranchDownUp(nn.Module):
    """Non-local mask branch: identical to MaskBranchDownUp except the head
    (MB_RB1) pairs a NonLocalBlock2D with the first residual block.

    NOTE(review): bias/bn/act/res_scale arguments are accepted but not
    forwarded — the inner ResBlocks use fixed settings, as in the original.
    """

    def __init__(self, conv, n_feat, kernel_size,
                 bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(NLMaskBranchDownUp, self).__init__()

        def _res_block():
            return ResBlock(conv, n_feat, kernel_size, bias=True, bn=False,
                            act=nn.ReLU(True), res_scale=1)

        self.MB_RB1 = nn.Sequential(NonLocalBlock2D(n_feat, n_feat // 2), _res_block())
        self.MB_Down = nn.Sequential(nn.Conv2d(n_feat, n_feat, 3, stride=2, padding=1))
        self.MB_RB2 = nn.Sequential(_res_block(), _res_block())
        self.MB_Up = nn.Sequential(nn.ConvTranspose2d(n_feat, n_feat, 6, stride=2, padding=2))
        self.MB_RB3 = nn.Sequential(_res_block())
        self.MB_1x1conv = nn.Sequential(nn.Conv2d(n_feat, n_feat, 1, padding=0, bias=True))
        self.MB_sigmoid = nn.Sequential(nn.Sigmoid())

    def forward(self, x):
        head = self.MB_RB1(x)
        bottom = self.MB_RB2(self.MB_Down(head))
        merged = self.MB_RB3(head + self.MB_Up(bottom))
        return self.MB_sigmoid(self.MB_1x1conv(merged))
## define residual attention module
class ResAttModuleDownUpPlus(nn.Module):
    """Residual attention module:
    hx = RA_tail(trunk(shared) * mask(shared) + shared), shared = RA_RB1(x)."""

    def __init__(self, conv, n_feat, kernel_size,
                 bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(ResAttModuleDownUpPlus, self).__init__()
        self.RA_RB1 = nn.Sequential(
            ResBlock(conv, n_feat, kernel_size, bias=True, bn=False,
                     act=nn.ReLU(True), res_scale=1))
        self.RA_TB = nn.Sequential(
            TrunkBranch(conv, n_feat, kernel_size, bias=True, bn=False,
                        act=nn.ReLU(True), res_scale=1))
        self.RA_MB = nn.Sequential(
            MaskBranchDownUp(conv, n_feat, kernel_size, bias=True, bn=False,
                             act=nn.ReLU(True), res_scale=1))
        self.RA_tail = nn.Sequential(*[
            ResBlock(conv, n_feat, kernel_size, bias=True, bn=False,
                     act=nn.ReLU(True), res_scale=1) for _ in range(2)])

    def forward(self, input):
        shared = self.RA_RB1(input)
        gated = self.RA_TB(shared) * self.RA_MB(shared)
        return self.RA_tail(gated + shared)
## define nonlocal residual attention module
class NLResAttModuleDownUpPlus(nn.Module):
    """Non-local residual attention module: same as ResAttModuleDownUpPlus
    but the mask branch is the non-local variant (NLMaskBranchDownUp)."""

    def __init__(self, conv, n_feat, kernel_size,
                 bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(NLResAttModuleDownUpPlus, self).__init__()
        self.RA_RB1 = nn.Sequential(
            ResBlock(conv, n_feat, kernel_size, bias=True, bn=False,
                     act=nn.ReLU(True), res_scale=1))
        self.RA_TB = nn.Sequential(
            TrunkBranch(conv, n_feat, kernel_size, bias=True, bn=False,
                        act=nn.ReLU(True), res_scale=1))
        self.RA_MB = nn.Sequential(
            NLMaskBranchDownUp(conv, n_feat, kernel_size, bias=True, bn=False,
                               act=nn.ReLU(True), res_scale=1))
        self.RA_tail = nn.Sequential(*[
            ResBlock(conv, n_feat, kernel_size, bias=True, bn=False,
                     act=nn.ReLU(True), res_scale=1) for _ in range(2)])

    def forward(self, input):
        shared = self.RA_RB1(input)
        gated = self.RA_TB(shared) * self.RA_MB(shared)
        return self.RA_tail(gated + shared)
class _ResGroup(nn.Module):
    """One (local) residual attention module followed by a fusing conv."""

    def __init__(self, conv, n_feats, kernel_size, act, res_scale):
        super(_ResGroup, self).__init__()
        self.body = nn.Sequential(
            ResAttModuleDownUpPlus(conv, n_feats, kernel_size, bias=True,
                                   bn=False, act=nn.ReLU(True), res_scale=1),
            conv(n_feats, n_feats, kernel_size),
        )

    def forward(self, x):
        return self.body(x)
class _NLResGroup(nn.Module):
    """One non-local residual attention module followed by a fusing conv."""

    def __init__(self, conv, n_feats, kernel_size, act, res_scale):
        super(_NLResGroup, self).__init__()
        self.body = nn.Sequential(
            NLResAttModuleDownUpPlus(conv, n_feats, kernel_size, bias=True,
                                     bn=False, act=nn.ReLU(True), res_scale=1),
            conv(n_feats, n_feats, kernel_size),
        )

    def forward(self, x):
        return self.body(x)
class RNAN(nn.Module):
    """Residual Non-local Attention Network (Zhang et al., ICLR 2019).

    Pipeline: head conv -> non-local group -> (n_resgroup - 2) local groups
    plus a fusing conv -> non-local group -> tail conv, with a global
    residual connection from the input to the output.
    """
    def __init__(self, conv=default_conv):
        super(RNAN, self).__init__()
        # Removed unused locals from the original (n_resblock, reduction,
        # scale) — they were never referenced.
        n_resgroup = 10
        n_feats = 64
        kernel_size = 3
        act = nn.ReLU(True)
        # define head module (shallow feature extraction)
        modules_head = [conv(3, n_feats, kernel_size)]
        # define body module: one non-local group at each end, local groups
        # in between, plus one fusing conv appended to the local stack.
        modules_body_nl_low = [
            _NLResGroup(
                conv, n_feats, kernel_size, act=act, res_scale=1)]
        modules_body = [
            _ResGroup(
                conv, n_feats, kernel_size, act=act, res_scale=1) \
            for _ in range(n_resgroup - 2)]
        modules_body_nl_high = [
            _NLResGroup(
                conv, n_feats, kernel_size, act=act, res_scale=1)]
        modules_body.append(conv(n_feats, n_feats, kernel_size))
        # define tail module (reconstruction)
        modules_tail = [
            conv(n_feats, 3, kernel_size)]
        self.head = nn.Sequential(*modules_head)
        self.body_nl_low = nn.Sequential(*modules_body_nl_low)
        self.body = nn.Sequential(*modules_body)
        self.body_nl_high = nn.Sequential(*modules_body_nl_high)
        self.tail = nn.Sequential(*modules_tail)
    def forward(self, x):
        feats_shallow = self.head(x)
        res = self.body_nl_low(feats_shallow)
        res = self.body(res)
        res = self.body_nl_high(res)
        res_main = self.tail(res)
        # Global residual: the network predicts a correction to the input.
        res_clean = x + res_main
        return res_clean
    def load_state_dict(self, state_dict, strict=False):
        """Tolerant checkpoint loading.

        Size-mismatched 'tail' entries are skipped with a message (useful
        when swapping the reconstruction head); any other copy failure
        raises.  With strict=True, unexpected or missing non-tail keys also
        raise.  (Also repairs the dataset-corrupted final line of the
        original source.)
        """
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if name in own_state:
                if isinstance(param, nn.Parameter):
                    param = param.data
                try:
                    own_state[name].copy_(param)
                except Exception:
                    if name.find('tail') >= 0:
                        print('Replace pre-trained upsampler to new one...')
                    else:
                        raise RuntimeError('While copying the parameter named {}, '
                                           'whose dimensions in the model are {} and '
                                           'whose dimensions in the checkpoint are {}.'
                                           .format(name, own_state[name].size(), param.size()))
            elif strict:
                if name.find('tail') == -1:
                    raise KeyError('unexpected key "{}" in state_dict'
                                   .format(name))
        if strict:
            missing = set(own_state.keys()) - set(state_dict.keys())
            if len(missing) > 0:
                raise KeyError('missing keys in state_dict: "{}"'.format(missing))
MaskedDenoising | MaskedDenoising-main/models/select_network.py | import functools
import torch
from torch.nn import init
"""
# --------------------------------------------
# select the network of G, D and F
# --------------------------------------------
"""
# --------------------------------------------
# Generator, netG, G
# --------------------------------------------
def define_G(opt):
opt_net = opt['netG']
net_type = opt_net['net_type']
# ----------------------------------------
# denoising task
# ----------------------------------------
# ----------------------------------------
# DnCNN
# ----------------------------------------
if net_type == 'dncnn':
from models.network_dncnn import DnCNN as net
netG = net(in_nc=opt_net['in_nc'],
out_nc=opt_net['out_nc'],
nc=opt_net['nc'],
nb=opt_net['nb'], # total number of conv layers
act_mode=opt_net['act_mode'])
# ----------------------------------------
# RNAN
# ----------------------------------------
if net_type == 'rnan':
from models.network_rnan import RNAN as net
netG = net()
# ----------------------------------------
# MIRNet
# ----------------------------------------
if net_type == 'mirnet':
from models.network_mirnet import MIRNet as net
netG = net()
# ----------------------------------------
# RIDNet
# ----------------------------------------
if net_type == 'ridnet':
from models.network_ridnet import RIDNET as net
netG = net()
if net_type == 'cnn':
from models.network_cnn import CNN5Layer as net
netG = net()
# ----------------------------------------
# Flexible DnCNN
# ----------------------------------------
elif net_type == 'fdncnn':
from models.network_dncnn import FDnCNN as net
netG = net(in_nc=opt_net['in_nc'],
out_nc=opt_net['out_nc'],
nc=opt_net['nc'],
nb=opt_net['nb'], # total number of conv layers
act_mode=opt_net['act_mode'])
# ----------------------------------------
# FFDNet
# ----------------------------------------
elif net_type == 'ffdnet':
from models.network_ffdnet import FFDNet as net
netG = net(in_nc=opt_net['in_nc'],
out_nc=opt_net['out_nc'],
nc=opt_net['nc'],
nb=opt_net['nb'],
act_mode=opt_net['act_mode'])
# ----------------------------------------
# others
# ----------------------------------------
# ----------------------------------------
# super-resolution task
# ----------------------------------------
# ----------------------------------------
# SRMD
# ----------------------------------------
elif net_type == 'srmd':
from models.network_srmd import SRMD as net
netG = net(in_nc=opt_net['in_nc'],
out_nc=opt_net['out_nc'],
nc=opt_net['nc'],
nb=opt_net['nb'],
upscale=opt_net['scale'],
act_mode=opt_net['act_mode'],
upsample_mode=opt_net['upsample_mode'])
# ----------------------------------------
# super-resolver prior of DPSR
# ----------------------------------------
elif net_type == 'dpsr':
from models.network_dpsr import MSRResNet_prior as net
netG = net(in_nc=opt_net['in_nc'],
out_nc=opt_net['out_nc'],
nc=opt_net['nc'],
nb=opt_net['nb'],
upscale=opt_net['scale'],
act_mode=opt_net['act_mode'],
upsample_mode=opt_net['upsample_mode'])
# ----------------------------------------
# modified SRResNet v0.0
# ----------------------------------------
elif net_type == 'msrresnet0':
from models.network_msrresnet import MSRResNet0 as net
netG = net(in_nc=opt_net['in_nc'],
out_nc=opt_net['out_nc'],
nc=opt_net['nc'],
nb=opt_net['nb'],
upscale=opt_net['scale'],
act_mode=opt_net['act_mode'],
upsample_mode=opt_net['upsample_mode'])
# ----------------------------------------
# modified SRResNet v0.1
# ----------------------------------------
elif net_type == 'msrresnet1':
from models.network_msrresnet import MSRResNet1 as net
netG = net(in_nc=opt_net['in_nc'],
out_nc=opt_net['out_nc'],
nc=opt_net['nc'],
nb=opt_net['nb'],
upscale=opt_net['scale'],
act_mode=opt_net['act_mode'],
upsample_mode=opt_net['upsample_mode'])
# ----------------------------------------
# RRDB
# ----------------------------------------
elif net_type == 'rrdb': # RRDB
from models.network_rrdb import RRDB as net
netG = net(in_nc=opt_net['in_nc'],
out_nc=opt_net['out_nc'],
nc=opt_net['nc'],
nb=opt_net['nb'],
gc=opt_net['gc'],
upscale=opt_net['scale'],
act_mode=opt_net['act_mode'],
upsample_mode=opt_net['upsample_mode'])
# ----------------------------------------
# RRDBNet
# ----------------------------------------
elif net_type == 'rrdbnet': # RRDBNet
from models.network_rrdbnet import RRDBNet as net
netG = net(in_nc=opt_net['in_nc'],
out_nc=opt_net['out_nc'],
nf=opt_net['nf'],
nb=opt_net['nb'],
gc=opt_net['gc'],
sf=opt_net['scale'])
# ----------------------------------------
# IMDB
# ----------------------------------------
elif net_type == 'imdn': # IMDB
from models.network_imdn import IMDN as net
netG = net(in_nc=opt_net['in_nc'],
out_nc=opt_net['out_nc'],
nc=opt_net['nc'],
nb=opt_net['nb'],
upscale=opt_net['scale'],
act_mode=opt_net['act_mode'],
upsample_mode=opt_net['upsample_mode'])
# ----------------------------------------
# USRNet
# ----------------------------------------
elif net_type == 'usrnet': # USRNet
from models.network_usrnet import USRNet as net
netG = net(n_iter=opt_net['n_iter'],
h_nc=opt_net['h_nc'],
in_nc=opt_net['in_nc'],
out_nc=opt_net['out_nc'],
nc=opt_net['nc'],
nb=opt_net['nb'],
act_mode=opt_net['act_mode'],
downsample_mode=opt_net['downsample_mode'],
upsample_mode=opt_net['upsample_mode']
)
# ----------------------------------------
# Deep Residual U-Net (drunet)
# ----------------------------------------
elif net_type == 'drunet':
from models.network_unet import UNetRes as net
netG = net(in_nc=opt_net['in_nc'],
out_nc=opt_net['out_nc'],
nc=opt_net['nc'],
nb=opt_net['nb'],
act_mode=opt_net['act_mode'],
downsample_mode=opt_net['downsample_mode'],
upsample_mode=opt_net['upsample_mode'],
bias=opt_net['bias'])
# ----------------------------------------
# SwinIR
# ----------------------------------------
elif net_type == 'swinir':
from models.network_swinir import SwinIR as net
netG = net(upscale=opt_net['upscale'],
in_chans=opt_net['in_chans'],
img_size=opt_net['img_size'],
window_size=opt_net['window_size'],
img_range=opt_net['img_range'],
depths=opt_net['depths'],
embed_dim=opt_net['embed_dim'],
num_heads=opt_net['num_heads'],
mlp_ratio=opt_net['mlp_ratio'],
upsampler=opt_net['upsampler'],
resi_connection=opt_net['resi_connection'],
talking_heads=opt_net['talking_heads'],
use_attn_fn=opt_net['attn_fn'],
head_scale=opt_net['head_scale'],
on_attn=opt_net['on_attn'],
use_mask=opt_net['use_mask'],
mask_ratio1=opt_net['mask_ratio1'],
mask_ratio2=opt_net['mask_ratio2'],
mask_is_diff=opt_net['mask_is_diff'],
type=opt_net['type'],
resi_scale=opt_net['resi_scale'],
opt=opt_net,
)
# ----------------------------------------
# SwinIR diff
# ----------------------------------------
elif net_type == 'swinir_diff':
from models.network_swinir_diff import SwinIR as net
netG = net(upscale=opt_net['upscale'],
in_chans=opt_net['in_chans'],
img_size=opt_net['img_size'],
window_size=opt_net['window_size'],
img_range=opt_net['img_range'],
depths=opt_net['depths'],
embed_dim=opt_net['embed_dim'],
num_heads=opt_net['num_heads'],
mlp_ratio=opt_net['mlp_ratio'],
upsampler=opt_net['upsampler'],
resi_connection=opt_net['resi_connection'],
talking_heads=opt_net['talking_heads'],
use_attn_fn=opt_net['attn_fn'],
head_scale=opt_net['head_scale'],
on_attn=opt_net['on_attn'],
use_mask=opt_net['use_mask'],
mask_ratio1=opt_net['mask_ratio1'],
mask_ratio2=opt_net['mask_ratio2'],
mask_is_diff=opt_net['mask_is_diff'],
type=opt_net['type'],
resi_scale=opt_net['resi_scale'],
opt=opt_net,
)
# ----------------------------------------
# SwinIR Dropout
# ----------------------------------------
elif net_type == 'swinir_dropout':
from models.network_swinir_dropout import SwinIR as net
netG = net(upscale=opt_net['upscale'],
in_chans=opt_net['in_chans'],
img_size=opt_net['img_size'],
window_size=opt_net['window_size'],
img_range=opt_net['img_range'],
depths=opt_net['depths'],
embed_dim=opt_net['embed_dim'],
num_heads=opt_net['num_heads'],
mlp_ratio=opt_net['mlp_ratio'],
upsampler=opt_net['upsampler'],
resi_connection=opt_net['resi_connection'],
talking_heads=opt_net['talking_heads'],
use_attn_fn=opt_net['attn_fn'],
head_scale=opt_net['head_scale'],
on_attn=opt_net['on_attn'],
use_mask=opt_net['use_mask'],
mask_ratio1=opt_net['mask_ratio1'],
mask_ratio2=opt_net['mask_ratio2'],
mask_is_diff=opt_net['mask_is_diff'],
type=opt_net['type'],
resi_scale=opt_net['resi_scale'],
opt=opt_net,
)
# ----------------------------------------
# SwinIR Dropout residual
# ----------------------------------------
elif net_type == 'swinir_dropout':
from models.network_swinir_dropout_residual import SwinIR as net
netG = net(upscale=opt_net['upscale'],
in_chans=opt_net['in_chans'],
img_size=opt_net['img_size'],
window_size=opt_net['window_size'],
img_range=opt_net['img_range'],
depths=opt_net['depths'],
embed_dim=opt_net['embed_dim'],
num_heads=opt_net['num_heads'],
mlp_ratio=opt_net['mlp_ratio'],
upsampler=opt_net['upsampler'],
resi_connection=opt_net['resi_connection'],
talking_heads=opt_net['talking_heads'],
use_attn_fn=opt_net['attn_fn'],
head_scale=opt_net['head_scale'],
on_attn=opt_net['on_attn'],
use_mask=opt_net['use_mask'],
mask_ratio1=opt_net['mask_ratio1'],
mask_ratio2=opt_net['mask_ratio2'],
mask_is_diff=opt_net['mask_is_diff'],
type=opt_net['type'],
resi_scale=opt_net['resi_scale'],
opt=opt_net,
)
# ----------------------------------------
# SwinIR residual
# ----------------------------------------
elif net_type == 'swinir_residual':
from models.network_swinir_residual import SwinIR as net
netG = net(upscale=opt_net['upscale'],
in_chans=opt_net['in_chans'],
img_size=opt_net['img_size'],
window_size=opt_net['window_size'],
img_range=opt_net['img_range'],
depths=opt_net['depths'],
embed_dim=opt_net['embed_dim'],
num_heads=opt_net['num_heads'],
mlp_ratio=opt_net['mlp_ratio'],
upsampler=opt_net['upsampler'],
resi_connection=opt_net['resi_connection'],
talking_heads=opt_net['talking_heads'],
use_attn_fn=opt_net['attn_fn'],
head_scale=opt_net['head_scale'],
on_attn=opt_net['on_attn'],
use_mask=opt_net['use_mask'],
mask_ratio1=opt_net['mask_ratio1'],
mask_ratio2=opt_net['mask_ratio2'],
mask_is_diff=opt_net['mask_is_diff'],
type=opt_net['type'],
resi_scale=opt_net['resi_scale'],
opt=opt_net,
)
# ----------------------------------------
# VRTopt_net
# ----------------------------------------
elif net_type == 'vrt':
from models.network_vrt import VRT as net
netG = net(upscale=opt_net['upscale'],
img_size=opt_net['img_size'],
window_size=opt_net['window_size'],
depths=opt_net['depths'],
indep_reconsts=opt_net['indep_reconsts'],
embed_dims=opt_net['embed_dims'],
num_heads=opt_net['num_heads'],
spynet_path=opt_net['spynet_path'],
pa_frames=opt_net['pa_frames'],
deformable_groups=opt_net['deformable_groups'],
nonblind_denoising=opt_net['nonblind_denoising'],
use_checkpoint_attn=opt_net['use_checkpoint_attn'],
use_checkpoint_ffn=opt_net['use_checkpoint_ffn'],
no_checkpoint_attn_blocks=opt_net['no_checkpoint_attn_blocks'],
no_checkpoint_ffn_blocks=opt_net['no_checkpoint_ffn_blocks'])
# ----------------------------------------
# others
# ----------------------------------------
# TODO
else:
raise NotImplementedError('netG [{:s}] is not found.'.format(net_type))
# ----------------------------------------
# initialize weights
# ----------------------------------------
if opt['is_train']:
init_weights(netG,
init_type=opt_net['init_type'],
init_bn_type=opt_net['init_bn_type'],
gain=opt_net['init_gain'])
return netG
# --------------------------------------------
# Discriminator, netD, D
# --------------------------------------------
def define_D(opt):
    """
    Build the discriminator network, netD, from opt['netD'] and always
    initialise its weights (discriminators are trained from scratch).
    """
    conf = opt['netD']
    which = conf['net_type']

    # Each branch imports its architecture lazily so unused ones never load.
    if which == 'discriminator_vgg_96':
        from models.network_discriminator import Discriminator_VGG_96 as d_cls
        netD = d_cls(in_nc=conf['in_nc'],
                     base_nc=conf['base_nc'],
                     ac_type=conf['act_mode'])
    elif which == 'discriminator_vgg_128':
        from models.network_discriminator import Discriminator_VGG_128 as d_cls
        netD = d_cls(in_nc=conf['in_nc'],
                     base_nc=conf['base_nc'],
                     ac_type=conf['act_mode'])
    elif which == 'discriminator_vgg_192':
        from models.network_discriminator import Discriminator_VGG_192 as d_cls
        netD = d_cls(in_nc=conf['in_nc'],
                     base_nc=conf['base_nc'],
                     ac_type=conf['act_mode'])
    elif which == 'discriminator_vgg_128_SN':
        from models.network_discriminator import Discriminator_VGG_128_SN as d_cls
        netD = d_cls()
    elif which == 'discriminator_patchgan':
        from models.network_discriminator import Discriminator_PatchGAN as d_cls
        netD = d_cls(input_nc=conf['in_nc'],
                     ndf=conf['base_nc'],
                     n_layers=conf['n_layers'],
                     norm_type=conf['norm_type'])
    elif which == 'discriminator_unet':
        from models.network_discriminator import Discriminator_UNet as d_cls
        netD = d_cls(input_nc=conf['in_nc'],
                     ndf=conf['base_nc'])
    else:
        raise NotImplementedError('netD [{:s}] is not found.'.format(which))

    # ----------------------------------------
    # initialize weights
    # ----------------------------------------
    init_weights(netD,
                 init_type=conf['init_type'],
                 init_bn_type=conf['init_bn_type'],
                 gain=conf['init_gain'])

    return netD
# --------------------------------------------
# VGGfeature, netF, F
# --------------------------------------------
def define_F(opt, use_bn=False):
    """
    Build the frozen VGG feature extractor, netF, used for perceptual loss.
    Picks the pre-ReLU conv layer of VGG19-54: index 49 with BN, 34 without.
    """
    from models.network_feature import VGGFeatureExtractor
    device = torch.device('cuda' if opt['gpu_ids'] else 'cpu')
    feature_layer = 49 if use_bn else 34  # pytorch pretrained VGG19-54, before ReLU
    netF = VGGFeatureExtractor(feature_layer=feature_layer,
                               use_bn=use_bn,
                               use_input_norm=True,
                               device=device)
    netF.eval()  # frozen: gradients flow back to the input only, never to VGG weights
    return netF
"""
# --------------------------------------------
# weights initialization
# --------------------------------------------
"""
def init_weights(net, init_type='xavier_uniform', init_bn_type='uniform', gain=1):
    """
    Initialise conv/linear and BatchNorm2d weights of `net` in place.

    # Kai Zhang, https://github.com/cszn/KAIR

    Args:
        net: network whose modules are (re)initialised via net.apply.
        init_type: 'normal' | 'uniform' | 'xavier_normal' | 'xavier_uniform' |
            'kaiming_normal' | 'kaiming_uniform' | 'orthogonal';
            'default'/'none' skip initialisation entirely.
        init_bn_type: 'uniform' (preferred) | 'constant' for BatchNorm2d.
        gain: scale factor used by most schemes (e.g. 0.2).
    """
    if init_type in ['default', 'none']:
        print('Pass this initialization! Initialization was done during network definition!')
        return

    def _init_module(m):
        cls_name = m.__class__.__name__

        if 'Conv' in cls_name or 'Linear' in cls_name:
            if init_type == 'normal':
                init.normal_(m.weight.data, 0, 0.1)
                m.weight.data.clamp_(-1, 1).mul_(gain)
            elif init_type == 'uniform':
                init.uniform_(m.weight.data, -0.2, 0.2)
                m.weight.data.mul_(gain)
            elif init_type == 'xavier_normal':
                init.xavier_normal_(m.weight.data, gain=gain)
                m.weight.data.clamp_(-1, 1)
            elif init_type == 'xavier_uniform':
                init.xavier_uniform_(m.weight.data, gain=gain)
            elif init_type == 'kaiming_normal':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in', nonlinearity='relu')
                m.weight.data.clamp_(-1, 1).mul_(gain)
            elif init_type == 'kaiming_uniform':
                init.kaiming_uniform_(m.weight.data, a=0, mode='fan_in', nonlinearity='relu')
                m.weight.data.mul_(gain)
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('Initialization method [{:s}] is not implemented'.format(init_type))
            if m.bias is not None:
                m.bias.data.zero_()

        elif 'BatchNorm2d' in cls_name:
            if init_bn_type == 'uniform':  # preferred
                if m.affine:
                    init.uniform_(m.weight.data, 0.1, 1.0)
                    init.constant_(m.bias.data, 0.0)
            elif init_bn_type == 'constant':
                if m.affine:
                    init.constant_(m.weight.data, 1.0)
                    init.constant_(m.bias.data, 0.0)
            else:
                raise NotImplementedError('Initialization method [{:s}] is not implemented'.format(init_bn_type))

    print('Initialization method [{:s} + {:s}], gain is [{:.2f}]'.format(init_type, init_bn_type, gain))
    net.apply(_init_module)
| 22,764 | 39.435169 | 113 | py |
MaskedDenoising | MaskedDenoising-main/models/network_ridnet.py |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class MeanShift(nn.Module):
    """Fixed 1x1 conv that adds (sub=False) or subtracts (sub=True) a
    per-channel RGB mean from the input."""

    def __init__(self, mean_rgb, sub):
        super(MeanShift, self).__init__()

        sign = -1 if sub else 1
        bias = [mean_rgb[0] * sign, mean_rgb[1] * sign, mean_rgb[2] * sign]

        self.shifter = nn.Conv2d(3, 3, 1, 1, 0)
        self.shifter.weight.data = torch.eye(3).view(3, 3, 1, 1)
        self.shifter.bias.data = torch.Tensor(bias)

        # The shift is a constant preprocessing step, never trained.
        for params in self.shifter.parameters():
            params.requires_grad = False

    def forward(self, x):
        return self.shifter(x)
class Merge_Run(nn.Module):
    """Two parallel branches — a plain 3x3 conv and a dilation-2 conv — whose
    concatenation is fused by a 3x3 conv, with the input added back as a
    residual."""

    def __init__(self,
                 in_channels, out_channels,
                 ksize=3, stride=1, pad=1, dilation=1):
        super(Merge_Run, self).__init__()

        self.body1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, ksize, stride, pad),
            nn.ReLU(inplace=True))
        self.body2 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, ksize, stride, 2, 2),
            nn.ReLU(inplace=True))
        self.body3 = nn.Sequential(
            nn.Conv2d(in_channels * 2, out_channels, ksize, stride, pad),
            nn.ReLU(inplace=True))
        init_weights(self.modules)

    def forward(self, x):
        fused = self.body3(torch.cat([self.body1(x), self.body2(x)], dim=1))
        return fused + x
class Merge_Run_dual(nn.Module):
    """Like Merge_Run, but each branch stacks two dilated convs (rates 1+2
    and 3+4) before the concat-fuse-residual step."""

    def __init__(self,
                 in_channels, out_channels,
                 ksize=3, stride=1, pad=1, dilation=1):
        super(Merge_Run_dual, self).__init__()

        self.body1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, ksize, stride, pad),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels, out_channels, ksize, stride, 2, 2),
            nn.ReLU(inplace=True))
        self.body2 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, ksize, stride, 3, 3),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels, out_channels, ksize, stride, 4, 4),
            nn.ReLU(inplace=True))
        self.body3 = nn.Sequential(
            nn.Conv2d(in_channels * 2, out_channels, ksize, stride, pad),
            nn.ReLU(inplace=True))
        init_weights(self.modules)

    def forward(self, x):
        fused = self.body3(torch.cat([self.body1(x), self.body2(x)], dim=1))
        return fused + x
def init_weights(modules):
    """Deliberate no-op hook: blocks call this, but PyTorch's default
    initialisation is kept."""
    return None
class BasicBlock(nn.Module):
    """Conv -> ReLU."""

    def __init__(self,
                 in_channels, out_channels,
                 ksize=3, stride=1, pad=1):
        super(BasicBlock, self).__init__()

        self.body = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, ksize, stride, pad),
            nn.ReLU(inplace=True))
        init_weights(self.modules)

    def forward(self, x):
        return self.body(x)
class BasicBlockSig(nn.Module):
    """Conv -> Sigmoid (excite stage of channel attention)."""

    def __init__(self,
                 in_channels, out_channels,
                 ksize=3, stride=1, pad=1):
        super(BasicBlockSig, self).__init__()

        self.body = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, ksize, stride, pad),
            nn.Sigmoid())
        init_weights(self.modules)

    def forward(self, x):
        return self.body(x)
class ResidualBlock(nn.Module):
    """Conv-ReLU-Conv with identity skip; ReLU applied after the addition."""

    def __init__(self,
                 in_channels, out_channels):
        super(ResidualBlock, self).__init__()

        self.body = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1))
        init_weights(self.modules)

    def forward(self, x):
        return F.relu(self.body(x) + x)
class EResidualBlock(nn.Module):
    """Grouped-conv residual block finished by a 1x1 projection; ReLU after
    the skip addition."""

    def __init__(self,
                 in_channels, out_channels,
                 group=1):
        super(EResidualBlock, self).__init__()

        self.body = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1, groups=group),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, 1, 1, groups=group),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 1, 1, 0))
        init_weights(self.modules)

    def forward(self, x):
        return F.relu(self.body(x) + x)
class UpsampleBlock(nn.Module):
    """Wraps _UpsampleBlock; in multi-scale mode it holds x2/x3/x4 variants
    and dispatches on the scale given to forward."""

    def __init__(self,
                 n_channels, scale, multi_scale,
                 group=1):
        super(UpsampleBlock, self).__init__()

        if multi_scale:
            self.up2 = _UpsampleBlock(n_channels, scale=2, group=group)
            self.up3 = _UpsampleBlock(n_channels, scale=3, group=group)
            self.up4 = _UpsampleBlock(n_channels, scale=4, group=group)
        else:
            self.up = _UpsampleBlock(n_channels, scale=scale, group=group)
        self.multi_scale = multi_scale

    def forward(self, x, scale):
        if not self.multi_scale:
            return self.up(x)
        if scale == 2:
            return self.up2(x)
        if scale == 3:
            return self.up3(x)
        if scale == 4:
            return self.up4(x)
        # NOTE: as in the original code, an unknown scale in multi-scale
        # mode falls through and returns None.
class _UpsampleBlock(nn.Module):
    """Sub-pixel upsampler: conv to 4*C channels + PixelShuffle(2), repeated
    log2(scale) times for scales 2/4/8, or a single x3 shuffle for scale 3."""

    def __init__(self,
                 n_channels, scale,
                 group=1):
        super(_UpsampleBlock, self).__init__()

        layers = []
        if scale in (2, 4, 8):
            for _ in range(int(math.log(scale, 2))):
                layers.append(nn.Conv2d(n_channels, 4 * n_channels, 3, 1, 1, groups=group))
                layers.append(nn.ReLU(inplace=True))
                layers.append(nn.PixelShuffle(2))
        elif scale == 3:
            layers.append(nn.Conv2d(n_channels, 9 * n_channels, 3, 1, 1, groups=group))
            layers.append(nn.ReLU(inplace=True))
            layers.append(nn.PixelShuffle(3))

        self.body = nn.Sequential(*layers)
        init_weights(self.modules)

    def forward(self, x):
        return self.body(x)
def default_conv(in_channels, out_channels, kernel_size, bias=True):
    """'Same'-size convolution: padding is kernel_size // 2."""
    padding = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     padding=padding, bias=bias)
class MeanShift(nn.Conv2d):
    """
    Fixed 1x1 conv that shifts RGB channels by sign * rgb_range * rgb_mean / rgb_std.

    sign=-1 (default) subtracts the mean; sign=+1 adds it back.
    """

    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        self.weight.data = torch.eye(3).view(3, 3, 1, 1)
        self.weight.data.div_(std.view(3, 1, 1, 1))
        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)
        self.bias.data.div_(std)
        # Bug fix: `self.requires_grad = False` only set a plain attribute on
        # the module and left weight/bias trainable; freeze the actual
        # parameters (consistent with the nn.Module MeanShift in this file).
        for p in self.parameters():
            p.requires_grad = False
class BasicBlock(nn.Sequential):
    """Conv [+ BatchNorm] [+ activation] packaged as an nn.Sequential."""

    def __init__(
        self, in_channels, out_channels, kernel_size, stride=1, bias=False,
        bn=True, act=nn.ReLU(True)):

        layers = [nn.Conv2d(
            in_channels, out_channels, kernel_size,
            padding=(kernel_size // 2), stride=stride, bias=bias)]
        if bn:
            layers.append(nn.BatchNorm2d(out_channels))
        if act is not None:
            layers.append(act)
        super(BasicBlock, self).__init__(*layers)
class ResBlock(nn.Module):
    """Two convs (activation after the first), output scaled by res_scale,
    then added to the input."""

    def __init__(
        self, conv, n_feat, kernel_size,
        bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(ResBlock, self).__init__()

        layers = []
        for i in range(2):
            layers.append(conv(n_feat, n_feat, kernel_size, bias=bias))
            if bn:
                layers.append(nn.BatchNorm2d(n_feat))
            if i == 0:
                layers.append(act)
        self.body = nn.Sequential(*layers)
        self.res_scale = res_scale

    def forward(self, x):
        return self.body(x).mul(self.res_scale) + x
class Upsampler(nn.Sequential):
    """Sub-pixel upsampler built from `conv` + PixelShuffle; supports
    scale = 2^n or 3, raises NotImplementedError otherwise."""

    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):
        layers = []
        if (scale & (scale - 1)) == 0:  # power of two
            for _ in range(int(math.log(scale, 2))):
                layers.append(conv(n_feat, 4 * n_feat, 3, bias))
                layers.append(nn.PixelShuffle(2))
                if bn:
                    layers.append(nn.BatchNorm2d(n_feat))
                if act:
                    layers.append(act())
        elif scale == 3:
            layers.append(conv(n_feat, 9 * n_feat, 3, bias))
            layers.append(nn.PixelShuffle(3))
            if bn:
                layers.append(nn.BatchNorm2d(n_feat))
            if act:
                layers.append(act())
        else:
            raise NotImplementedError
        super(Upsampler, self).__init__(*layers)
class CALayer(nn.Module):
    """Channel attention: global average pool -> squeeze conv -> excite conv
    with sigmoid; the result gates the input per channel."""

    def __init__(self, channel, reduction=16):
        super(CALayer, self).__init__()

        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.c1 = BasicBlock(channel, channel // reduction, 1, 1, 0)
        self.c2 = BasicBlockSig(channel // reduction, channel, 1, 1, 0)

    def forward(self, x):
        gate = self.c2(self.c1(self.avg_pool(x)))
        return x * gate
class Block(nn.Module):
    """RIDNet EAM block: merge-and-run dual -> residual -> enhanced residual,
    followed by channel attention."""

    def __init__(self, in_channels, out_channels, group=1):
        super(Block, self).__init__()

        self.r1 = Merge_Run_dual(in_channels, out_channels)
        self.r2 = ResidualBlock(in_channels, out_channels)
        self.r3 = EResidualBlock(in_channels, out_channels)
        self.ca = CALayer(in_channels)

    def forward(self, x):
        return self.ca(self.r3(self.r2(self.r1(x))))
class RIDNET(nn.Module):
    """RIDNet denoiser: mean-shift in, head conv, four EAM blocks, tail conv,
    mean-shift out, plus a global skip from the input."""

    def __init__(self):
        super(RIDNET, self).__init__()

        n_feats = 64
        kernel_size = 3
        reduction = 16
        rgb_mean = (0.4488, 0.4371, 0.4040)
        rgb_std = (1.0, 1.0, 1.0)

        self.sub_mean = MeanShift(1, rgb_mean, rgb_std)
        self.add_mean = MeanShift(1, rgb_mean, rgb_std, 1)

        self.head = BasicBlock(3, n_feats, kernel_size, 1, 1)
        self.b1 = Block(n_feats, n_feats)
        self.b2 = Block(n_feats, n_feats)
        self.b3 = Block(n_feats, n_feats)
        self.b4 = Block(n_feats, n_feats)
        self.tail = nn.Conv2d(n_feats, 3, kernel_size, 1, 1, 1)

    def forward(self, x):
        feats = self.head(self.sub_mean(x))
        feats = self.b4(self.b3(self.b2(self.b1(feats))))
        residual = self.add_mean(self.tail(feats))
        return residual + x
MaskedDenoising | MaskedDenoising-main/models/network_dncnn.py |
import torch.nn as nn
import models.basicblock as B
"""
# --------------------------------------------
# DnCNN (20 conv layers)
# FDnCNN (20 conv layers)
# IRCNN (7 conv layers)
# --------------------------------------------
# References:
@article{zhang2017beyond,
title={Beyond a gaussian denoiser: Residual learning of deep cnn for image denoising},
author={Zhang, Kai and Zuo, Wangmeng and Chen, Yunjin and Meng, Deyu and Zhang, Lei},
journal={IEEE Transactions on Image Processing},
volume={26},
number={7},
pages={3142--3155},
year={2017},
publisher={IEEE}
}
@article{zhang2018ffdnet,
title={FFDNet: Toward a fast and flexible solution for CNN-based image denoising},
author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
journal={IEEE Transactions on Image Processing},
volume={27},
number={9},
pages={4608--4622},
year={2018},
publisher={IEEE}
}
# --------------------------------------------
"""
# --------------------------------------------
# DnCNN
# --------------------------------------------
class DnCNN(nn.Module):
    """
    DnCNN denoiser (Zhang et al., TIP 2017).

    Predicts the noise residual n and returns x - n. Batch normalization and
    residual learning are beneficial to Gaussian denoising: the residual of
    an AWGN-corrupted image follows a constant Gaussian distribution, which
    stabilizes batch normalization during training.

    Args:
        in_nc: channel number of input.
        out_nc: channel number of output.
        nc: feature channels per conv layer.
        nb: total number of conv layers.
        act_mode: batch norm + activation spec; 'BR' means BN+ReLU.
    """

    def __init__(self, in_nc=1, out_nc=1, nc=64, nb=17, act_mode='BR'):
        super(DnCNN, self).__init__()
        assert 'R' in act_mode or 'L' in act_mode, 'Examples of activation function: R, L, BR, BL, IR, IL'
        bias = True

        head = B.conv(in_nc, nc, mode='C' + act_mode[-1], bias=bias)
        body = [B.conv(nc, nc, mode='C' + act_mode, bias=bias) for _ in range(nb - 2)]
        tail = B.conv(nc, out_nc, mode='C', bias=bias)
        self.model = B.sequential(head, *body, tail)

    def forward(self, x):
        # The network estimates the noise; subtract it from the input.
        return x - self.model(x)
# --------------------------------------------
# IRCNN denoiser
# --------------------------------------------
class IRCNN(nn.Module):
    """
    IRCNN denoiser: seven 3x3 convs with dilations 1-2-3-4-3-2-1 and ReLU in
    between; predicts the noise residual n and returns x - n.

    Args:
        in_nc: channel number of input.
        out_nc: channel number of output.
        nc: feature channels per conv layer.
    """

    def __init__(self, in_nc=1, out_nc=1, nc=64):
        super(IRCNN, self).__init__()

        dilations = (1, 2, 3, 4, 3, 2, 1)
        last = len(dilations) - 1
        layers = []
        for idx, d in enumerate(dilations):
            # First layer maps in_nc -> nc, last maps nc -> out_nc;
            # padding equals dilation so spatial size is preserved.
            layers.append(nn.Conv2d(in_channels=in_nc if idx == 0 else nc,
                                    out_channels=out_nc if idx == last else nc,
                                    kernel_size=3, stride=1,
                                    padding=d, dilation=d, bias=True))
            if idx != last:
                layers.append(nn.ReLU(inplace=True))
        self.model = B.sequential(*layers)

    def forward(self, x):
        return x - self.model(x)
# --------------------------------------------
# FDnCNN
# --------------------------------------------
# Compared with DnCNN, FDnCNN has three modifications:
# 1) add noise level map as input
# 2) remove residual learning and BN
# 3) train with L1 loss
# may need more training time, but will not reduce the final PSNR too much.
# --------------------------------------------
class FDnCNN(nn.Module):
    """
    Flexible DnCNN. Compared with DnCNN it 1) takes a noise-level map as an
    extra input channel, 2) removes residual learning and BN, and 3) is
    trained with L1 loss.

    Args:
        in_nc: channel number of input (image + noise-level map).
        out_nc: channel number of output.
        nc: feature channels per conv layer.
        nb: total number of conv layers.
        act_mode: batch norm + activation spec; 'R' means ReLU.
    """

    def __init__(self, in_nc=2, out_nc=1, nc=64, nb=20, act_mode='R'):
        super(FDnCNN, self).__init__()
        assert 'R' in act_mode or 'L' in act_mode, 'Examples of activation function: R, L, BR, BL, IR, IL'
        bias = True

        head = B.conv(in_nc, nc, mode='C' + act_mode[-1], bias=bias)
        body = [B.conv(nc, nc, mode='C' + act_mode, bias=bias) for _ in range(nb - 2)]
        tail = B.conv(nc, out_nc, mode='C', bias=bias)
        self.model = B.sequential(head, *body, tail)

    def forward(self, x):
        # Direct mapping: no residual subtraction, unlike DnCNN.
        return self.model(x)
if __name__ == '__main__':
    # Smoke test: build both networks, print their summaries, and verify
    # output shapes on random inputs. Run as: python models/network_dncnn.py
    from utils import utils_model
    import torch

    net_dncnn = DnCNN(in_nc=1, out_nc=1, nc=64, nb=20, act_mode='BR')
    print(utils_model.describe_model(net_dncnn))

    net_fdncnn = FDnCNN(in_nc=2, out_nc=1, nc=64, nb=20, act_mode='R')
    print(utils_model.describe_model(net_fdncnn))

    out1 = net_dncnn(torch.randn((1, 1, 240, 240)))
    print(out1.shape)

    out2 = net_fdncnn(torch.randn((1, 2, 240, 240)))
    print(out2.shape)
| 6,298 | 36.052941 | 123 | py |
MaskedDenoising | MaskedDenoising-main/models/model_plain.py | from collections import OrderedDict
import torch
import torch.nn as nn
from torch.optim import lr_scheduler
from torch.optim import Adam
from models.select_network import define_G
from models.model_base import ModelBase
from models.loss import CharbonnierLoss
from models.loss_ssim import SSIMLoss
from utils.utils_model import test_mode
from utils.utils_regularizers import regularizer_orth, regularizer_clip
class ModelPlain(ModelBase):
    """Plain single-network training wrapper: optimises netG with a pixel
    loss (L1/L2/SSIM/Charbonnier), optionally maintaining an EMA shadow
    network netE and an optional output mask."""
    def __init__(self, opt):
        super(ModelPlain, self).__init__(opt)
        # ------------------------------------
        # define network
        # ------------------------------------
        self.opt_train = self.opt['train']  # training option
        self.netG = define_G(opt)
        self.netG = self.model_to_device(self.netG)
        if self.opt_train['E_decay'] > 0:
            # E_decay > 0 enables an exponential-moving-average copy of netG.
            self.netE = define_G(opt).to(self.device).eval()
    """
    # ----------------------------------------
    # Preparation before training with data
    # Save model during training
    # ----------------------------------------
    """
    # ----------------------------------------
    # initialize training
    # ----------------------------------------
    def init_train(self):
        """Load weights/optimizer state and set up loss, optimizer, scheduler."""
        self.load()                           # load model
        self.netG.train()                     # set training mode, for BN
        self.define_loss()                    # define loss
        self.define_optimizer()               # define optimizer
        self.load_optimizers()                # load optimizer
        self.define_scheduler()               # define scheduler
        self.log_dict = OrderedDict()         # log
    # ----------------------------------------
    # load pre-trained G model
    # ----------------------------------------
    def load(self):
        """Load pretrained netG; load netE or copy G into E when EMA is on."""
        load_path_G = self.opt['path']['pretrained_netG']
        if load_path_G is not None:
            print('Loading model for G [{:s}] ...'.format(load_path_G))
            self.load_network(load_path_G, self.netG, strict=self.opt_train['G_param_strict'], param_key='params')
        load_path_E = self.opt['path']['pretrained_netE']
        if self.opt_train['E_decay'] > 0:
            if load_path_E is not None:
                print('Loading model for E [{:s}] ...'.format(load_path_E))
                self.load_network(load_path_E, self.netE, strict=self.opt_train['E_param_strict'], param_key='params_ema')
            else:
                # No checkpoint for E: start the EMA from the current G weights.
                print('Copying model for E ...')
                self.update_E(0)
            self.netE.eval()
    # ----------------------------------------
    # load optimizer
    # ----------------------------------------
    def load_optimizers(self):
        """Optionally restore the G optimizer state from a checkpoint."""
        load_path_optimizerG = self.opt['path']['pretrained_optimizerG']
        if load_path_optimizerG is not None and self.opt_train['G_optimizer_reuse']:
            print('Loading optimizerG [{:s}] ...'.format(load_path_optimizerG))
            self.load_optimizer(load_path_optimizerG, self.G_optimizer)
    # ----------------------------------------
    # save model / optimizer(optional)
    # ----------------------------------------
    def save(self, iter_label):
        """Checkpoint netG (and netE / optimizer state when configured)."""
        self.save_network(self.save_dir, self.netG, 'G', iter_label)
        if self.opt_train['E_decay'] > 0:
            self.save_network(self.save_dir, self.netE, 'E', iter_label)
        if self.opt_train['G_optimizer_reuse']:
            self.save_optimizer(self.save_dir, self.G_optimizer, 'optimizerG', iter_label)
    # ----------------------------------------
    # define loss
    # ----------------------------------------
    def define_loss(self):
        """Select the pixel loss from opt; raises on unknown loss type."""
        G_lossfn_type = self.opt_train['G_lossfn_type']
        if G_lossfn_type == 'l1':
            self.G_lossfn = nn.L1Loss().to(self.device)
        elif G_lossfn_type == 'l2':
            self.G_lossfn = nn.MSELoss().to(self.device)
        elif G_lossfn_type == 'l2sum':
            self.G_lossfn = nn.MSELoss(reduction='sum').to(self.device)
        elif G_lossfn_type == 'ssim':
            self.G_lossfn = SSIMLoss().to(self.device)
        elif G_lossfn_type == 'charbonnier':
            self.G_lossfn = CharbonnierLoss(self.opt_train['G_charbonnier_eps']).to(self.device)
        else:
            raise NotImplementedError('Loss type [{:s}] is not found.'.format(G_lossfn_type))
        self.G_lossfn_weight = self.opt_train['G_lossfn_weight']
    # ----------------------------------------
    # define optimizer
    # ----------------------------------------
    def define_optimizer(self):
        """Build an Adam optimizer over the trainable parameters of netG."""
        G_optim_params = []
        for k, v in self.netG.named_parameters():
            if v.requires_grad:
                G_optim_params.append(v)
            else:
                print('Params [{:s}] will not optimize.'.format(k))
        if self.opt_train['G_optimizer_type'] == 'adam':
            self.G_optimizer = Adam(G_optim_params, lr=self.opt_train['G_optimizer_lr'],
                                    betas=self.opt_train['G_optimizer_betas'],
                                    weight_decay=self.opt_train['G_optimizer_wd'])
        else:
            raise NotImplementedError
    # ----------------------------------------
    # define scheduler, only "MultiStepLR"
    # ----------------------------------------
    def define_scheduler(self):
        """Attach the LR scheduler (MultiStepLR or CosineAnnealingWarmRestarts)."""
        if self.opt_train['G_scheduler_type'] == 'MultiStepLR':
            self.schedulers.append(lr_scheduler.MultiStepLR(self.G_optimizer,
                                                            self.opt_train['G_scheduler_milestones'],
                                                            self.opt_train['G_scheduler_gamma']
                                                            ))
        elif self.opt_train['G_scheduler_type'] == 'CosineAnnealingWarmRestarts':
            self.schedulers.append(lr_scheduler.CosineAnnealingWarmRestarts(self.G_optimizer,
                                                            self.opt_train['G_scheduler_periods'],
                                                            self.opt_train['G_scheduler_restart_weights'],
                                                            self.opt_train['G_scheduler_eta_min']
                                                            ))
        else:
            raise NotImplementedError
    """
    # ----------------------------------------
    # Optimization during training with data
    # Testing/evaluation
    # ----------------------------------------
    """
    # ----------------------------------------
    # feed L/H data
    # ----------------------------------------
    def feed_data(self, data, need_H=True):
        """Move a batch dict to the device: 'L' (input), optional 'H'
        (target) and optional 'mask' applied to the network output."""
        self.L = data['L'].to(self.device)
        if need_H:
            self.H = data['H'].to(self.device)
        if 'mask' in data:
            self.mask = data['mask'].to(self.device)
            self.is_mask = True
        else:
            self.is_mask = False
    # ----------------------------------------
    # feed L to netG
    # ----------------------------------------
    def netG_forward(self):
        """Run netG on L; if a mask was fed, zero out masked output pixels."""
        self.E = self.netG(self.L)
        if self.is_mask:
            self.E = torch.mul(self.E, self.mask)
    # ----------------------------------------
    # update parameters and get loss
    # ----------------------------------------
    def optimize_parameters(self, current_step):
        """One optimisation step: forward, loss, backward, (clip), step,
        periodic regularizers, logging, and EMA update."""
        self.G_optimizer.zero_grad()
        self.netG_forward()
        G_loss = self.G_lossfn_weight * self.G_lossfn(self.E, self.H)
        G_loss.backward()
        # ------------------------------------
        # clip_grad
        # ------------------------------------
        # `clip_grad_norm` helps prevent the exploding gradient problem.
        G_optimizer_clipgrad = self.opt_train['G_optimizer_clipgrad'] if self.opt_train['G_optimizer_clipgrad'] else 0
        if G_optimizer_clipgrad > 0:
            # NOTE(review): clips via self.parameters() — confirm ModelBase
            # exposes netG's parameters here (a plain model object has none).
            torch.nn.utils.clip_grad_norm_(self.parameters(), max_norm=self.opt_train['G_optimizer_clipgrad'], norm_type=2)
        self.G_optimizer.step()
        # ------------------------------------
        # regularizer
        # ------------------------------------
        # Applied every N steps, but skipped on checkpoint-save steps.
        G_regularizer_orthstep = self.opt_train['G_regularizer_orthstep'] if self.opt_train['G_regularizer_orthstep'] else 0
        if G_regularizer_orthstep > 0 and current_step % G_regularizer_orthstep == 0 and current_step % self.opt['train']['checkpoint_save'] != 0:
            self.netG.apply(regularizer_orth)
        G_regularizer_clipstep = self.opt_train['G_regularizer_clipstep'] if self.opt_train['G_regularizer_clipstep'] else 0
        if G_regularizer_clipstep > 0 and current_step % G_regularizer_clipstep == 0 and current_step % self.opt['train']['checkpoint_save'] != 0:
            self.netG.apply(regularizer_clip)
        # self.log_dict['G_loss'] = G_loss.item()/self.E.size()[0]  # if `reduction='sum'`
        self.log_dict['G_loss'] = G_loss.item()
        if self.opt_train['E_decay'] > 0:
            self.update_E(self.opt_train['E_decay'])
    # ----------------------------------------
    # test / inference
    # ----------------------------------------
    def test(self):
        """Forward pass without gradients; restores train mode afterwards."""
        self.netG.eval()
        with torch.no_grad():
            self.netG_forward()
        self.netG.train()
    # ----------------------------------------
    # test / inference x8
    # ----------------------------------------
    def testx8(self):
        """Self-ensemble (x8 flip/rotate) inference via utils_model.test_mode."""
        self.netG.eval()
        with torch.no_grad():
            self.E = test_mode(self.netG, self.L, mode=3, sf=self.opt['scale'], modulo=1)
        self.netG.train()
    # ----------------------------------------
    # get log_dict
    # ----------------------------------------
    def current_log(self):
        """Return the running log dictionary (e.g. G_loss)."""
        return self.log_dict
    # ----------------------------------------
    # get L, E, H image
    # ----------------------------------------
    def current_visuals(self, need_H=True):
        """Return the first image of the batch for L/E (and H) on CPU."""
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach()[0].float().cpu()
        out_dict['E'] = self.E.detach()[0].float().cpu()
        if need_H:
            out_dict['H'] = self.H.detach()[0].float().cpu()
        return out_dict
    # ----------------------------------------
    # get L, E, H batch images
    # ----------------------------------------
    def current_results(self, need_H=True):
        """Return the full L/E (and H) batches detached on CPU."""
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach().float().cpu()
        out_dict['E'] = self.E.detach().float().cpu()
        if need_H:
            out_dict['H'] = self.H.detach().float().cpu()
        return out_dict
    """
    # ----------------------------------------
    # Information of netG
    # ----------------------------------------
    """
    # ----------------------------------------
    # print network
    # ----------------------------------------
    def print_network(self):
        """Print a structural description of netG."""
        msg = self.describe_network(self.netG)
        print(msg)
    # ----------------------------------------
    # print params
    # ----------------------------------------
    def print_params(self):
        """Print parameter statistics of netG."""
        msg = self.describe_params(self.netG)
        print(msg)
    # ----------------------------------------
    # network information
    # ----------------------------------------
    def info_network(self):
        """Return a structural description of netG as a string."""
        msg = self.describe_network(self.netG)
        return msg
    # ----------------------------------------
    # params information
    # ----------------------------------------
    def info_params(self):
        """Return parameter statistics of netG as a string."""
        msg = self.describe_params(self.netG)
        return msg
| 11,698 | 40.193662 | 146 | py |
MaskedDenoising | MaskedDenoising-main/models/loss.py | import torch
import torch.nn as nn
import torchvision
from torch.nn import functional as F
from torch import autograd as autograd
"""
Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU(inplace)
(2*): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ReLU(inplace)
(4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(6): ReLU(inplace)
(7*): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(8): ReLU(inplace)
(9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(11): ReLU(inplace)
(12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(13): ReLU(inplace)
(14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(15): ReLU(inplace)
(16*): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(17): ReLU(inplace)
(18): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(19): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(20): ReLU(inplace)
(21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(22): ReLU(inplace)
(23): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(24): ReLU(inplace)
(25*): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(26): ReLU(inplace)
(27): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(29): ReLU(inplace)
(30): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(31): ReLU(inplace)
(32): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(33): ReLU(inplace)
(34*): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(35): ReLU(inplace)
(36): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
"""
# --------------------------------------------
# Perceptual loss
# --------------------------------------------
class VGGFeatureExtractor(nn.Module):
    """Pretrained VGG-19 truncated for perceptual-loss feature extraction.

    With a list ``feature_layer``, the network is split into consecutive
    chunks so a single forward pass returns the activation after each listed
    layer index; with an int, only one truncated stack is kept.
    """
    def __init__(self, feature_layer=[2,7,16,25,34], use_input_norm=True, use_range_norm=False):
        super(VGGFeatureExtractor, self).__init__()
        '''
        use_input_norm: If True, x: [0, 1] --> (x - mean) / std
        use_range_norm: If True, x: [0, 1] --> x: [-1, 1]
        '''
        # NOTE(review): downloads ImageNet weights on first construction.
        model = torchvision.models.vgg19(pretrained=True)
        self.use_input_norm = use_input_norm
        self.use_range_norm = use_range_norm
        if self.use_input_norm:
            # ImageNet channel statistics, shaped (1, 3, 1, 1) for NCHW broadcast
            mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
            std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
            self.register_buffer('mean', mean)
            self.register_buffer('std', std)
        self.list_outputs = isinstance(feature_layer, list)
        if self.list_outputs:
            self.features = nn.Sequential()
            # prepend -1 so the first chunk starts at layer 0
            feature_layer = [-1] + feature_layer
            for i in range(len(feature_layer)-1):
                # chunk i spans layers (prev_index+1) .. current_index inclusive
                self.features.add_module('child'+str(i), nn.Sequential(*list(model.features.children())[(feature_layer[i]+1):(feature_layer[i+1]+1)]))
        else:
            self.features = nn.Sequential(*list(model.features.children())[:(feature_layer + 1)])
        print(self.features)
        # No need to BP to variable
        for k, v in self.features.named_parameters():
            v.requires_grad = False
    def forward(self, x):
        """Return a list of per-chunk activations (list mode) or the final
        feature map (int mode)."""
        if self.use_range_norm:
            x = (x + 1.0) / 2.0
        if self.use_input_norm:
            x = (x - self.mean) / self.std
        if self.list_outputs:
            output = []
            for child_model in self.features.children():
                x = child_model(x)
                output.append(x.clone())
            return output
        else:
            return self.features(x)
class PerceptualLoss(nn.Module):
    """VGG perceptual loss: weighted distance between VGG-19 feature maps of
    a prediction and a (detached) ground truth.
    """
    def __init__(self, feature_layer=[2,7,16,25,34], weights=[0.1,0.1,1.0,1.0,1.0], lossfn_type='l1', use_input_norm=True, use_range_norm=False):
        super(PerceptualLoss, self).__init__()
        self.vgg = VGGFeatureExtractor(feature_layer=feature_layer, use_input_norm=use_input_norm, use_range_norm=use_range_norm)
        self.lossfn_type = lossfn_type
        self.weights = weights
        # feature-space distance: L1 by default, otherwise MSE
        self.lossfn = nn.L1Loss() if self.lossfn_type == 'l1' else nn.MSELoss()
        print(f'feature_layer: {feature_layer} with weights: {weights}')
    def forward(self, x, gt):
        """Forward function.
        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).
            gt (Tensor): Ground-truth tensor with shape (n, c, h, w).
        Returns:
            Tensor: Forward results.
        """
        x_feats, gt_feats = self.vgg(x), self.vgg(gt.detach())
        if not isinstance(x_feats, list):
            # single truncated stack: one feature distance
            return self.lossfn(x_feats, gt_feats.detach())
        total = 0.0
        for idx in range(len(x_feats)):
            total += self.weights[idx] * self.lossfn(x_feats[idx], gt_feats[idx])
        return total
# --------------------------------------------
# GAN loss: gan, ragan
# --------------------------------------------
class GANLoss(nn.Module):
    """Adversarial loss supporting 'gan', 'ragan', 'lsgan', 'wgan' and
    'softplusgan'.

    For 'wgan'/'softplusgan' the target is a boolean (real/fake); otherwise a
    label tensor filled with ``real_label_val``/``fake_label_val`` is built.
    """
    def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):
        super(GANLoss, self).__init__()
        self.gan_type = gan_type.lower()
        self.real_label_val = real_label_val
        self.fake_label_val = fake_label_val

        def _wgan_loss(pred, target_is_real):
            # boolean target: maximize critic output on real, minimize on fake
            return -pred.mean() if target_is_real else pred.mean()

        def _softplusgan_loss(pred, target_is_real):
            # non-saturating softplus formulation
            return F.softplus(-pred).mean() if target_is_real else F.softplus(pred).mean()

        if self.gan_type in ('gan', 'ragan'):
            self.loss = nn.BCEWithLogitsLoss()
        elif self.gan_type == 'lsgan':
            self.loss = nn.MSELoss()
        elif self.gan_type == 'wgan':
            self.loss = _wgan_loss
        elif self.gan_type == 'softplusgan':
            self.loss = _softplusgan_loss
        else:
            raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))

    def get_target_label(self, input, target_is_real):
        """Boolean pass-through for wgan-style losses; label tensor otherwise."""
        if self.gan_type in ['wgan', 'softplusgan']:
            return target_is_real
        fill = self.real_label_val if target_is_real else self.fake_label_val
        return torch.empty_like(input).fill_(fill)

    def forward(self, input, target_is_real):
        """Compute the configured GAN loss for predictions ``input``."""
        return self.loss(input, self.get_target_label(input, target_is_real))
# --------------------------------------------
# TV loss
# --------------------------------------------
class TVLoss(nn.Module):
    """Anisotropic total-variation loss (squared differences of neighbouring
    pixels), normalized per element and per batch.

    https://github.com/jxgu1016/Total_Variation_Loss.pytorch
    """
    def __init__(self, tv_loss_weight=1):
        super(TVLoss, self).__init__()
        self.tv_loss_weight = tv_loss_weight

    def forward(self, x):
        """Return the weighted TV of a (N, C, H, W) tensor."""
        batch = x.size()[0]
        dh = x[:, :, 1:, :] - x[:, :, :-1, :]   # vertical neighbour differences
        dw = x[:, :, :, 1:] - x[:, :, :, :-1]   # horizontal neighbour differences
        h_term = dh.pow(2).sum() / self.tensor_size(x[:, :, 1:, :])
        w_term = dw.pow(2).sum() / self.tensor_size(x[:, :, :, 1:])
        return self.tv_loss_weight * 2 * (h_term + w_term) / batch

    @staticmethod
    def tensor_size(t):
        # number of elements per batch item
        return t.size()[1] * t.size()[2] * t.size()[3]
# --------------------------------------------
# Charbonnier loss
# --------------------------------------------
class CharbonnierLoss(nn.Module):
    """Charbonnier loss (a smooth L1): mean(sqrt((x - y)^2 + eps))."""
    def __init__(self, eps=1e-9):
        super(CharbonnierLoss, self).__init__()
        self.eps = eps  # smoothing constant; keeps the gradient finite at 0

    def forward(self, x, y):
        """Return the mean Charbonnier distance between ``x`` and ``y``."""
        return torch.sqrt((x - y).pow(2) + self.eps).mean()
def r1_penalty(real_pred, real_img):
    """R1 regularization for discriminator. The core idea is to
    penalize the gradient on real data alone: when the
    generator distribution produces the true data distribution
    and the discriminator is equal to 0 on the data manifold, the
    gradient penalty ensures that the discriminator cannot create
    a non-zero gradient orthogonal to the data manifold without
    suffering a loss in the GAN game.
    Ref:
        Eq. 9 in Which training methods for GANs do actually converge.
    """
    (grad_real,) = autograd.grad(
        outputs=real_pred.sum(), inputs=real_img, create_graph=True)
    # per-sample squared gradient norm, averaged over the batch
    per_sample = grad_real.pow(2).view(grad_real.shape[0], -1).sum(1)
    return per_sample.mean()
def g_path_regularize(fake_img, latents, mean_path_length, decay=0.01):
    """Path-length regularizer from StyleGAN2.

    Encourages a fixed-magnitude change in the generated image for a
    fixed-magnitude step in latent space, tracked via an exponential moving
    average of observed path lengths.

    Args:
        fake_img (Tensor): Generated images, NxCxHxW; must be differentiable
            w.r.t. ``latents``.
        latents (Tensor): Latent codes that produced ``fake_img``
            (N x n_latents x dim — the sum(2).mean(1) below assumes 3-D).
        mean_path_length (float or Tensor): Running mean of path lengths.
        decay (float): EMA decay used to update the running mean.

    Returns:
        tuple: (path_penalty, detached mean of current path lengths,
        detached updated running mean).
    """
    # Scale the noise so the image-noise dot product has unit variance per
    # pixel count. BUGFIX: original called math.sqrt but `math` was never
    # imported in this module, raising NameError on first use.
    noise = torch.randn_like(fake_img) / (
        (fake_img.shape[2] * fake_img.shape[3]) ** 0.5)
    grad = autograd.grad(
        outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True)[0]
    path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))
    # EMA update of the running mean path length
    path_mean = mean_path_length + decay * (
        path_lengths.mean() - mean_path_length)
    path_penalty = (path_lengths - path_mean).pow(2).mean()
    return path_penalty, path_lengths.detach().mean(), path_mean.detach()
def gradient_penalty_loss(discriminator, real_data, fake_data, weight=None):
    """Calculate gradient penalty for wgan-gp.

    Pushes the critic's gradient norm towards 1 on random per-sample
    interpolations between real and fake data.

    Args:
        discriminator (nn.Module): Network for the discriminator.
        real_data (Tensor): Real input data.
        fake_data (Tensor): Fake input data.
        weight (Tensor): Weight tensor. Default: None.

    Returns:
        Tensor: A tensor for gradient penalty.
    """
    n = real_data.size(0)
    # random per-sample mixing coefficient in [0, 1)
    alpha = real_data.new_tensor(torch.rand(n, 1, 1, 1))
    mix = alpha * real_data + (1. - alpha) * fake_data
    mix = autograd.Variable(mix, requires_grad=True)
    critic_out = discriminator(mix)
    grads = autograd.grad(
        outputs=critic_out,
        inputs=mix,
        grad_outputs=torch.ones_like(critic_out),
        create_graph=True,
        retain_graph=True,
        only_inputs=True)[0]
    if weight is not None:
        grads = grads * weight
    penalty = ((grads.norm(2, dim=1) - 1) ** 2).mean()
    if weight is not None:
        penalty = penalty / torch.mean(weight)
    return penalty
| 11,137 | 37.673611 | 150 | py |
MaskedDenoising | MaskedDenoising-main/models/network_feature.py | import torch
import torch.nn as nn
import torchvision
"""
# --------------------------------------------
# VGG Feature Extractor
# --------------------------------------------
"""
# --------------------------------------------
# VGG features
# Assume input range is [0, 1]
# --------------------------------------------
class VGGFeatureExtractor(nn.Module):
    """Truncated pretrained VGG-19 feature extractor (single output head)."""
    def __init__(self,
                 feature_layer=34,
                 use_bn=False,
                 use_input_norm=True,
                 device=torch.device('cpu')):
        """
        feature_layer: index of the last VGG layer to keep (34 = conv5_4).
        use_bn: use the batch-norm variant of VGG-19.
        use_input_norm: normalize inputs with ImageNet mean/std
            (assumes input range [0, 1]).
        device: device for the normalization buffers.
        """
        super(VGGFeatureExtractor, self).__init__()
        # NOTE(review): downloads ImageNet weights on first construction.
        if use_bn:
            model = torchvision.models.vgg19_bn(pretrained=True)
        else:
            model = torchvision.models.vgg19(pretrained=True)
        self.use_input_norm = use_input_norm
        if self.use_input_norm:
            mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
            # [0.485-1, 0.456-1, 0.406-1] if input in range [-1,1]
            std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)
            # [0.229*2, 0.224*2, 0.225*2] if input in range [-1,1]
            self.register_buffer('mean', mean)
            self.register_buffer('std', std)
        self.features = nn.Sequential(*list(model.features.children())[:(feature_layer + 1)])
        # No need to BP to variable
        for k, v in self.features.named_parameters():
            v.requires_grad = False
    def forward(self, x):
        """Return features of ``x`` after the truncated VGG stack."""
        if self.use_input_norm:
            x = (x - self.mean) / self.std
        output = self.features(x)
        return output
| 1,594 | 32.93617 | 93 | py |
MaskedDenoising | MaskedDenoising-main/models/network_usrnet_v1.py | import torch
import torch.nn as nn
import models.basicblock as B
import numpy as np
from utils import utils_image as util
import torch.fft
# for pytorch version >= 1.8.1
"""
# --------------------------------------------
# Kai Zhang (cskaizhang@gmail.com)
@inproceedings{zhang2020deep,
title={Deep unfolding network for image super-resolution},
author={Zhang, Kai and Van Gool, Luc and Timofte, Radu},
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
pages={0--0},
year={2020}
}
# --------------------------------------------
"""
"""
# --------------------------------------------
# basic functions
# --------------------------------------------
"""
def splits(a, sf):
    '''split a into sfxsf distinct blocks
    Args:
        a: NxCxWxH
        sf: split factor
    Returns:
        b: NxCx(W/sf)x(H/sf)x(sf^2); the last axis walks the row blocks
        first, then the column blocks.
    '''
    row_blocks = torch.stack(torch.chunk(a, sf, dim=2), dim=4)
    return torch.cat(torch.chunk(row_blocks, sf, dim=3), dim=4)
def p2o(psf, shape):
    '''
    Convert point-spread function to optical transfer function.
    Zero-pads the PSF to ``shape``, circularly shifts it so its centre sits
    at the origin, then takes the 2-D FFT, so the OTF is not influenced by
    PSF off-centering.
    Args:
        psf: NxCxhxw
        shape: [H, W]
    Returns:
        otf: complex NxCxHxW
    '''
    otf = torch.zeros(psf.shape[:-2] + shape).type_as(psf)
    otf[..., :psf.shape[2], :psf.shape[3]].copy_(psf)
    # circular shift by half the kernel size on each spatial axis
    for dim, size in enumerate(psf.shape[2:]):
        otf = torch.roll(otf, -int(size / 2), dims=dim + 2)
    return torch.fft.fftn(otf, dim=(-2, -1))
def upsample(x, sf=3):
    '''s-fold upsampler
    Upsampling the spatial size by filling the new entries with zeros:
    each input pixel lands at the top-left corner of an sf x sf zero block.
    x: tensor image, NxCxWxH
    '''
    n, c, h, w = x.shape
    out = torch.zeros((n, c, h * sf, w * sf)).type_as(x)
    out[..., ::sf, ::sf].copy_(x)
    return out
def downsample(x, sf=3):
    '''s-fold downsampler
    Keep the upper-left pixel of each distinct sfxsf patch, discard the rest.
    x: tensor image, NxCxWxH
    '''
    return x[..., ::sf, ::sf]
def downsample_np(x, sf=3):
    """NumPy variant of ``downsample``; spatial dims lead (HxWx...)."""
    return x[::sf, ::sf, ...]
"""
# --------------------------------------------
# (1) Prior module; ResUNet: act as a non-blind denoiser
# x_k = P(z_k, beta_k)
# --------------------------------------------
"""
class ResUNet(nn.Module):
    """Residual U-Net used as the learned prior/denoiser of USRNet:
    x_k = P(z_k, beta_k).

    The extra input channel (in_nc = out_nc + 1 by default) carries the
    per-pixel step-size/noise map concatenated to the image.
    """
    def __init__(self, in_nc=4, out_nc=3, nc=[64, 128, 256, 512], nb=2, act_mode='R', downsample_mode='strideconv', upsample_mode='convtranspose'):
        super(ResUNet, self).__init__()
        self.m_head = B.conv(in_nc, nc[0], bias=False, mode='C')
        # downsample
        if downsample_mode == 'avgpool':
            downsample_block = B.downsample_avgpool
        elif downsample_mode == 'maxpool':
            downsample_block = B.downsample_maxpool
        elif downsample_mode == 'strideconv':
            downsample_block = B.downsample_strideconv
        else:
            raise NotImplementedError('downsample mode [{:s}] is not found'.format(downsample_mode))
        # each encoder stage: nb residual blocks, then a x2 downsample
        self.m_down1 = B.sequential(*[B.ResBlock(nc[0], nc[0], bias=False, mode='C'+act_mode+'C') for _ in range(nb)], downsample_block(nc[0], nc[1], bias=False, mode='2'))
        self.m_down2 = B.sequential(*[B.ResBlock(nc[1], nc[1], bias=False, mode='C'+act_mode+'C') for _ in range(nb)], downsample_block(nc[1], nc[2], bias=False, mode='2'))
        self.m_down3 = B.sequential(*[B.ResBlock(nc[2], nc[2], bias=False, mode='C'+act_mode+'C') for _ in range(nb)], downsample_block(nc[2], nc[3], bias=False, mode='2'))
        self.m_body = B.sequential(*[B.ResBlock(nc[3], nc[3], bias=False, mode='C'+act_mode+'C') for _ in range(nb)])
        # upsample
        if upsample_mode == 'upconv':
            upsample_block = B.upsample_upconv
        elif upsample_mode == 'pixelshuffle':
            upsample_block = B.upsample_pixelshuffle
        elif upsample_mode == 'convtranspose':
            upsample_block = B.upsample_convtranspose
        else:
            raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))
        # each decoder stage: x2 upsample, then nb residual blocks
        self.m_up3 = B.sequential(upsample_block(nc[3], nc[2], bias=False, mode='2'), *[B.ResBlock(nc[2], nc[2], bias=False, mode='C'+act_mode+'C') for _ in range(nb)])
        self.m_up2 = B.sequential(upsample_block(nc[2], nc[1], bias=False, mode='2'), *[B.ResBlock(nc[1], nc[1], bias=False, mode='C'+act_mode+'C') for _ in range(nb)])
        self.m_up1 = B.sequential(upsample_block(nc[1], nc[0], bias=False, mode='2'), *[B.ResBlock(nc[0], nc[0], bias=False, mode='C'+act_mode+'C') for _ in range(nb)])
        self.m_tail = B.conv(nc[0], out_nc, bias=False, mode='C')
    def forward(self, x):
        # pad so H and W are multiples of 8 (three stride-2 stages)
        h, w = x.size()[-2:]
        paddingBottom = int(np.ceil(h/8)*8-h)
        paddingRight = int(np.ceil(w/8)*8-w)
        x = nn.ReplicationPad2d((0, paddingRight, 0, paddingBottom))(x)
        x1 = self.m_head(x)
        x2 = self.m_down1(x1)
        x3 = self.m_down2(x2)
        x4 = self.m_down3(x3)
        x = self.m_body(x4)
        # additive skip connections at each resolution
        x = self.m_up3(x+x4)
        x = self.m_up2(x+x3)
        x = self.m_up1(x+x2)
        x = self.m_tail(x+x1)
        # crop back to the original spatial size
        x = x[..., :h, :w]
        return x
"""
# --------------------------------------------
# (2) Data module, closed-form solution
# It is a trainable-parameter-free module ^_^
# z_k = D(x_{k-1}, s, k, y, alpha_k)
# some can be pre-calculated
# --------------------------------------------
"""
class DataNet(nn.Module):
    """Data module of USRNet: closed-form Fourier-domain solution of the
    data sub-problem, z_k = D(x_{k-1}, s, k, y, alpha_k). It is a
    trainable-parameter-free module.

    forward() args:
        x: current estimate.
        FB / FBC / F2B: FFT of the blur kernel, its conjugate, and |FB|^2
            (pre-computed by the caller).
        FBFy: FBC * FFT(S^T y), pre-computed.
        alpha: per-iteration hyper-parameter (from HyPaNet).
        sf: integer scale factor.
    """
    def __init__(self):
        super(DataNet, self).__init__()
    def forward(self, x, FB, FBC, F2B, FBFy, alpha, sf):
        FR = FBFy + torch.fft.fftn(alpha*x, dim=(-2,-1))
        x1 = FB.mul(FR)
        # mean over the sf x sf distinct blocks implements the s-fold
        # downsampler in the Fourier domain (see splits())
        FBR = torch.mean(splits(x1, sf), dim=-1, keepdim=False)
        invW = torch.mean(splits(F2B, sf), dim=-1, keepdim=False)
        invWBR = FBR.div(invW + alpha)
        FCBinvWBR = FBC*invWBR.repeat(1, 1, sf, sf)
        FX = (FR-FCBinvWBR)/alpha
        # estimate is real by construction; discard numerical imaginary part
        Xest = torch.real(torch.fft.ifftn(FX, dim=(-2,-1)))
        return Xest
"""
# --------------------------------------------
# (3) Hyper-parameter module
# --------------------------------------------
"""
class HyPaNet(nn.Module):
    """Hyper-parameter module: predicts strictly-positive per-iteration
    values (alpha/beta pairs) from the noise level and scale factor via a
    1x1-conv MLP ending in Softplus."""
    def __init__(self, in_nc=2, out_nc=8, channel=64):
        super(HyPaNet, self).__init__()
        stages = [
            nn.Conv2d(in_nc, channel, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel, channel, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel, out_nc, 1, padding=0, bias=True),
            nn.Softplus(),
        ]
        self.mlp = nn.Sequential(*stages)
    def forward(self, x):
        # small epsilon keeps outputs strictly positive (used as divisors)
        return self.mlp(x) + 1e-6
"""
# --------------------------------------------
# main USRNet
# deep unfolding super-resolution network
# --------------------------------------------
"""
class USRNet(nn.Module):
    """Deep unfolding super-resolution network (USRNet).

    Alternates ``n_iter`` times between the closed-form data module ``d``
    (DataNet) and the learned prior ``p`` (ResUNet); the small
    hyper-parameter net ``h`` predicts all per-iteration alpha/beta values
    from (sigma, sf).
    """
    def __init__(self, n_iter=8, h_nc=64, in_nc=4, out_nc=3, nc=[64, 128, 256, 512], nb=2, act_mode='R', downsample_mode='strideconv', upsample_mode='convtranspose'):
        super(USRNet, self).__init__()
        self.d = DataNet()
        self.p = ResUNet(in_nc=in_nc, out_nc=out_nc, nc=nc, nb=nb, act_mode=act_mode, downsample_mode=downsample_mode, upsample_mode=upsample_mode)
        self.h = HyPaNet(in_nc=2, out_nc=n_iter*2, channel=h_nc)
        self.n = n_iter
    def forward(self, x, k, sf, sigma):
        '''
        x: tensor, NxCxWxH
        k: tensor, Nx(1,3)xwxh
        sf: integer, 1
        sigma: tensor, Nx1x1x1
        '''
        # initialization & pre-calculation
        w, h = x.shape[-2:]
        FB = p2o(k, (w*sf, h*sf))                 # OTF of the blur kernel at HR size
        FBC = torch.conj(FB)
        F2B = torch.pow(torch.abs(FB), 2)
        STy = upsample(x, sf=sf)                  # S^T y: zero-filled upsampling
        FBFy = FBC*torch.fft.fftn(STy, dim=(-2,-1))
        x = nn.functional.interpolate(x, scale_factor=sf, mode='nearest')
        # hyper-parameter, alpha & beta
        ab = self.h(torch.cat((sigma, torch.tensor(sf).type_as(sigma).expand_as(sigma)), dim=1))
        # unfolding: channels [0, n) of ab are alphas, [n, 2n) are betas
        for i in range(self.n):
            x = self.d(x, FB, FBC, F2B, FBFy, ab[:, i:i+1, ...], sf)
            # beta_i is broadcast to a full map and concatenated as an
            # extra input channel for the denoiser prior
            x = self.p(torch.cat((x, ab[:, i+self.n:i+self.n+1, ...].repeat(1, 1, x.size(2), x.size(3))), dim=1))
        return x
| 8,627 | 31.681818 | 172 | py |
MaskedDenoising | MaskedDenoising-main/models/network_msrresnet.py | import math
import torch.nn as nn
import models.basicblock as B
import functools
import torch.nn.functional as F
import torch.nn.init as init
"""
# --------------------------------------------
# modified SRResNet
# -- MSRResNet0 (v0.0)
# -- MSRResNet1 (v0.1)
# --------------------------------------------
References:
@inproceedings{wang2018esrgan,
title={Esrgan: Enhanced super-resolution generative adversarial networks},
author={Wang, Xintao and Yu, Ke and Wu, Shixiang and Gu, Jinjin and Liu, Yihao and Dong, Chao and Qiao, Yu and Change Loy, Chen},
booktitle={European Concerence on Computer Vision (ECCV)},
pages={0--0},
year={2018}
}
@inproceedings{ledig2017photo,
title={Photo-realistic single image super-resolution using a generative adversarial network},
author={Ledig, Christian and Theis, Lucas and Husz{\'a}r, Ferenc and Caballero, Jose and Cunningham, Andrew and Acosta, Alejandro and Aitken, Andrew and Tejani, Alykhan and Totz, Johannes and Wang, Zehan and others},
booktitle={IEEE concerence on computer vision and pattern recognition},
pages={4681--4690},
year={2017}
}
# --------------------------------------------
"""
# --------------------------------------------
# modified SRResNet v0.0
# https://github.com/xinntao/ESRGAN
# --------------------------------------------
class MSRResNet0(nn.Module):
    """Modified SRResNet (v0.0): residual trunk wrapped in a long skip
    (ShortcutBlock), followed by pixel upsampling and a two-conv tail.
    """
    def __init__(self, in_nc=3, out_nc=3, nc=64, nb=16, upscale=4, act_mode='R', upsample_mode='upconv'):
        """
        in_nc: channel number of input
        out_nc: channel number of output
        nc: channel number
        nb: number of residual blocks
        upscale: up-scale factor
        act_mode: activation function
        upsample_mode: 'upconv' | 'pixelshuffle' | 'convtranspose'
        """
        super(MSRResNet0, self).__init__()
        assert 'R' in act_mode or 'L' in act_mode, 'Examples of activation function: R, L, BR, BL, IR, IL'
        # number of x2 upsampling stages; x3 handled as one special stage
        n_upscale = int(math.log(upscale, 2))
        if upscale == 3:
            n_upscale = 1
        m_head = B.conv(in_nc, nc, mode='C')
        m_body = [B.ResBlock(nc, nc, mode='C'+act_mode+'C') for _ in range(nb)]
        m_body.append(B.conv(nc, nc, mode='C'))
        if upsample_mode == 'upconv':
            upsample_block = B.upsample_upconv
        elif upsample_mode == 'pixelshuffle':
            upsample_block = B.upsample_pixelshuffle
        elif upsample_mode == 'convtranspose':
            upsample_block = B.upsample_convtranspose
        else:
            raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))
        if upscale == 3:
            # NOTE(review): m_uper is a single module here; the *m_uper below
            # relies on it being iterable (e.g. an nn.Sequential) — confirm.
            m_uper = upsample_block(nc, nc, mode='3'+act_mode)
        else:
            m_uper = [upsample_block(nc, nc, mode='2'+act_mode) for _ in range(n_upscale)]
        H_conv0 = B.conv(nc, nc, mode='C'+act_mode)
        H_conv1 = B.conv(nc, out_nc, bias=False, mode='C')
        m_tail = B.sequential(H_conv0, H_conv1)
        # ShortcutBlock adds the trunk input to its output (long skip)
        self.model = B.sequential(m_head, B.ShortcutBlock(B.sequential(*m_body)), *m_uper, m_tail)
    def forward(self, x):
        x = self.model(x)
        return x
# --------------------------------------------
# modified SRResNet v0.1
# https://github.com/xinntao/ESRGAN
# --------------------------------------------
class MSRResNet1(nn.Module):
    """Modified SRResNet (v0.1): trunk of no-BN residual blocks, pixel-shuffle
    upsampling, and a bilinear skip of the input added to the output.

    NOTE(review): only upscale in {2, 3, 4} is handled — any other value
    leaves ``self.upconv1`` undefined and crashes in initialize_weights below.
    """
    def __init__(self, in_nc=3, out_nc=3, nc=64, nb=16, upscale=4, act_mode='R', upsample_mode='upconv'):
        # act_mode / upsample_mode are accepted for signature parity with
        # MSRResNet0 but are not used by this variant.
        super(MSRResNet1, self).__init__()
        self.upscale = upscale
        self.conv_first = nn.Conv2d(in_nc, nc, 3, 1, 1, bias=True)
        basic_block = functools.partial(ResidualBlock_noBN, nc=nc)
        self.recon_trunk = make_layer(basic_block, nb)
        # upsampling
        if self.upscale == 2:
            self.upconv1 = nn.Conv2d(nc, nc * 4, 3, 1, 1, bias=True)
            self.pixel_shuffle = nn.PixelShuffle(2)
        elif self.upscale == 3:
            self.upconv1 = nn.Conv2d(nc, nc * 9, 3, 1, 1, bias=True)
            self.pixel_shuffle = nn.PixelShuffle(3)
        elif self.upscale == 4:
            # x4 is realized as two x2 pixel-shuffle stages
            self.upconv1 = nn.Conv2d(nc, nc * 4, 3, 1, 1, bias=True)
            self.upconv2 = nn.Conv2d(nc, nc * 4, 3, 1, 1, bias=True)
            self.pixel_shuffle = nn.PixelShuffle(2)
        self.HRconv = nn.Conv2d(nc, nc, 3, 1, 1, bias=True)
        self.conv_last = nn.Conv2d(nc, out_nc, 3, 1, 1, bias=True)
        # activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        # initialization (scale 0.1 damps the initial weights)
        initialize_weights([self.conv_first, self.upconv1, self.HRconv, self.conv_last], 0.1)
        if self.upscale == 4:
            initialize_weights(self.upconv2, 0.1)
    def forward(self, x):
        fea = self.lrelu(self.conv_first(x))
        out = self.recon_trunk(fea)
        if self.upscale == 4:
            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))
            out = self.lrelu(self.pixel_shuffle(self.upconv2(out)))
        elif self.upscale == 3 or self.upscale == 2:
            out = self.lrelu(self.pixel_shuffle(self.upconv1(out)))
        out = self.conv_last(self.lrelu(self.HRconv(out)))
        # global skip: bilinearly upsampled input added to the residual output
        base = F.interpolate(x, scale_factor=self.upscale, mode='bilinear', align_corners=False)
        out += base
        return out
def initialize_weights(net_l, scale=1):
    """Kaiming-initialize Conv2d/Linear layers (weights scaled by ``scale``),
    zero their biases, and reset BatchNorm2d to weight=1 / bias=0.

    Args:
        net_l: a module or a list of modules; modified in place.
        scale: multiplier applied to conv/linear weights (use <1 to damp
            residual-branch initialization).
    """
    module_list = net_l if isinstance(net_l, list) else [net_l]
    for net in module_list:
        for m in net.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                init.kaiming_normal_(m.weight, a=0, mode='fan_in')
                m.weight.data *= scale  # for residual block
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias.data, 0.0)
def make_layer(block, n_layers):
    """Stack ``n_layers`` fresh instances produced by ``block()`` into one
    ``nn.Sequential``."""
    return nn.Sequential(*[block() for _ in range(n_layers)])
class ResidualBlock_noBN(nn.Module):
    '''Residual block w/o BN
    ---Conv-ReLU-Conv-+-
     |________________|
    '''
    def __init__(self, nc=64):
        super(ResidualBlock_noBN, self).__init__()
        self.conv1 = nn.Conv2d(nc, nc, 3, 1, 1, bias=True)
        self.conv2 = nn.Conv2d(nc, nc, 3, 1, 1, bias=True)
        # damped Kaiming init (scale 0.1) for stable residual learning
        initialize_weights([self.conv1, self.conv2], 0.1)
    def forward(self, x):
        """Return ``x + conv2(relu(conv1(x)))``."""
        residual = self.conv2(F.relu(self.conv1(x), inplace=True))
        return x + residual
| 6,718 | 35.715847 | 218 | py |
MaskedDenoising | MaskedDenoising-main/models/network_mirnet.py | """
## Learning Enriched Features for Real Image Restoration and Enhancement
## Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, Ming-Hsuan Yang, and Ling Shao
## ECCV 2020
## https://arxiv.org/abs/2003.06792
"""
# --- Imports --- #
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# from pdb import set_trace as stx
# from utils.antialias import Downsample as downsamp
class downsamp(nn.Module):
    """Anti-aliased downsampling ("blur-pool"): pad, blur with a normalized
    binomial filter applied depthwise, then subsample by ``stride``.

    Args:
        pad_type: 'refl'/'reflect', 'repl'/'replicate', or 'zero'.
        filt_size: binomial blur-kernel size; any size >= 1 (the original
            hard-coded table only covered 1..7 and raised NameError beyond).
        stride: subsampling factor.
        channels: number of input channels (filter is applied per channel).
        pad_off: extra padding added on every side.

    Raises:
        ValueError: for an unrecognized ``pad_type``.
    """

    _PAD_LAYERS = {
        'refl': nn.ReflectionPad2d,
        'reflect': nn.ReflectionPad2d,
        'repl': nn.ReplicationPad2d,
        'replicate': nn.ReplicationPad2d,
        'zero': nn.ZeroPad2d,
    }

    def __init__(self, pad_type='reflect', filt_size=3, stride=2, channels=None, pad_off=0):
        super(downsamp, self).__init__()
        self.filt_size = filt_size
        self.pad_off = pad_off
        # asymmetric padding so even-sized kernels stay centred
        half = int(1. * (filt_size - 1) / 2)
        half_up = int(np.ceil(1. * (filt_size - 1) / 2))
        self.pad_sizes = [half + pad_off, half_up + pad_off, half + pad_off, half_up + pad_off]
        self.stride = stride
        self.off = int((self.stride - 1) / 2.)
        self.channels = channels
        # Row of Pascal's triangle == 1-D binomial filter. BUGFIX/generalize:
        # the original hard-coded arrays for filt_size 1..7 and left `a`
        # undefined (NameError) for any other size.
        a = np.array([1.])
        for _ in range(self.filt_size - 1):
            a = np.convolve(a, [1., 1.])
        filt = torch.Tensor(a[:, None] * a[None, :])
        filt = filt / torch.sum(filt)  # normalize: constant inputs are preserved
        self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
        try:
            pad_layer = self._PAD_LAYERS[pad_type]
        except KeyError:
            # original printed a message and then crashed with UnboundLocalError
            raise ValueError('Pad type [%s] not recognized' % pad_type) from None
        self.pad = pad_layer(self.pad_sizes)

    def forward(self, inp):
        if self.filt_size == 1:
            # blur is a no-op; just (optionally pad and) subsample
            if self.pad_off == 0:
                return inp[:, :, ::self.stride, ::self.stride]
            return self.pad(inp)[:, :, ::self.stride, ::self.stride]
        # depthwise blur + stride-subsample in a single grouped conv
        return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])
def get_pad_layer(pad_type):
    """Map a padding-mode name to the matching ``nn`` padding layer class.

    Args:
        pad_type: 'refl'/'reflect', 'repl'/'replicate', or 'zero'.

    Returns:
        The ``nn.Module`` subclass (not an instance) implementing the padding.

    Raises:
        ValueError: for an unrecognized ``pad_type``. (BUGFIX: the original
        only printed a message and then crashed with ``UnboundLocalError``
        when returning the never-assigned ``PadLayer``.)
    """
    if pad_type in ['refl', 'reflect']:
        PadLayer = nn.ReflectionPad2d
    elif pad_type in ['repl', 'replicate']:
        PadLayer = nn.ReplicationPad2d
    elif pad_type == 'zero':
        PadLayer = nn.ZeroPad2d
    else:
        raise ValueError('Pad type [%s] not recognized' % pad_type)
    return PadLayer
##########################################################################
def conv(in_channels, out_channels, kernel_size, bias=False, padding = 1, stride = 1):
    """'Same'-padded Conv2d factory used throughout this module.

    NOTE(review): the ``padding`` argument is accepted but ignored — padding
    is always ``kernel_size // 2``; existing callers rely on this.
    """
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     stride=stride, padding=(kernel_size // 2), bias=bias)
##########################################################################
##---------- Selective Kernel Feature Fusion (SKFF) ----------
class SKFF(nn.Module):
    """Selective Kernel Feature Fusion: fuse ``height`` same-shaped feature
    maps with per-branch, per-channel softmax attention."""
    def __init__(self, in_channels, height=3, reduction=8, bias=False):
        super(SKFF, self).__init__()
        self.height = height
        d = max(int(in_channels / reduction), 4)  # bottleneck width, min 4
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_du = nn.Sequential(nn.Conv2d(in_channels, d, 1, padding=0, bias=bias), nn.PReLU())
        # one 1x1 conv per branch producing its attention logits
        self.fcs = nn.ModuleList(
            nn.Conv2d(d, in_channels, kernel_size=1, stride=1, bias=bias)
            for _ in range(self.height))
        self.softmax = nn.Softmax(dim=1)
    def forward(self, inp_feats):
        """Fuse a list of ``height`` tensors of shape (N, C, H, W)."""
        n, c = inp_feats[0].shape[0], inp_feats[0].shape[1]
        stacked = torch.cat(inp_feats, dim=1)
        stacked = stacked.view(n, self.height, c, stacked.shape[2], stacked.shape[3])
        # squeeze: global statistics of the summed branches
        summary = self.conv_du(self.avg_pool(stacked.sum(dim=1)))
        logits = torch.cat([fc(summary) for fc in self.fcs], dim=1)
        # softmax over the branch axis -> convex combination weights
        attn = self.softmax(logits.view(n, self.height, c, 1, 1))
        return (stacked * attn).sum(dim=1)
##########################################################################
##---------- Spatial Attention ----------
class BasicConv(nn.Module):
    """Conv2d with optional BatchNorm and ReLU bundled in one module."""
    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=False, bias=False):
        super(BasicConv, self).__init__()
        self.out_channels = out_planes
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
        self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
        self.relu = nn.ReLU() if relu else None
    def forward(self, x):
        out = self.conv(x)
        # apply the optional stages in order: BN first, then ReLU
        for stage in (self.bn, self.relu):
            if stage is not None:
                out = stage(out)
        return out
class ChannelPool(nn.Module):
    """Compress the channel dimension to 2 maps: channel-wise max and mean."""
    def forward(self, x):
        max_map = torch.max(x, 1)[0].unsqueeze(1)
        mean_map = torch.mean(x, 1).unsqueeze(1)
        return torch.cat((max_map, mean_map), dim=1)
class spatial_attn_layer(nn.Module):
    """Spatial attention: gate each location of the input by a sigmoid map
    computed from its channel-pooled (max + mean) representation."""
    def __init__(self, kernel_size=5):
        super(spatial_attn_layer, self).__init__()
        self.compress = ChannelPool()
        self.spatial = BasicConv(2, 1, kernel_size, stride=1, padding=(kernel_size - 1) // 2, relu=False)
    def forward(self, x):
        attn_logits = self.spatial(self.compress(x))
        # sigmoid gate broadcasts across channels
        return x * torch.sigmoid(attn_logits)
##########################################################################
## ------ Channel Attention --------------
class ca_layer(nn.Module):
    """Squeeze-and-excitation channel attention."""
    def __init__(self, channel, reduction=8, bias=True):
        super(ca_layer, self).__init__()
        # squeeze: global average pooling, feature map --> one value per channel
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # excite: bottleneck MLP ending in a (0, 1) sigmoid gate per channel
        self.conv_du = nn.Sequential(
            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=bias),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=bias),
            nn.Sigmoid()
        )
    def forward(self, x):
        gate = self.conv_du(self.avg_pool(x))
        return x * gate
##########################################################################
##---------- Dual Attention Unit (DAU) ----------
class DAU(nn.Module):
    """Dual Attention Unit: conv body, then parallel spatial- and
    channel-attention branches fused by a 1x1 conv, with a residual skip."""
    def __init__(
            self, n_feat, kernel_size=3, reduction=8,
            bias=False, bn=False, act=nn.PReLU(), res_scale=1):
        # NOTE(review): the default `act=nn.PReLU()` module is shared across
        # instances (mutable default) — kept for behavioral compatibility.
        super(DAU, self).__init__()
        self.body = nn.Sequential(
            conv(n_feat, n_feat, kernel_size, bias=bias),
            act,
            conv(n_feat, n_feat, kernel_size, bias=bias))
        ## Spatial Attention
        self.SA = spatial_attn_layer()
        ## Channel Attention
        self.CA = ca_layer(n_feat, reduction, bias=bias)
        self.conv1x1 = nn.Conv2d(n_feat * 2, n_feat, kernel_size=1, bias=bias)
    def forward(self, x):
        feats = self.body(x)
        fused = self.conv1x1(torch.cat([self.SA(feats), self.CA(feats)], dim=1))
        return fused + x
##########################################################################
##---------- Resizing Modules ----------
class ResidualDownSample(nn.Module):
    """Halve spatial size and double channels via two parallel paths
    (conv-heavy top path and a plain antialiased-downsample bottom path)
    that are summed."""

    def __init__(self, in_channels, bias=False):
        super(ResidualDownSample, self).__init__()
        self.top = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, 1, stride=1, padding=0, bias=bias),
            nn.PReLU(),
            nn.Conv2d(in_channels, in_channels, 3, stride=1, padding=1, bias=bias),
            nn.PReLU(),
            downsamp(channels=in_channels, filt_size=3, stride=2),
            nn.Conv2d(in_channels, in_channels * 2, 1, stride=1, padding=0, bias=bias),
        )
        self.bot = nn.Sequential(
            downsamp(channels=in_channels, filt_size=3, stride=2),
            nn.Conv2d(in_channels, in_channels * 2, 1, stride=1, padding=0, bias=bias),
        )

    def forward(self, x):
        return self.top(x) + self.bot(x)
class DownSample(nn.Module):
    """Chain log2(scale_factor) ResidualDownSample stages; channels grow by
    `stride` at each stage."""

    def __init__(self, in_channels, scale_factor, stride=2, kernel_size=3):
        super(DownSample, self).__init__()
        self.scale_factor = int(np.log2(scale_factor))
        stages = []
        channels = in_channels
        for _ in range(self.scale_factor):
            stages.append(ResidualDownSample(channels))
            channels = int(channels * stride)
        self.body = nn.Sequential(*stages)

    def forward(self, x):
        return self.body(x)
class ResidualUpSample(nn.Module):
    """Double spatial size and halve channels via two parallel paths
    (transposed-conv top path and bilinear-upsample bottom path) summed."""

    def __init__(self, in_channels, bias=False):
        super(ResidualUpSample, self).__init__()
        self.top = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, 1, stride=1, padding=0, bias=bias),
            nn.PReLU(),
            nn.ConvTranspose2d(in_channels, in_channels, 3, stride=2,
                               padding=1, output_padding=1, bias=bias),
            nn.PReLU(),
            nn.Conv2d(in_channels, in_channels // 2, 1, stride=1, padding=0, bias=bias),
        )
        # NOTE: align_corners is set from `bias` in the original; with the
        # default bias=False this means align_corners=False.
        self.bot = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=bias),
            nn.Conv2d(in_channels, in_channels // 2, 1, stride=1, padding=0, bias=bias),
        )

    def forward(self, x):
        return self.top(x) + self.bot(x)
class UpSample(nn.Module):
    """Chain log2(scale_factor) ResidualUpSample stages; channels shrink by
    `stride` at each stage."""

    def __init__(self, in_channels, scale_factor, stride=2, kernel_size=3):
        super(UpSample, self).__init__()
        self.scale_factor = int(np.log2(scale_factor))
        stages = []
        channels = in_channels
        for _ in range(self.scale_factor):
            stages.append(ResidualUpSample(channels))
            channels = int(channels // stride)
        self.body = nn.Sequential(*stages)

    def forward(self, x):
        return self.body(x)
##########################################################################
##---------- Multi-Scale Resiudal Block (MSRB) ----------
class MSRB(nn.Module):
    """Multi-Scale Residual Block.

    Maintains `height` parallel resolution streams for `width` columns of
    DAU blocks; streams exchange information through learned up/down
    resampling and SKFF selective-kernel fusion, and the result is added
    back to the input (long residual).
    """
    def __init__(self, n_feat, height, width, stride, bias):
        super(MSRB, self).__init__()
        self.n_feat, self.height, self.width = n_feat, height, width
        # Grid of DAUs: row i works at 1/2^i resolution with n_feat*stride^i channels.
        # NOTE(review): [DAU(...)]*width repeats the SAME module instance across a
        # row (shared weights within a row) — confirm this is intended.
        self.blocks = nn.ModuleList([nn.ModuleList([DAU(int(n_feat*stride**i))]*width) for i in range(height)])
        INDEX = np.arange(0,width, 2)
        FEATS = [int((stride**i)*n_feat) for i in range(height)]
        SCALE = [2**i for i in range(1,height)]
        # Final upsamplers that bring every stream back to full resolution.
        self.last_up = nn.ModuleDict()
        for i in range(1,height):
            self.last_up.update({f'{i}': UpSample(int(n_feat*stride**i),2**i,stride)})
        # Cross-stream resamplers, keyed by '<channels>_<scale>'.
        self.down = nn.ModuleDict()
        self.up = nn.ModuleDict()
        i=0
        SCALE.reverse()
        for feat in FEATS:
            for scale in SCALE[i:]:
                self.down.update({f'{feat}_{scale}': DownSample(feat,scale,stride)})
            i+=1
        i=0
        FEATS.reverse()
        for feat in FEATS:
            for scale in SCALE[i:]:
                self.up.update({f'{feat}_{scale}': UpSample(feat,scale,stride)})
            i+=1
        self.conv_out = nn.Conv2d(n_feat, n_feat, kernel_size=3, padding=1, bias=bias)
        # One SKFF fuser per resolution level (index 0 also used for the final merge).
        self.selective_kernel = nn.ModuleList([SKFF(n_feat*stride**i, height) for i in range(height)])
    def forward(self, x):
        inp = x.clone()
        #col 1 only: seed each stream by progressively downsampling by 2.
        blocks_out = []
        for j in range(self.height):
            if j==0:
                inp = self.blocks[j][0](inp)
            else:
                inp = self.blocks[j][0](self.down[f'{inp.size(1)}_{2}'](inp))
            blocks_out.append(inp)
        #rest of grid
        for i in range(1,self.width):
            #Mesh
            # Replace condition(i%2!=0) with True(Mesh) or False(Plain)
            # if i%2!=0:
            if True:
                tmp=[]
                for j in range(self.height):
                    TENSOR = []
                    nfeats = (2**j)*self.n_feat
                    # Resample every stream k to stream j's resolution, then fuse.
                    for k in range(self.height):
                        TENSOR.append(self.select_up_down(blocks_out[k], j, k))
                    selective_kernel_fusion = self.selective_kernel[j](TENSOR)
                    tmp.append(selective_kernel_fusion)
            #Plain
            else:
                tmp = blocks_out
            #Forward through either mesh or plain
            for j in range(self.height):
                blocks_out[j] = self.blocks[j][i](tmp[j])
        #Sum after grid: bring all streams to full resolution, fuse, project, add x.
        out=[]
        for k in range(self.height):
            out.append(self.select_last_up(blocks_out[k], k))
        out = self.selective_kernel[0](out)
        out = self.conv_out(out)
        out = out + x
        return out
    def select_up_down(self, tensor, j, k):
        # Resample stream k's tensor to stream j's resolution (identity if equal).
        if j==k:
            return tensor
        else:
            diff = 2 ** np.abs(j-k)
            if j<k:
                return self.up[f'{tensor.size(1)}_{diff}'](tensor)
            else:
                return self.down[f'{tensor.size(1)}_{diff}'](tensor)
    def select_last_up(self, tensor, k):
        # Upsample stream k back to full resolution (identity for stream 0).
        if k==0:
            return tensor
        else:
            return self.last_up[f'{k}'](tensor)
##########################################################################
##---------- Recursive Residual Group (RRG) ----------
class RRG(nn.Module):
    """Recursive Residual Group: a stack of MSRBs plus a tail conv, wrapped
    in a long residual connection."""

    def __init__(self, n_feat, n_MSRB, height, width, stride, bias=False):
        super(RRG, self).__init__()
        stages = [MSRB(n_feat, height, width, stride, bias) for _ in range(n_MSRB)]
        stages.append(conv(n_feat, n_feat, kernel_size=3))
        self.body = nn.Sequential(*stages)

    def forward(self, x):
        return self.body(x) + x
##########################################################################
##---------- MIRNet -----------------------
class MIRNet(nn.Module):
def __init__(self, in_channels=3, out_channels=3, n_feat=64, kernel_size=3, stride=2, n_RRG=3, n_MSRB=2, height=3, width=2, bias=False):
super(MIRNet, self).__init__()
self.conv_in = nn.Conv2d(in_channels, n_feat, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, bias=bias)
modules_body = [RRG(n_feat, n_MSRB, height, width, stride, bias) for _ in range(n_RRG)]
self.body = nn.Sequential(*modules_body)
self.conv_out = nn.Conv2d(n_feat, out_channels, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, bias=bias)
def forward(self, x):
h = self.conv_in(x)
h = self.body(h)
h = self.conv_out(h)
h += x
return | 15,375 | 35.961538 | 154 | py |
MaskedDenoising | MaskedDenoising-main/models/network_ffdnet.py | import numpy as np
import torch.nn as nn
import models.basicblock as B
import torch
"""
# --------------------------------------------
# FFDNet (15 or 12 conv layers)
# --------------------------------------------
Reference:
@article{zhang2018ffdnet,
title={FFDNet: Toward a fast and flexible solution for CNN-based image denoising},
author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
journal={IEEE Transactions on Image Processing},
volume={27},
number={9},
pages={4608--4622},
year={2018},
publisher={IEEE}
}
"""
# --------------------------------------------
# FFDNet
# --------------------------------------------
class FFDNet(nn.Module):
    """FFDNet denoiser: operates on 2x pixel-unshuffled sub-images with a
    per-image noise-level map appended as an extra input channel."""

    def __init__(self, in_nc=1, out_nc=1, nc=64, nb=15, act_mode='R'):
        """
        # ------------------------------------
        in_nc: channel number of input
        out_nc: channel number of output
        nc: channel number of the hidden conv layers
        nb: total number of conv layers
        act_mode: batch norm + activation function; 'BR' means BN+ReLU.
        # ------------------------------------
        """
        super(FFDNet, self).__init__()
        assert 'R' in act_mode or 'L' in act_mode, 'Examples of activation function: R, L, BR, BL, IR, IL'
        bias = True
        sf = 2  # sub-sampling factor
        self.m_down = B.PixelUnShuffle(upscale_factor=sf)
        # +1 input channel for the noise-level map concatenated in forward().
        head = B.conv(in_nc * sf * sf + 1, nc, mode='C' + act_mode[-1], bias=bias)
        body = [B.conv(nc, nc, mode='C' + act_mode, bias=bias) for _ in range(nb - 2)]
        tail = B.conv(nc, out_nc * sf * sf, mode='C', bias=bias)
        self.model = B.sequential(head, *body, tail)
        self.m_up = nn.PixelShuffle(upscale_factor=sf)

    def forward(self, x, sigma):
        h, w = x.size()[-2:]
        # Replication-pad to even spatial size so the 2x unshuffle is exact.
        pad_bottom = int(np.ceil(h / 2) * 2 - h)
        pad_right = int(np.ceil(w / 2) * 2 - w)
        x = torch.nn.ReplicationPad2d((0, pad_right, 0, pad_bottom))(x)
        x = self.m_down(x)
        # Broadcast the (N, 1, 1, 1) noise level to a full-resolution map.
        noise_map = sigma.repeat(1, 1, x.size()[-2], x.size()[-1])
        x = self.model(torch.cat((x, noise_map), 1))
        x = self.m_up(x)
        return x[..., :h, :w]  # crop back to the original size
if __name__ == '__main__':
    # Smoke test: build the default model, describe it, and push one batch through.
    from utils import utils_model
    model = FFDNet(in_nc=1, out_nc=1, nc=64, nb=15, act_mode='R')
    print(utils_model.describe_model(model))
    x = torch.randn((2,1,240,240))
    sigma = torch.randn(2,1,1,1)
    x = model(x, sigma)
    print(x.shape)

    # run models/network_ffdnet.py
| 2,593 | 29.517647 | 108 | py |
MaskedDenoising | MaskedDenoising-main/models/basicblock.py | from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
'''
# --------------------------------------------
# Advanced nn.Sequential
# https://github.com/xinntao/BasicSR
# --------------------------------------------
'''
def sequential(*args):
    """Advanced nn.Sequential.

    Flattens any nn.Sequential arguments into their children before
    combining. A single non-OrderedDict argument is returned unchanged.

    Args:
        nn.Sequential, nn.Module
    Returns:
        nn.Sequential
    """
    if len(args) == 1:
        if isinstance(args[0], OrderedDict):
            raise NotImplementedError('sequential does not support OrderedDict input.')
        return args[0]  # No sequential is needed.
    flattened = []
    for module in args:
        if isinstance(module, nn.Sequential):
            flattened.extend(module.children())
        elif isinstance(module, nn.Module):
            flattened.append(module)
    return nn.Sequential(*flattened)
'''
# --------------------------------------------
# Useful blocks
# https://github.com/xinntao/BasicSR
# --------------------------------
# conv + normaliation + relu (conv)
# (PixelUnShuffle)
# (ConditionalBatchNorm2d)
# concat (ConcatBlock)
# sum (ShortcutBlock)
# resblock (ResBlock)
# Channel Attention (CA) Layer (CALayer)
# Residual Channel Attention Block (RCABlock)
# Residual Channel Attention Group (RCAGroup)
# Residual Dense Block (ResidualDenseBlock_5C)
# Residual in Residual Dense Block (RRDB)
# --------------------------------------------
'''
# --------------------------------------------
# return nn.Sequantial of (Conv + BN + ReLU)
# --------------------------------------------
def conv(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CBR', negative_slope=0.2):
    """Build a layer pipeline from a mode string; each character maps to a layer:

    C/T  Conv2d / ConvTranspose2d
    B/I  BatchNorm2d / InstanceNorm2d
    R/r  ReLU (inplace / not);  L/l  LeakyReLU (inplace / not)
    2/3/4  PixelShuffle x2/x3/x4;  U/u/v  nearest Upsample x2/x3/x4
    M/A  MaxPool2d / AvgPool2d

    Returns the layers combined via `sequential` (a single layer is
    returned as-is).
    """
    L = []
    for t in mode:
        if t == 'C':
            L.append(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias))
        elif t == 'T':
            L.append(nn.ConvTranspose2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias))
        elif t == 'B':
            L.append(nn.BatchNorm2d(out_channels, momentum=0.9, eps=1e-04, affine=True))
        elif t == 'I':
            L.append(nn.InstanceNorm2d(out_channels, affine=True))
        elif t == 'R':
            L.append(nn.ReLU(inplace=True))
        elif t == 'r':
            L.append(nn.ReLU(inplace=False))
        elif t == 'L':
            L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=True))
        elif t == 'l':
            L.append(nn.LeakyReLU(negative_slope=negative_slope, inplace=False))
        elif t == '2':
            L.append(nn.PixelShuffle(upscale_factor=2))
        elif t == '3':
            L.append(nn.PixelShuffle(upscale_factor=3))
        elif t == '4':
            L.append(nn.PixelShuffle(upscale_factor=4))
        elif t == 'U':
            L.append(nn.Upsample(scale_factor=2, mode='nearest'))
        elif t == 'u':
            L.append(nn.Upsample(scale_factor=3, mode='nearest'))
        elif t == 'v':
            L.append(nn.Upsample(scale_factor=4, mode='nearest'))
        elif t == 'M':
            L.append(nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=0))
        elif t == 'A':
            L.append(nn.AvgPool2d(kernel_size=kernel_size, stride=stride, padding=0))
        else:
            # Bug fix: the message previously lacked the '{}' placeholder,
            # so .format(t) never inserted the offending mode character.
            raise NotImplementedError('Undefined type: {}'.format(t))
    return sequential(*L)
# --------------------------------------------
# inverse of pixel_shuffle
# --------------------------------------------
def pixel_unshuffle(input, upscale_factor):
    r"""Rearranges elements in a Tensor of shape :math:`(C, rH, rW)` to a
    tensor of shape :math:`(*, r^2C, H, W)` (inverse of pixel shuffle).

    Authors:
        Zhaoyi Yan, https://github.com/Zhaoyi-Yan
        Kai Zhang, https://github.com/cszn/FFDNet
    Date:
        01/Jan/2019
    """
    b, c, h, w = input.size()
    r = upscale_factor
    out_h, out_w = h // r, w // r
    # Split each spatial axis into (blocks, r), then fold both r factors
    # into the channel axis: (b, c, r, r, out_h, out_w).
    blocked = input.contiguous().view(b, c, out_h, r, out_w, r)
    folded = blocked.permute(0, 1, 3, 5, 2, 4).contiguous()
    return folded.view(b, c * r ** 2, out_h, out_w)
class PixelUnShuffle(nn.Module):
    r"""Module wrapper for :func:`pixel_unshuffle`: rearranges a tensor of
    shape :math:`(*, C, rH, rW)` into :math:`(*, r^2C, H, W)`.

    Authors:
        Zhaoyi Yan, https://github.com/Zhaoyi-Yan
        Kai Zhang, https://github.com/cszn/FFDNet
    Date:
        01/Jan/2019
    """
    def __init__(self, upscale_factor):
        # upscale_factor: spatial factor r folded into the channel dimension.
        super(PixelUnShuffle, self).__init__()
        self.upscale_factor = upscale_factor
    def forward(self, input):
        return pixel_unshuffle(input, self.upscale_factor)
    def extra_repr(self):
        # Shown inside repr(), e.g. "PixelUnShuffle(upscale_factor=2)".
        return 'upscale_factor={}'.format(self.upscale_factor)
# --------------------------------------------
# conditional batch norm
# https://github.com/pytorch/pytorch/issues/8985#issuecomment-405080775
# --------------------------------------------
class ConditionalBatchNorm2d(nn.Module):
    """Batch norm whose affine (gamma, beta) parameters are looked up per
    class label from an embedding table.

    https://github.com/pytorch/pytorch/issues/8985#issuecomment-405080775
    """

    def __init__(self, num_features, num_classes):
        super().__init__()
        self.num_features = num_features
        self.bn = nn.BatchNorm2d(num_features, affine=False)
        # Embedding row i holds [gamma_i | beta_i] for class i.
        self.embed = nn.Embedding(num_classes, num_features * 2)
        self.embed.weight.data[:, :num_features].normal_(1, 0.02)  # scale ~ N(1, 0.02)
        self.embed.weight.data[:, num_features:].zero_()           # bias starts at 0

    def forward(self, x, y):
        normed = self.bn(x)
        gamma, beta = self.embed(y).chunk(2, 1)
        gamma = gamma.view(-1, self.num_features, 1, 1)
        beta = beta.view(-1, self.num_features, 1, 1)
        return gamma * normed + beta
# --------------------------------------------
# Concat the output of a submodule to its input
# --------------------------------------------
class ConcatBlock(nn.Module):
    """Concatenate a submodule's output to its input along the channel axis."""

    def __init__(self, submodule):
        super(ConcatBlock, self).__init__()
        self.sub = submodule

    def forward(self, x):
        return torch.cat((x, self.sub(x)), dim=1)

    def __repr__(self):
        return self.sub.__repr__() + 'concat'
# --------------------------------------------
# sum the output of a submodule to its input
# --------------------------------------------
class ShortcutBlock(nn.Module):
    """Residual wrapper: sum a submodule's output with its input."""

    def __init__(self, submodule):
        super(ShortcutBlock, self).__init__()
        self.sub = submodule

    def forward(self, x):
        return x + self.sub(x)

    def __repr__(self):
        return 'Identity + \n|' + self.sub.__repr__().replace('\n', '\n|')
# --------------------------------------------
# Res Block: x + conv(relu(conv(x)))
# --------------------------------------------
class ResBlock(nn.Module):
    """Res Block: x + conv(relu(conv(x))), built from a `conv` mode string."""

    def __init__(self, in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CRC', negative_slope=0.2):
        super(ResBlock, self).__init__()
        assert in_channels == out_channels, 'Only support in_channels==out_channels.'
        if mode[0] in ['R', 'L']:
            # A leading activation is made non-inplace ('r'/'l') so the
            # residual input is not modified in place.
            mode = mode[0].lower() + mode[1:]
        self.res = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode, negative_slope)

    def forward(self, x):
        return x + self.res(x)
# --------------------------------------------
# simplified information multi-distillation block (IMDB)
# x + conv1(concat(split(relu(conv(x)))x3))
# --------------------------------------------
class IMDBlock(nn.Module):
    """Simplified information multi-distillation block (IMDB):
    x + conv1x1(concat(split(relu(conv(x))) x3, final distilled conv))

    @inproceedings{hui2019lightweight,
      title={Lightweight Image Super-Resolution with Information Multi-distillation Network},
      author={Hui, Zheng and Gao, Xinbo and Yang, Yunchu and Wang, Xiumei},
      booktitle={Proceedings of the 27th ACM International Conference on Multimedia (ACM MM)},
      pages={2024--2032},
      year={2019}
    }
    @inproceedings{zhang2019aim,
      title={AIM 2019 Challenge on Constrained Super-Resolution: Methods and Results},
      author={Kai Zhang and Shuhang Gu and Radu Timofte and others},
      booktitle={IEEE International Conference on Computer Vision Workshops},
      year={2019}
    }
    """

    def __init__(self, in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CL', d_rate=0.25, negative_slope=0.05):
        super(IMDBlock, self).__init__()
        self.d_nc = int(in_channels * d_rate)     # distilled channels kept per stage
        self.r_nc = int(in_channels - self.d_nc)  # remaining channels passed on
        assert mode[0] == 'C', 'convolutional layer first'
        self.conv1 = conv(in_channels, in_channels, kernel_size, stride, padding, bias, mode, negative_slope)
        self.conv2 = conv(self.r_nc, in_channels, kernel_size, stride, padding, bias, mode, negative_slope)
        self.conv3 = conv(self.r_nc, in_channels, kernel_size, stride, padding, bias, mode, negative_slope)
        # Final stage: conv only (mode[0]), producing the last distilled slice.
        self.conv4 = conv(self.r_nc, self.d_nc, kernel_size, stride, padding, bias, mode[0], negative_slope)
        self.conv1x1 = conv(self.d_nc * 4, out_channels, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0], negative_slope=negative_slope)

    def forward(self, x):
        split = (self.d_nc, self.r_nc)
        d1, r1 = torch.split(self.conv1(x), split, dim=1)
        d2, r2 = torch.split(self.conv2(r1), split, dim=1)
        d3, r3 = torch.split(self.conv3(r2), split, dim=1)
        distilled = torch.cat((d1, d2, d3, self.conv4(r3)), dim=1)
        return x + self.conv1x1(distilled)
# --------------------------------------------
# Enhanced Spatial Attention (ESA)
# --------------------------------------------
class ESA(nn.Module):
    """Enhanced Spatial Attention: derive a sigmoid attention map from a
    channel-reduced branch (strided conv + maxpool + convs + bilinear
    upsample, merged with a 1x1 skip) and gate the input with it."""

    def __init__(self, channel=64, reduction=4, bias=True):
        super(ESA, self).__init__()
        # -->conv3x3(conv21)-----------------------------------------------------------------------------------------+
        # conv1x1(conv1)-->conv3x3-2(conv2)-->maxpool7-3-->conv3x3(conv3)(relu)-->conv3x3(conv4)(relu)-->conv3x3(conv5)-->bilinear--->conv1x1(conv6)-->sigmoid
        self.r_nc = channel // reduction
        self.conv1 = nn.Conv2d(channel, self.r_nc, kernel_size=1)
        self.conv21 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=1)  # skip path
        self.conv2 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, stride=2, padding=0)
        self.conv3 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
        self.conv4 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
        self.conv5 = nn.Conv2d(self.r_nc, self.r_nc, kernel_size=3, padding=1)
        self.conv6 = nn.Conv2d(self.r_nc, channel, kernel_size=1)
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        reduced = self.conv1(x)
        attn = F.max_pool2d(self.conv2(reduced), kernel_size=7, stride=3)  # ~1/6 scale
        attn = self.relu(self.conv3(attn))
        attn = self.relu(self.conv4(attn))
        attn = F.interpolate(self.conv5(attn), (x.size(2), x.size(3)),
                             mode='bilinear', align_corners=False)
        attn = self.conv6(attn + self.conv21(reduced))
        return x.mul(self.sigmoid(attn))
class CFRB(nn.Module):
    """Channel-distillation residual block followed by Enhanced Spatial
    Attention: three residual conv stages each emit a distilled 1x1 slice,
    a fourth conv emits the last slice, and the concatenated slices are
    fused by a 1x1 conv and gated by ESA."""
    def __init__(self, in_channels=50, out_channels=50, kernel_size=3, stride=1, padding=1, bias=True, mode='CL', d_rate=0.5, negative_slope=0.05):
        super(CFRB, self).__init__()
        # d_nc: distilled channels per stage; r_nc: channels carried forward.
        self.d_nc = int(in_channels * d_rate)
        self.r_nc = in_channels  # int(in_channels - self.d_nc)
        assert mode[0] == 'C', 'convolutional layer first'
        self.conv1_d = conv(in_channels, self.d_nc, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0])
        self.conv1_r = conv(in_channels, self.r_nc, kernel_size, stride, padding, bias=bias, mode=mode[0])
        self.conv2_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0])
        self.conv2_r = conv(self.r_nc, self.r_nc, kernel_size, stride, padding, bias=bias, mode=mode[0])
        self.conv3_d = conv(self.r_nc, self.d_nc, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0])
        self.conv3_r = conv(self.r_nc, self.r_nc, kernel_size, stride, padding, bias=bias, mode=mode[0])
        self.conv4_d = conv(self.r_nc, self.d_nc, kernel_size, stride, padding, bias=bias, mode=mode[0])
        self.conv1x1 = conv(self.d_nc*4, out_channels, kernel_size=1, stride=1, padding=0, bias=bias, mode=mode[0])
        # Standalone activation built from the mode's last character (e.g. 'L').
        self.act = conv(mode=mode[-1], negative_slope=negative_slope)
        self.esa = ESA(in_channels, reduction=4, bias=True)
    def forward(self, x):
        d1 = self.conv1_d(x)
        x = self.act(self.conv1_r(x)+x)   # residual conv stage 1
        d2 = self.conv2_d(x)
        x = self.act(self.conv2_r(x)+x)   # residual conv stage 2
        d3 = self.conv3_d(x)
        x = self.act(self.conv3_r(x)+x)   # residual conv stage 3
        x = self.conv4_d(x)
        x = self.act(torch.cat([d1, d2, d3, x], dim=1))
        x = self.esa(self.conv1x1(x))
        return x
# --------------------------------------------
# Channel Attention (CA) Layer
# --------------------------------------------
class CALayer(nn.Module):
    """Channel Attention (CA) layer: squeeze (global average pool) and
    excite (bottleneck 1x1 convs + sigmoid), then rescale the input."""

    def __init__(self, channel=64, reduction=16):
        super(CALayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_fc = nn.Sequential(
            nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
            nn.Sigmoid(),
        )

    def forward(self, x):
        scale = self.conv_fc(self.avg_pool(x))
        return x * scale
# --------------------------------------------
# Residual Channel Attention Block (RCAB)
# --------------------------------------------
class RCABlock(nn.Module):
    """Residual Channel Attention Block (RCAB): x + CA(conv(act(conv(x))))."""

    def __init__(self, in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CRC', reduction=16, negative_slope=0.2):
        super(RCABlock, self).__init__()
        assert in_channels == out_channels, 'Only support in_channels==out_channels.'
        if mode[0] in ['R', 'L']:
            # Leading activation becomes non-inplace so the residual input survives.
            mode = mode[0].lower() + mode[1:]
        self.res = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode, negative_slope)
        self.ca = CALayer(out_channels, reduction)

    def forward(self, x):
        return self.ca(self.res(x)) + x
# --------------------------------------------
# Residual Channel Attention Group (RG)
# --------------------------------------------
class RCAGroup(nn.Module):
    """Residual Channel Attention Group (RG): nb RCABlocks plus a tail conv,
    wrapped in a long skip connection."""

    def __init__(self, in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='CRC', reduction=16, nb=12, negative_slope=0.2):
        super(RCAGroup, self).__init__()
        assert in_channels == out_channels, 'Only support in_channels==out_channels.'
        if mode[0] in ['R', 'L']:
            mode = mode[0].lower() + mode[1:]
        blocks = [RCABlock(in_channels, out_channels, kernel_size, stride, padding,
                           bias, mode, reduction, negative_slope) for _ in range(nb)]
        blocks.append(conv(out_channels, out_channels, mode='C'))
        self.rg = nn.Sequential(*blocks)  # self.rg = ShortcutBlock(nn.Sequential(*RG))

    def forward(self, x):
        return self.rg(x) + x
# --------------------------------------------
# Residual Dense Block
# style: 5 convs
# --------------------------------------------
class ResidualDenseBlock_5C(nn.Module):
    """Residual Dense Block (5 convs): each conv receives the concatenation
    of the input and all previous outputs; the final output is scaled by
    0.2 and added to the input."""

    def __init__(self, nc=64, gc=32, kernel_size=3, stride=1, padding=1, bias=True, mode='CR', negative_slope=0.2):
        super(ResidualDenseBlock_5C, self).__init__()
        # gc: growth channel
        self.conv1 = conv(nc, gc, kernel_size, stride, padding, bias, mode, negative_slope)
        self.conv2 = conv(nc + gc, gc, kernel_size, stride, padding, bias, mode, negative_slope)
        self.conv3 = conv(nc + 2 * gc, gc, kernel_size, stride, padding, bias, mode, negative_slope)
        self.conv4 = conv(nc + 3 * gc, gc, kernel_size, stride, padding, bias, mode, negative_slope)
        # Last conv has no activation (mode[:-1]) and maps back to nc channels.
        self.conv5 = conv(nc + 4 * gc, nc, kernel_size, stride, padding, bias, mode[:-1], negative_slope)

    def forward(self, x):
        feats = [x]
        for layer in (self.conv1, self.conv2, self.conv3, self.conv4):
            feats.append(layer(torch.cat(feats, 1)))
        out = self.conv5(torch.cat(feats, 1))
        return out.mul_(0.2) + x
# --------------------------------------------
# Residual in Residual Dense Block
# 3x5c
# --------------------------------------------
class RRDB(nn.Module):
    """Residual in Residual Dense Block: three chained 5-conv RDBs, scaled
    by 0.2 and added to the input."""

    def __init__(self, nc=64, gc=32, kernel_size=3, stride=1, padding=1, bias=True, mode='CR', negative_slope=0.2):
        super(RRDB, self).__init__()
        args = (nc, gc, kernel_size, stride, padding, bias, mode, negative_slope)
        self.RDB1 = ResidualDenseBlock_5C(*args)
        self.RDB2 = ResidualDenseBlock_5C(*args)
        self.RDB3 = ResidualDenseBlock_5C(*args)

    def forward(self, x):
        out = self.RDB3(self.RDB2(self.RDB1(x)))
        return out.mul_(0.2) + x
"""
# --------------------------------------------
# Upsampler
# Kai Zhang, https://github.com/cszn/KAIR
# --------------------------------------------
# upsample_pixelshuffle
# upsample_upconv
# upsample_convtranspose
# --------------------------------------------
"""
# --------------------------------------------
# conv + subp (+ relu)
# --------------------------------------------
def upsample_pixelshuffle(in_channels=64, out_channels=3, kernel_size=3, stride=1, padding=1, bias=True, mode='2R', negative_slope=0.2):
    """Conv producing scale^2 * out_channels followed by PixelShuffle
    (+ optional norm/activation from the remaining mode chars)."""
    assert len(mode) < 4 and mode[0] in ['2', '3', '4'], 'mode examples: 2, 2R, 2BR, 3, ..., 4BR.'
    scale = int(mode[0])
    return conv(in_channels, out_channels * scale ** 2, kernel_size, stride, padding,
                bias, mode='C' + mode, negative_slope=negative_slope)
# --------------------------------------------
# nearest_upsample + conv (+ R)
# --------------------------------------------
def upsample_upconv(in_channels=64, out_channels=3, kernel_size=3, stride=1, padding=1, bias=True, mode='2R', negative_slope=0.2):
    """Nearest-neighbour upsample followed by conv (+ optional norm/act)."""
    assert len(mode) < 4 and mode[0] in ['2', '3', '4'], 'mode examples: 2, 2R, 2BR, 3, ..., 4BR'
    # Map the scale digit to the matching upsample+conv mode characters.
    uc = {'2': 'UC', '3': 'uC', '4': 'vC'}[mode[0]]
    mode = mode.replace(mode[0], uc)
    return conv(in_channels, out_channels, kernel_size, stride, padding, bias,
                mode=mode, negative_slope=negative_slope)
# --------------------------------------------
# convTranspose (+ relu)
# --------------------------------------------
def upsample_convtranspose(in_channels=64, out_channels=3, kernel_size=2, stride=2, padding=0, bias=True, mode='2R', negative_slope=0.2):
    """Transposed-conv upsampling (+ optional norm/act). Note: kernel size
    and stride are both taken from the scale digit, overriding the arguments."""
    assert len(mode) < 4 and mode[0] in ['2', '3', '4'], 'mode examples: 2, 2R, 2BR, 3, ..., 4BR.'
    scale = int(mode[0])
    mode = mode.replace(mode[0], 'T')
    return conv(in_channels, out_channels, scale, scale, padding, bias, mode, negative_slope)
'''
# --------------------------------------------
# Downsampler
# Kai Zhang, https://github.com/cszn/KAIR
# --------------------------------------------
# downsample_strideconv
# downsample_maxpool
# downsample_avgpool
# --------------------------------------------
'''
# --------------------------------------------
# strideconv (+ relu)
# --------------------------------------------
def downsample_strideconv(in_channels=64, out_channels=64, kernel_size=2, stride=2, padding=0, bias=True, mode='2R', negative_slope=0.2):
    """Strided-conv downsampling (+ optional norm/act). Note: kernel size
    and stride are both taken from the scale digit, overriding the arguments."""
    assert len(mode) < 4 and mode[0] in ['2', '3', '4'], 'mode examples: 2, 2R, 2BR, 3, ..., 4BR.'
    scale = int(mode[0])
    mode = mode.replace(mode[0], 'C')
    return conv(in_channels, out_channels, scale, scale, padding, bias, mode, negative_slope)
# --------------------------------------------
# maxpooling + conv (+ relu)
# --------------------------------------------
def downsample_maxpool(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=0, bias=True, mode='2R', negative_slope=0.2):
    """MaxPool downsampling (scale from the leading digit) followed by a
    conv (+ optional norm/act)."""
    assert len(mode) < 4 and mode[0] in ['2', '3'], 'mode examples: 2, 2R, 2BR, 3, ..., 3BR.'
    pool_size = int(mode[0])
    mode = mode.replace(mode[0], 'MC')
    pool = conv(kernel_size=pool_size, stride=pool_size, mode=mode[0], negative_slope=negative_slope)
    tail = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode=mode[1:], negative_slope=negative_slope)
    return sequential(pool, tail)
# --------------------------------------------
# averagepooling + conv (+ relu)
# --------------------------------------------
def downsample_avgpool(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1, bias=True, mode='2R', negative_slope=0.2):
    """AvgPool downsampling (scale from the leading digit) followed by a
    conv (+ optional norm/act)."""
    assert len(mode) < 4 and mode[0] in ['2', '3'], 'mode examples: 2, 2R, 2BR, 3, ..., 3BR.'
    pool_size = int(mode[0])
    mode = mode.replace(mode[0], 'AC')
    pool = conv(kernel_size=pool_size, stride=pool_size, mode=mode[0], negative_slope=negative_slope)
    tail = conv(in_channels, out_channels, kernel_size, stride, padding, bias, mode=mode[1:], negative_slope=negative_slope)
    return sequential(pool, tail)
'''
# --------------------------------------------
# NonLocalBlock2D:
# embedded_gaussian
# +W(softmax(thetaXphi)Xg)
# --------------------------------------------
'''
# --------------------------------------------
# non-local block with embedded_gaussian
# https://github.com/AlexHex7/Non-local_pytorch
# --------------------------------------------
class NonLocalBlock2D(nn.Module):
    """Non-local block with embedded Gaussian attention
    (https://github.com/AlexHex7/Non-local_pytorch):
    z = x + W(softmax(theta(x) @ phi(x)) @ g(x)), with theta/phi/g as 1x1
    convs halving the channel count, and optional spatial downsampling of
    the phi/g branches to reduce the attention matrix size."""
    def __init__(self, nc=64, kernel_size=1, stride=1, padding=0, bias=True, act_mode='B', downsample=False, downsample_mode='maxpool', negative_slope=0.2):
        super(NonLocalBlock2D, self).__init__()
        inter_nc = nc // 2
        self.inter_nc = inter_nc
        # Output projection back to nc channels (with norm/act per act_mode).
        self.W = conv(inter_nc, nc, kernel_size, stride, padding, bias, mode='C'+act_mode)
        self.theta = conv(nc, inter_nc, kernel_size, stride, padding, bias, mode='C')
        if downsample:
            # Downsample phi/g by 2 to shrink the (HW x HW) attention matrix.
            if downsample_mode == 'avgpool':
                downsample_block = downsample_avgpool
            elif downsample_mode == 'maxpool':
                downsample_block = downsample_maxpool
            elif downsample_mode == 'strideconv':
                downsample_block = downsample_strideconv
            else:
                raise NotImplementedError('downsample mode [{:s}] is not found'.format(downsample_mode))
            self.phi = downsample_block(nc, inter_nc, kernel_size, stride, padding, bias, mode='2')
            self.g = downsample_block(nc, inter_nc, kernel_size, stride, padding, bias, mode='2')
        else:
            self.phi = conv(nc, inter_nc, kernel_size, stride, padding, bias, mode='C')
            self.g = conv(nc, inter_nc, kernel_size, stride, padding, bias, mode='C')
    def forward(self, x):
        '''
        :param x: (b, c, h, w) feature map
        :return: (b, c, h, w) tensor: input plus the projected non-local response
        '''
        batch_size = x.size(0)
        # Flatten spatial dims: g_x, theta_x -> (b, HW, inter_nc); phi_x -> (b, inter_nc, HW').
        g_x = self.g(x).view(batch_size, self.inter_nc, -1)
        g_x = g_x.permute(0, 2, 1)
        theta_x = self.theta(x).view(batch_size, self.inter_nc, -1)
        theta_x = theta_x.permute(0, 2, 1)
        phi_x = self.phi(x).view(batch_size, self.inter_nc, -1)
        # Pairwise similarity (b, HW, HW'), softmax-normalised over keys.
        f = torch.matmul(theta_x, phi_x)
        f_div_C = F.softmax(f, dim=-1)
        y = torch.matmul(f_div_C, g_x)
        y = y.permute(0, 2, 1).contiguous()
        y = y.view(batch_size, self.inter_nc, *x.size()[2:])
        W_y = self.W(y)
        z = W_y + x
        return z
| 24,138 | 39.775338 | 160 | py |
MaskedDenoising | MaskedDenoising-main/models/common.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def default_conv(in_channels, out_channels, kernel_size, bias=True):
    """'Same'-padded 2D convolution (exact for odd kernel sizes)."""
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     padding=(kernel_size // 2), bias=bias)
class MeanShift(nn.Conv2d):
    """Fixed 1x1 conv that subtracts (sign=-1, default) or adds (sign=+1)
    the per-channel RGB mean scaled by rgb_range, divided by rgb_std.

    The layer's parameters are frozen: it is a constant normalisation step.
    """

    def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        self.weight.data = torch.eye(3).view(3, 3, 1, 1)
        self.weight.data.div_(std.view(3, 1, 1, 1))
        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)
        self.bias.data.div_(std)
        # Bug fix: `self.requires_grad = False` only created a plain Module
        # attribute and did NOT freeze the parameters; freeze them explicitly.
        for p in self.parameters():
            p.requires_grad = False
class BasicBlock(nn.Sequential):
    """Conv (+ optional BatchNorm) (+ optional activation) as one Sequential."""

    def __init__(
        self, in_channels, out_channels, kernel_size, stride=1, bias=False,
        bn=True, act=nn.ReLU(True)):
        layers = [nn.Conv2d(
            in_channels, out_channels, kernel_size,
            padding=(kernel_size // 2), stride=stride, bias=bias)]
        if bn:
            layers.append(nn.BatchNorm2d(out_channels))
        if act is not None:
            layers.append(act)
        super(BasicBlock, self).__init__(*layers)
class ResBlock(nn.Module):
    """Residual block: x + res_scale * conv(act(conv(x))), with the conv
    constructor supplied by the caller."""

    def __init__(
        self, conv, n_feat, kernel_size,
        bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(ResBlock, self).__init__()
        layers = []
        for i in range(2):
            layers.append(conv(n_feat, n_feat, kernel_size, bias=bias))
            if bn:
                layers.append(nn.BatchNorm2d(n_feat))
            if i == 0:
                layers.append(act)  # activation only between the two convs
        self.body = nn.Sequential(*layers)
        self.res_scale = res_scale

    def forward(self, x):
        return x + self.body(x).mul(self.res_scale)
class Upsampler(nn.Sequential):
    """Pixel-shuffle upsampler: supports scale 2^n (chained x2 stages) or 3."""

    def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):
        layers = []
        if (scale & (scale - 1)) == 0:  # power of two: repeat x2 stages
            for _ in range(int(math.log(scale, 2))):
                layers.append(conv(n_feat, 4 * n_feat, 3, bias))
                layers.append(nn.PixelShuffle(2))
                if bn:
                    layers.append(nn.BatchNorm2d(n_feat))
                if act:
                    layers.append(act())
        elif scale == 3:
            layers.append(conv(n_feat, 9 * n_feat, 3, bias))
            layers.append(nn.PixelShuffle(3))
            if bn:
                layers.append(nn.BatchNorm2d(n_feat))
            if act:
                layers.append(act())
        else:
            raise NotImplementedError
        super(Upsampler, self).__init__(*layers)
# add NonLocalBlock2D
# reference: https://github.com/AlexHex7/Non-local_pytorch/blob/master/lib/non_local_simple_version.py
class NonLocalBlock2D(nn.Module):
    """Non-local (self-attention) block over spatial positions.

    g/theta/phi are 1x1 embeddings; W projects back to in_channels and is
    zero-initialized so the block starts as an identity mapping.
    """
    def __init__(self, in_channels, inter_channels):
        super(NonLocalBlock2D, self).__init__()
        self.in_channels = in_channels
        self.inter_channels = inter_channels
        self.g = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
        self.W = nn.Conv2d(in_channels=self.inter_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0)
        # Fix: nn.init.constant was deprecated and removed; use the in-place
        # variant. Zero init keeps the block an identity at the start.
        nn.init.constant_(self.W.weight, 0)
        nn.init.constant_(self.W.bias, 0)
        self.theta = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)
        self.phi = nn.Conv2d(in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        batch_size = x.size(0)
        g_x = self.g(x).view(batch_size, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)
        theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
        theta_x = theta_x.permute(0, 2, 1)
        phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
        f = torch.matmul(theta_x, phi_x)
        # NOTE(review): softmax over dim=1 normalizes across query positions,
        # unlike the usual non-local formulation (dim=-1); kept as-is since
        # trained weights depend on it -- confirm intent before changing.
        f_div_C = F.softmax(f, dim=1)
        y = torch.matmul(f_div_C, g_x)
        y = y.permute(0, 2, 1).contiguous()
        y = y.view(batch_size, self.inter_channels, *x.size()[2:])
        W_y = self.W(y)
        return W_y + x
## define trunk branch
class TrunkBranch(nn.Module):
    """Trunk branch of the residual attention module: two stacked ResBlocks."""
    def __init__(
        self, conv, n_feat, kernel_size,
        bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(TrunkBranch, self).__init__()
        blocks = [ResBlock(conv, n_feat, kernel_size, bias=True, bn=False,
                           act=nn.ReLU(True), res_scale=1)
                  for _ in range(2)]
        self.body = nn.Sequential(*blocks)

    def forward(self, x):
        return self.body(x)
## define mask branch
class MaskBranchDownUp(nn.Module):
    """Mask branch: ResBlock -> strided downsample -> 2 ResBlocks -> transposed
    upsample -> skip add -> ResBlock -> 1x1 conv -> sigmoid attention mask."""
    def __init__(
        self, conv, n_feat, kernel_size,
        bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(MaskBranchDownUp, self).__init__()
        def res_block():
            return ResBlock(conv, n_feat, kernel_size, bias=True, bn=False,
                            act=nn.ReLU(True), res_scale=1)
        self.MB_RB1 = nn.Sequential(res_block())
        self.MB_Down = nn.Sequential(nn.Conv2d(n_feat, n_feat, 3, stride=2, padding=1))
        self.MB_RB2 = nn.Sequential(res_block(), res_block())
        self.MB_Up = nn.Sequential(nn.ConvTranspose2d(n_feat, n_feat, 6, stride=2, padding=2))
        self.MB_RB3 = nn.Sequential(res_block())
        self.MB_1x1conv = nn.Sequential(nn.Conv2d(n_feat, n_feat, 1, padding=0, bias=True))
        self.MB_sigmoid = nn.Sequential(nn.Sigmoid())

    def forward(self, x):
        rb1 = self.MB_RB1(x)
        up = self.MB_Up(self.MB_RB2(self.MB_Down(rb1)))
        out = self.MB_RB3(rb1 + up)  # skip connection around the down/up path
        return self.MB_sigmoid(self.MB_1x1conv(out))
## define nonlocal mask branch
class NLMaskBranchDownUp(nn.Module):
    """Mask branch with a leading non-local block before the down/up path."""
    def __init__(
        self, conv, n_feat, kernel_size,
        bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(NLMaskBranchDownUp, self).__init__()
        def res_block():
            return ResBlock(conv, n_feat, kernel_size, bias=True, bn=False,
                            act=nn.ReLU(True), res_scale=1)
        self.MB_RB1 = nn.Sequential(NonLocalBlock2D(n_feat, n_feat // 2), res_block())
        self.MB_Down = nn.Sequential(nn.Conv2d(n_feat, n_feat, 3, stride=2, padding=1))
        self.MB_RB2 = nn.Sequential(res_block(), res_block())
        self.MB_Up = nn.Sequential(nn.ConvTranspose2d(n_feat, n_feat, 6, stride=2, padding=2))
        self.MB_RB3 = nn.Sequential(res_block())
        self.MB_1x1conv = nn.Sequential(nn.Conv2d(n_feat, n_feat, 1, padding=0, bias=True))
        self.MB_sigmoid = nn.Sequential(nn.Sigmoid())

    def forward(self, x):
        rb1 = self.MB_RB1(x)
        up = self.MB_Up(self.MB_RB2(self.MB_Down(rb1)))
        out = self.MB_RB3(rb1 + up)  # skip connection around the down/up path
        return self.MB_sigmoid(self.MB_1x1conv(out))
## define residual attention module
class ResAttModuleDownUpPlus(nn.Module):
    """Residual attention module: trunk output gated by the mask branch,
    with an identity skip and a two-ResBlock tail."""
    def __init__(
        self, conv, n_feat, kernel_size,
        bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(ResAttModuleDownUpPlus, self).__init__()
        def res_block():
            return ResBlock(conv, n_feat, kernel_size, bias=True, bn=False,
                            act=nn.ReLU(True), res_scale=1)
        self.RA_RB1 = nn.Sequential(res_block())
        self.RA_TB = nn.Sequential(TrunkBranch(conv, n_feat, kernel_size, bias=True,
                                               bn=False, act=nn.ReLU(True), res_scale=1))
        self.RA_MB = nn.Sequential(MaskBranchDownUp(conv, n_feat, kernel_size, bias=True,
                                                    bn=False, act=nn.ReLU(True), res_scale=1))
        self.RA_tail = nn.Sequential(res_block(), res_block())

    def forward(self, input):
        head = self.RA_RB1(input)
        gated = self.RA_TB(head) * self.RA_MB(head)  # attention gating
        return self.RA_tail(gated + head)
## define nonlocal residual attention module
class NLResAttModuleDownUpPlus(nn.Module):
    """Non-local residual attention module: same as ResAttModuleDownUpPlus but
    the mask branch starts with a non-local block."""
    def __init__(
        self, conv, n_feat, kernel_size,
        bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(NLResAttModuleDownUpPlus, self).__init__()
        def res_block():
            return ResBlock(conv, n_feat, kernel_size, bias=True, bn=False,
                            act=nn.ReLU(True), res_scale=1)
        self.RA_RB1 = nn.Sequential(res_block())
        self.RA_TB = nn.Sequential(TrunkBranch(conv, n_feat, kernel_size, bias=True,
                                               bn=False, act=nn.ReLU(True), res_scale=1))
        self.RA_MB = nn.Sequential(NLMaskBranchDownUp(conv, n_feat, kernel_size, bias=True,
                                                      bn=False, act=nn.ReLU(True), res_scale=1))
        self.RA_tail = nn.Sequential(res_block(), res_block())

    def forward(self, input):
        head = self.RA_RB1(input)
        gated = self.RA_TB(head) * self.RA_MB(head)  # attention gating
        return self.RA_tail(gated + head)
MaskedDenoising | MaskedDenoising-main/models/network_rrdbnet.py | import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
def initialize_weights(net_l, scale=1):
    """Kaiming-init Conv2d/Linear weights (scaled by `scale`), zero their
    biases, and reset BatchNorm2d to weight=1 / bias=0.

    Accepts a single module or a list of modules.
    """
    if not isinstance(net_l, list):
        net_l = [net_l]
    for net in net_l:
        for module in net.modules():
            if isinstance(module, (nn.Conv2d, nn.Linear)):
                init.kaiming_normal_(module.weight, a=0, mode='fan_in')
                module.weight.data *= scale  # damped init for residual blocks
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                init.constant_(module.weight, 1)
                init.constant_(module.bias.data, 0.0)
def make_layer(block, n_layers):
    """Stack `n_layers` instances produced by the `block` factory into a Sequential."""
    return nn.Sequential(*(block() for _ in range(n_layers)))
class ResidualDenseBlock_5C(nn.Module):
    """Five-conv dense block: each conv sees the concatenation of the input
    and all previous features; output is a 0.2-scaled residual."""
    def __init__(self, nf=64, gc=32, bias=True):
        super(ResidualDenseBlock_5C, self).__init__()
        # gc: growth channels added to the input of every subsequent conv
        self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias=bias)
        self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias=bias)
        self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias=bias)
        self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias=bias)
        self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias=bias)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        # small-scale init stabilizes training of deep residual stacks
        initialize_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1)

    def forward(self, x):
        feats = [x]
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            feats.append(self.lrelu(conv(torch.cat(feats, 1))))
        out = self.conv5(torch.cat(feats, 1))
        return out * 0.2 + x
class RRDB(nn.Module):
    """Residual-in-Residual Dense Block: three dense blocks + 0.2-scaled skip."""
    def __init__(self, nf, gc=32):
        super(RRDB, self).__init__()
        self.RDB1 = ResidualDenseBlock_5C(nf, gc)
        self.RDB2 = ResidualDenseBlock_5C(nf, gc)
        self.RDB3 = ResidualDenseBlock_5C(nf, gc)

    def forward(self, x):
        out = self.RDB3(self.RDB2(self.RDB1(x)))
        return out * 0.2 + x
class RRDBNet(nn.Module):
    """ESRGAN-style backbone: conv head, RRDB trunk with a global skip, and a
    nearest-neighbor x2 (sf=2) or x4 (sf=4) upsampling tail."""
    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=23, gc=32, sf=4):
        super(RRDBNet, self).__init__()
        self.sf = sf
        print([in_nc, out_nc, nf, nb, gc, sf])
        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
        self.RRDB_trunk = make_layer(functools.partial(RRDB, nf=nf, gc=gc), nb)
        self.trunk_conv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        # upsampling tail: one x2 stage always, a second one when sf == 4
        self.upconv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        if self.sf == 4:
            self.upconv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.HRconv = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        fea = self.conv_first(x)
        fea = fea + self.trunk_conv(self.RRDB_trunk(fea))  # global residual
        fea = self.lrelu(self.upconv1(F.interpolate(fea, scale_factor=2, mode='nearest')))
        if self.sf == 4:
            fea = self.lrelu(self.upconv2(F.interpolate(fea, scale_factor=2, mode='nearest')))
        return self.conv_last(self.lrelu(self.HRconv(fea)))
| 3,777 | 35.326923 | 94 | py |
MaskedDenoising | MaskedDenoising-main/models/network_faceenhancer.py | '''
@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)
@author: yangxy (yangtao9009@gmail.com)
# 2021-06-03, modified by Kai
'''
import sys
op_path = 'models'
if op_path not in sys.path:
sys.path.insert(0, op_path)
from op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
import math
import random
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
# When True, NoiseInjection concatenates the noise channels onto the feature
# map instead of adding them; `sss` is the resulting channel multiplier used
# throughout the generator's channel arithmetic.
isconcat = True
sss = 2 if isconcat else 1
class PixelNorm(nn.Module):
    """Normalize each pixel's feature vector to unit RMS across channels."""
    def __init__(self):
        super().__init__()

    def forward(self, input):
        inv_rms = torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
        return input * inv_rms
def make_kernel(k):
    """Turn a 1-D tap list into a normalized 2-D FIR kernel (outer product);
    a 2-D input is only normalized."""
    kernel = torch.tensor(k, dtype=torch.float32)
    if kernel.ndim == 1:
        kernel = kernel[None, :] * kernel[:, None]
    return kernel / kernel.sum()
class Upsample(nn.Module):
    """FIR-filtered upsampling by `factor` via upfirdn2d."""
    def __init__(self, kernel, factor=2):
        super().__init__()
        self.factor = factor
        # amplify the kernel so the upsampled signal keeps its magnitude
        kernel = make_kernel(kernel) * (factor ** 2)
        self.register_buffer('kernel', kernel)
        p = kernel.shape[0] - factor
        self.pad = ((p + 1) // 2 + factor - 1, p // 2)

    def forward(self, input):
        return upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
class Downsample(nn.Module):
    """FIR-filtered downsampling by `factor` via upfirdn2d."""
    def __init__(self, kernel, factor=2):
        super().__init__()
        self.factor = factor
        kernel = make_kernel(kernel)
        self.register_buffer('kernel', kernel)
        p = kernel.shape[0] - factor
        self.pad = ((p + 1) // 2, p // 2)

    def forward(self, input):
        return upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)
class Blur(nn.Module):
    """Fixed FIR blur; kernel amplified when fused with an upsampling step."""
    def __init__(self, kernel, pad, upsample_factor=1):
        super().__init__()
        kernel = make_kernel(kernel)
        if upsample_factor > 1:
            kernel = kernel * (upsample_factor ** 2)
        self.register_buffer('kernel', kernel)
        self.pad = pad

    def forward(self, input):
        return upfirdn2d(input, self.kernel, pad=self.pad)
class EqualConv2d(nn.Module):
    """Conv2d with equalized learning rate: N(0,1) weights scaled at runtime."""
    def __init__(
        self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
    ):
        super().__init__()
        self.weight = nn.Parameter(
            torch.randn(out_channel, in_channel, kernel_size, kernel_size)
        )
        # He-style runtime scale equalizes the effective lr across layers
        self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
        self.stride = stride
        self.padding = padding
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_channel))
        else:
            self.bias = None

    def forward(self, input):
        return F.conv2d(
            input,
            self.weight * self.scale,
            bias=self.bias,
            stride=self.stride,
            padding=self.padding,
        )

    def __repr__(self):
        w = self.weight.shape
        return (
            f'{self.__class__.__name__}({w[1]}, {w[0]},'
            f' {w[2]}, stride={self.stride}, padding={self.padding})'
        )
class EqualLinear(nn.Module):
    """Linear layer with equalized learning rate and optional fused LeakyReLU.

    Weights are stored divided by ``lr_mul`` and re-scaled at runtime;
    the bias is multiplied by ``lr_mul`` in forward.
    """
    def __init__(
        self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
    ):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
        else:
            self.bias = None
        self.activation = activation
        self.scale = (1 / math.sqrt(in_dim)) * lr_mul
        self.lr_mul = lr_mul

    def forward(self, input):
        # Fix: the original unconditionally computed `self.bias * self.lr_mul`,
        # which raises TypeError when bias=False (self.bias is None).
        bias = self.bias * self.lr_mul if self.bias is not None else None
        if self.activation:
            out = F.linear(input, self.weight * self.scale)
            out = fused_leaky_relu(out, bias)
        else:
            out = F.linear(input, self.weight * self.scale, bias=bias)
        return out

    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
        )
class ScaledLeakyReLU(nn.Module):
    """LeakyReLU scaled by sqrt(2) to roughly preserve activation variance."""
    def __init__(self, negative_slope=0.2):
        super().__init__()
        self.negative_slope = negative_slope

    def forward(self, input):
        return math.sqrt(2) * F.leaky_relu(input, negative_slope=self.negative_slope)
class ModulatedConv2d(nn.Module):
    """StyleGAN2-style modulated convolution.

    A per-sample style vector scales the conv weights (optionally followed by
    demodulation); the batched weights are applied via a grouped conv with
    groups=batch. Supports blur-fused 2x up/downsampling.
    """
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        demodulate=True,
        upsample=False,
        downsample=False,
        blur_kernel=[1, 3, 3, 1],
    ):
        super().__init__()
        self.eps = 1e-8
        self.kernel_size = kernel_size
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.upsample = upsample
        self.downsample = downsample
        if upsample:
            factor = 2
            p = (len(blur_kernel) - factor) - (kernel_size - 1)
            pad0 = (p + 1) // 2 + factor - 1
            pad1 = p // 2 + 1
            self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
        if downsample:
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2
            self.blur = Blur(blur_kernel, pad=(pad0, pad1))
        fan_in = in_channel * kernel_size ** 2
        # equalized-lr runtime scale
        self.scale = 1 / math.sqrt(fan_in)
        self.padding = kernel_size // 2
        self.weight = nn.Parameter(
            torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
        )
        # maps the style vector to one multiplier per input channel
        self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
        self.demodulate = demodulate
    def __repr__(self):
        return (
            f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, '
            f'upsample={self.upsample}, downsample={self.downsample})'
        )
    def forward(self, input, style):
        batch, in_channel, height, width = input.shape
        # modulate: per-sample, per-input-channel weight scaling
        style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
        weight = self.scale * self.weight * style
        if self.demodulate:
            # demodulate: normalize each output filter to unit norm
            demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
            weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
        # fold the batch dimension into the filter dimension for groups=batch
        weight = weight.view(
            batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
        )
        if self.upsample:
            # grouped transposed conv (one group per sample), then blur
            input = input.view(1, batch * in_channel, height, width)
            weight = weight.view(
                batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
            )
            # conv_transpose2d expects (in, out, kH, kW) per group
            weight = weight.transpose(1, 2).reshape(
                batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
            )
            out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
            out = self.blur(out)
        elif self.downsample:
            input = self.blur(input)
            _, _, height, width = input.shape
            input = input.view(1, batch * in_channel, height, width)
            out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
        else:
            input = input.view(1, batch * in_channel, height, width)
            out = F.conv2d(input, weight, padding=self.padding, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
        return out
class NoiseInjection(nn.Module):
    """Inject learned-scale noise: added to the features, or concatenated as
    extra channels when the module-level `isconcat` flag is set."""
    def __init__(self):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(1))

    def forward(self, image, noise=None):
        if noise is None:
            # sample one noise channel per pixel, broadcast over channels
            batch, _, height, width = image.shape
            noise = image.new_empty(batch, 1, height, width).normal_()
            return image + self.weight * noise
        if isconcat:
            return torch.cat((image, self.weight * noise), dim=1)
        return image + self.weight * noise
class ConstantInput(nn.Module):
    """Learned constant tensor, repeated across the batch dimension.
    Only the batch size of `input` is used."""
    def __init__(self, channel, size=4):
        super().__init__()
        self.input = nn.Parameter(torch.randn(1, channel, size, size))

    def forward(self, input):
        return self.input.repeat(input.shape[0], 1, 1, 1)
class StyledConv(nn.Module):
    """ModulatedConv2d -> noise injection -> fused LeakyReLU.

    The activation width carries the `sss` multiplier because noise may be
    concatenated rather than added.
    """
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        upsample=False,
        blur_kernel=[1, 3, 3, 1],
        demodulate=True,
    ):
        super().__init__()
        self.conv = ModulatedConv2d(
            in_channel,
            out_channel,
            kernel_size,
            style_dim,
            upsample=upsample,
            blur_kernel=blur_kernel,
            demodulate=demodulate,
        )
        self.noise = NoiseInjection()
        self.activate = FusedLeakyReLU(out_channel * sss)

    def forward(self, input, style, noise=None):
        out = self.noise(self.conv(input, style), noise=noise)
        return self.activate(out)
class ToRGB(nn.Module):
    """Project features to a 3-channel image; optionally upsample and add a skip."""
    def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
        super().__init__()
        if upsample:
            self.upsample = Upsample(blur_kernel)
        self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
        self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))

    def forward(self, input, style, skip=None):
        out = self.conv(input, style) + self.bias
        if skip is not None:
            out = out + self.upsample(skip)
        return out
class Generator(nn.Module):
    """StyleGAN2-style synthesis network (GPEN variant).

    Post-noise channel counts carry the module-level `sss` multiplier because
    NoiseInjection may concatenate noise channels (see `isconcat`).
    """
    def __init__(
        self,
        size,
        style_dim,
        n_mlp,
        channel_multiplier=2,
        blur_kernel=[1, 3, 3, 1],
        lr_mlp=0.01,
    ):
        super().__init__()
        self.size = size
        self.n_mlp = n_mlp
        self.style_dim = style_dim
        # mapping network: PixelNorm followed by n_mlp equalized-lr MLP layers
        layers = [PixelNorm()]
        for i in range(n_mlp):
            layers.append(
                EqualLinear(
                    style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu'
                )
            )
        self.style = nn.Sequential(*layers)
        # feature width per spatial resolution
        self.channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256 * channel_multiplier,
            128: 128 * channel_multiplier,
            256: 64 * channel_multiplier,
            512: 32 * channel_multiplier,
            1024: 16 * channel_multiplier,
        }
        self.input = ConstantInput(self.channels[4])
        self.conv1 = StyledConv(
            self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
        )
        self.to_rgb1 = ToRGB(self.channels[4]*sss, style_dim, upsample=False)
        self.log_size = int(math.log(size, 2))
        self.convs = nn.ModuleList()
        self.upsamples = nn.ModuleList()
        self.to_rgbs = nn.ModuleList()
        in_channel = self.channels[4]
        # per octave 8..size: upsampling StyledConv + plain StyledConv + ToRGB
        for i in range(3, self.log_size + 1):
            out_channel = self.channels[2 ** i]
            self.convs.append(
                StyledConv(
                    in_channel*sss,
                    out_channel,
                    3,
                    style_dim,
                    upsample=True,
                    blur_kernel=blur_kernel,
                )
            )
            self.convs.append(
                StyledConv(
                    out_channel*sss, out_channel, 3, style_dim, blur_kernel=blur_kernel
                )
            )
            self.to_rgbs.append(ToRGB(out_channel*sss, style_dim))
            in_channel = out_channel
        self.n_latent = self.log_size * 2 - 2
    def make_noise(self):
        """Fresh noise tensors: one at 4x4, then two per octave up to `size`."""
        device = self.input.input.device
        noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]
        for i in range(3, self.log_size + 1):
            for _ in range(2):
                noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))
        return noises
    def mean_latent(self, n_latent):
        """Average the mapped latent over n_latent random z's (truncation anchor)."""
        latent_in = torch.randn(
            n_latent, self.style_dim, device=self.input.input.device
        )
        latent = self.style(latent_in).mean(0, keepdim=True)
        return latent
    def get_latent(self, input):
        """Map z -> w through the style MLP."""
        return self.style(input)
    def forward(
        self,
        styles,
        return_latents=False,
        inject_index=None,
        truncation=1,
        truncation_latent=None,
        input_is_latent=False,
        noise=None,
    ):
        """Synthesize an image from a list of style vectors.

        Returns (image, latent) if return_latents else (image, None).
        """
        if not input_is_latent:
            styles = [self.style(s) for s in styles]
        if noise is None:
            '''
            noise = [None] * (2 * (self.log_size - 2) + 1)
            '''
            # NOTE(review): samples n_mlp+1 noise maps at sizes 4..2**(n_mlp+2);
            # this lines up with log_size only when n_mlp == log_size - 2 -- confirm.
            noise = []
            batch = styles[0].shape[0]
            for i in range(self.n_mlp + 1):
                size = 2 ** (i+2)
                noise.append(torch.randn(batch, self.channels[size], size, size, device=styles[0].device))
                #print(self.channels[size], size)
        if truncation < 1:
            # truncation trick: interpolate latents toward the mean latent
            style_t = []
            for style in styles:
                style_t.append(
                    truncation_latent + truncation * (style - truncation_latent)
                )
            styles = style_t
        if len(styles) < 2:
            inject_index = self.n_latent
            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
        else:
            # style mixing: switch from styles[0] to styles[1] at inject_index
            if inject_index is None:
                inject_index = random.randint(1, self.n_latent - 1)
            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
            latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)
            latent = torch.cat([latent, latent2], 1)
        out = self.input(latent)
        out = self.conv1(out, latent[:, 0], noise=noise[0])
        skip = self.to_rgb1(out, latent[:, 1])
        i = 1
        noise_i = 1
        outs = []
        # pairs of StyledConvs plus a ToRGB per octave; `skip` accumulates RGB
        for conv1, conv2, to_rgb in zip(
            self.convs[::2], self.convs[1::2], self.to_rgbs
        ):
            #print(out.shape, noise[(noise_i)//2].shape, noise[(noise_i + 1)//2].shape)
            out = conv1(out, latent[:, i], noise=noise[(noise_i + 1)//2]) ### 1 for 2
            out = conv2(out, latent[:, i + 1], noise=noise[(noise_i + 2)//2]) ### 1 for 2
            skip = to_rgb(out, latent[:, i + 2], skip)
            #outs.append(skip.clone())
            i += 2
            noise_i += 2
        image = skip
        if return_latents:
            return image, latent
        else:
            return image, None
class ConvLayer(nn.Sequential):
    """Optional Blur -> EqualConv2d -> optional activation, as a Sequential.

    Downsampling uses a blur + stride-2 conv; when the activation carries the
    bias (FusedLeakyReLU), the conv itself is built bias-free.
    """
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        downsample=False,
        blur_kernel=[1, 3, 3, 1],
        bias=True,
        activate=True,
    ):
        stages = []
        if downsample:
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            stages.append(Blur(blur_kernel, pad=((p + 1) // 2, p // 2)))
            stride = 2
            self.padding = 0
        else:
            stride = 1
            self.padding = kernel_size // 2
        stages.append(
            EqualConv2d(
                in_channel,
                out_channel,
                kernel_size,
                padding=self.padding,
                stride=stride,
                bias=bias and not activate,
            )
        )
        if activate:
            stages.append(FusedLeakyReLU(out_channel) if bias
                          else ScaledLeakyReLU(0.2))
        super().__init__(*stages)
class ResBlock(nn.Module):
    """Downsampling residual block with a 1x1 skip, scaled by 1/sqrt(2)."""
    def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
        super().__init__()
        self.conv1 = ConvLayer(in_channel, in_channel, 3)
        self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)
        self.skip = ConvLayer(
            in_channel, out_channel, 1, downsample=True, activate=False, bias=False
        )

    def forward(self, input):
        main = self.conv2(self.conv1(input))
        # 1/sqrt(2) keeps the summed branch variance roughly constant
        return (main + self.skip(input)) / math.sqrt(2)
# -----------------------------
# Main model
# -----------------------------
class FullGenerator(nn.Module):
    """Full GPEN model: a conv encoder produces the latent code and
    per-resolution features that feed the StyleGAN-like `Generator` as noise."""
    def __init__(
        self,
        size,
        style_dim,
        n_mlp,
        channel_multiplier=2,
        blur_kernel=[1, 3, 3, 1],
        lr_mlp=0.01,
    ):
        super().__init__()
        channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256 * channel_multiplier,
            128: 128 * channel_multiplier,
            256: 64 * channel_multiplier,
            512: 32 * channel_multiplier,
            1024: 16 * channel_multiplier,
        }
        self.log_size = int(math.log(size, 2))
        self.generator = Generator(size, style_dim, n_mlp, channel_multiplier=channel_multiplier, blur_kernel=blur_kernel, lr_mlp=lr_mlp)
        conv = [ConvLayer(3, channels[size], 1)]
        self.ecd0 = nn.Sequential(*conv)
        in_channel = channels[size]
        # one encoder stage per octave: ecd0 .. ecd{log_size-2}
        self.names = ['ecd%d'%i for i in range(self.log_size-1)]
        for i in range(self.log_size, 2, -1):
            out_channel = channels[2 ** (i - 1)]
            #conv = [ResBlock(in_channel, out_channel, blur_kernel)]
            conv = [ConvLayer(in_channel, out_channel, 3, downsample=True)]
            setattr(self, self.names[self.log_size-i+1], nn.Sequential(*conv))
            in_channel = out_channel
        # the 4x4 encoder output is flattened to the style latent
        self.final_linear = nn.Sequential(EqualLinear(channels[4] * 4 * 4, style_dim, activation='fused_lrelu'))
    def forward(self,
        inputs,
        return_latents=False,
        inject_index=None,
        truncation=1,
        truncation_latent=None,
        input_is_latent=False,
        ):
        """Encode `inputs` to a latent + multi-scale features, then decode."""
        noise = []
        for i in range(self.log_size-1):
            ecd = getattr(self, self.names[i])
            inputs = ecd(inputs)
            # encoder features double as the decoder's noise/skip inputs
            noise.append(inputs)
            #print(inputs.shape)
        inputs = inputs.view(inputs.shape[0], -1)
        outs = self.final_linear(inputs)
        #print(outs.shape)
        # reverse to coarsest-first ordering expected by the decoder
        outs = self.generator([outs], return_latents, inject_index, truncation, truncation_latent, input_is_latent, noise=noise[::-1])
        return outs
| 19,199 | 26.906977 | 137 | py |
MaskedDenoising | MaskedDenoising-main/models/loss_ssim.py | import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
"""
# ============================================
# SSIM loss
# https://github.com/Po-Hsun-Su/pytorch-ssim
# ============================================
"""
def gaussian(window_size, sigma):
    """Return a 1-D Gaussian kernel of length `window_size`, normalized to sum 1."""
    center = window_size // 2
    values = [exp(-((i - center) ** 2) / float(2 * sigma ** 2))
              for i in range(window_size)]
    kernel = torch.Tensor(values)
    return kernel / kernel.sum()
def create_window(window_size, channel):
    """Build a (channel, 1, ws, ws) depthwise Gaussian window for grouped conv."""
    _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
    _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
    # Fix: dropped the deprecated torch.autograd.Variable wrapper -- it has
    # been a no-op since PyTorch 0.4; a plain tensor is returned directly.
    return _2D_window.expand(channel, 1, window_size, window_size).contiguous()
def _ssim(img1, img2, window, window_size, channel, size_average=True):
mu1 = F.conv2d(img1, window, padding=window_size//2, groups=channel)
mu2 = F.conv2d(img2, window, padding=window_size//2, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding=window_size//2, groups=channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding=window_size//2, groups=channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding=window_size//2, groups=channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIMLoss(torch.nn.Module):
    """SSIM as an nn.Module; caches the Gaussian window per channel count/dtype."""
    def __init__(self, window_size=11, size_average=True):
        super(SSIMLoss, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = 1
        self.window = create_window(window_size, self.channel)

    def forward(self, img1, img2):
        channel = img1.size(1)
        if channel == self.channel and self.window.data.type() == img1.data.type():
            window = self.window
        else:
            # rebuild (and cache) the window for a new channel count or dtype
            window = create_window(self.window_size, channel)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            self.window = window
            self.channel = channel
        return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
def ssim(img1, img2, window_size=11, size_average=True):
    """Functional SSIM between two batched images (window built per call)."""
    channel = img1.size(1)
    window = create_window(window_size, channel)
    if img1.is_cuda:
        window = window.cuda(img1.get_device())
    window = window.type_as(img1)
    return _ssim(img1, img2, window, window_size, channel, size_average)
if __name__ == '__main__':
    # Demo: start from random noise and optimize it toward a target image by
    # maximizing SSIM (gradient descent on the negative SSIM).
    import cv2
    from torch import optim
    from skimage import io
    npImg1 = cv2.imread("einstein.png")
    # HWC uint8 -> 1xCxHxW float in [0, 1]
    img1 = torch.from_numpy(np.rollaxis(npImg1, 2)).float().unsqueeze(0)/255.0
    img2 = torch.rand(img1.size())
    if torch.cuda.is_available():
        img1 = img1.cuda()
        img2 = img2.cuda()
    img1 = Variable(img1, requires_grad=False)
    img2 = Variable(img2, requires_grad=True)
    ssim_value = ssim(img1, img2).item()
    print("Initial ssim:", ssim_value)
    ssim_loss = SSIMLoss()
    optimizer = optim.Adam([img2], lr=0.01)
    while ssim_value < 0.99:
        optimizer.zero_grad()
        # minimizing -SSIM == maximizing SSIM
        ssim_out = -ssim_loss(img1, img2)
        ssim_value = -ssim_out.item()
        print('{:<4.4f}'.format(ssim_value))
        ssim_out.backward()
        optimizer.step()
    img = np.transpose(img2.detach().cpu().squeeze().float().numpy(), (1,2,0))
    io.imshow(np.uint8(np.clip(img*255, 0, 255)))
| 3,708 | 30.974138 | 104 | py |
MaskedDenoising | MaskedDenoising-main/models/network_dpsr.py | import math
import torch.nn as nn
import models.basicblock as B
"""
# --------------------------------------------
# modified SRResNet
# -- MSRResNet_prior (for DPSR)
# --------------------------------------------
References:
@inproceedings{zhang2019deep,
title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
pages={1671--1681},
year={2019}
}
@inproceedings{wang2018esrgan,
title={Esrgan: Enhanced super-resolution generative adversarial networks},
author={Wang, Xintao and Yu, Ke and Wu, Shixiang and Gu, Jinjin and Liu, Yihao and Dong, Chao and Qiao, Yu and Change Loy, Chen},
booktitle={European Conference on Computer Vision (ECCV)},
pages={0--0},
year={2018}
}
@inproceedings{ledig2017photo,
title={Photo-realistic single image super-resolution using a generative adversarial network},
author={Ledig, Christian and Theis, Lucas and Husz{\'a}r, Ferenc and Caballero, Jose and Cunningham, Andrew and Acosta, Alejandro and Aitken, Andrew and Tejani, Alykhan and Totz, Johannes and Wang, Zehan and others},
booktitle={IEEE conference on computer vision and pattern recognition},
pages={4681--4690},
year={2017}
}
# --------------------------------------------
"""
# --------------------------------------------
# MSRResNet super-resolver prior for DPSR
# https://github.com/cszn/DPSR
# https://github.com/cszn/DPSR/blob/master/models/network_srresnet.py
# --------------------------------------------
class MSRResNet_prior(nn.Module):
def __init__(self, in_nc=4, out_nc=3, nc=96, nb=16, upscale=4, act_mode='R', upsample_mode='upconv'):
super(MSRResNet_prior, self).__init__()
n_upscale = int(math.log(upscale, 2))
if upscale == 3:
n_upscale = 1
m_head = B.conv(in_nc, nc, mode='C')
m_body = [B.ResBlock(nc, nc, mode='C'+act_mode+'C') for _ in range(nb)]
m_body.append(B.conv(nc, nc, mode='C'))
if upsample_mode == 'upconv':
upsample_block = B.upsample_upconv
elif upsample_mode == 'pixelshuffle':
upsample_block = B.upsample_pixelshuffle
elif upsample_mode == 'convtranspose':
upsample_block = B.upsample_convtranspose
else:
raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))
if upscale == 3:
m_uper = upsample_block(nc, nc, mode='3'+act_mode)
else:
m_uper = [upsample_block(nc, nc, mode='2'+act_mode) for _ in range(n_upscale)]
H_conv0 = B.conv(nc, nc, mode='C'+act_mode)
H_conv1 = B.conv(nc, out_nc, bias=False, mode='C')
m_tail = B.sequential(H_conv0, H_conv1)
self.model = B.sequential(m_head, B.ShortcutBlock(B.sequential(*m_body)), *m_uper, m_tail)
def forward(self, x):
x = self.model(x)
return x
class SRResNet(nn.Module):
def __init__(self, in_nc=3, out_nc=3, nc=64, nb=16, upscale=4, act_mode='R', upsample_mode='upconv'):
super(SRResNet, self).__init__()
n_upscale = int(math.log(upscale, 2))
if upscale == 3:
n_upscale = 1
m_head = B.conv(in_nc, nc, mode='C')
m_body = [B.ResBlock(nc, nc, mode='C'+act_mode+'C') for _ in range(nb)]
m_body.append(B.conv(nc, nc, mode='C'))
if upsample_mode == 'upconv':
upsample_block = B.upsample_upconv
elif upsample_mode == 'pixelshuffle':
upsample_block = B.upsample_pixelshuffle
elif upsample_mode == 'convtranspose':
upsample_block = B.upsample_convtranspose
else:
raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))
if upscale == 3:
m_uper = upsample_block(nc, nc, mode='3'+act_mode)
else:
m_uper = [upsample_block(nc, nc, mode='2'+act_mode) for _ in range(n_upscale)]
H_conv0 = B.conv(nc, nc, mode='C'+act_mode)
H_conv1 = B.conv(nc, out_nc, bias=False, mode='C')
m_tail = B.sequential(H_conv0, H_conv1)
self.model = B.sequential(m_head, B.ShortcutBlock(B.sequential(*m_body)), *m_uper, m_tail)
def forward(self, x):
x = self.model(x)
return x | 4,331 | 37.678571 | 218 | py |
MaskedDenoising | MaskedDenoising-main/models/model_gan.py | from collections import OrderedDict
import torch
import torch.nn as nn
from torch.optim import lr_scheduler
from torch.optim import Adam
from models.select_network import define_G, define_D
from models.model_base import ModelBase
from models.loss import GANLoss, PerceptualLoss
from models.loss_ssim import SSIMLoss
class ModelGAN(ModelBase):
    """Train with pixel-VGG-GAN loss.

    Trainer that alternates generator (netG) and discriminator (netD)
    updates, optionally keeping an exponential-moving-average copy netE of
    the generator (controlled by ``opt['train']['E_decay']``).
    """
    def __init__(self, opt):
        """Build netG; when training, also build netD and (optionally) netE.

        Args:
            opt (dict): full option dictionary; ``opt['train']`` holds the
                training-related settings used throughout this class.
        """
        super(ModelGAN, self).__init__(opt)
        # ------------------------------------
        # define network
        # ------------------------------------
        self.opt_train = self.opt['train']  # training option
        self.netG = define_G(opt)
        self.netG = self.model_to_device(self.netG)
        if self.is_train:
            self.netD = define_D(opt)
            self.netD = self.model_to_device(self.netD)
            if self.opt_train['E_decay'] > 0:
                # EMA copy of G, kept on the device in eval mode
                self.netE = define_G(opt).to(self.device).eval()

    """
    # ----------------------------------------
    # Preparation before training with data
    # Save model during training
    # ----------------------------------------
    """

    # ----------------------------------------
    # initialize training
    # ----------------------------------------
    def init_train(self):
        """One-time training setup: load weights, losses, optimizers, schedulers."""
        self.load()                           # load model
        self.netG.train()                     # set training mode (matters for BN)
        self.netD.train()                     # set training mode (matters for BN)
        self.define_loss()                    # define loss
        self.define_optimizer()               # define optimizer
        self.load_optimizers()                # load optimizer
        self.define_scheduler()               # define scheduler
        self.log_dict = OrderedDict()         # log

    # ----------------------------------------
    # load pre-trained G and D model
    # ----------------------------------------
    def load(self):
        """Load pretrained weights for G, E (EMA) and D if paths are given."""
        load_path_G = self.opt['path']['pretrained_netG']
        if load_path_G is not None:
            print('Loading model for G [{:s}] ...'.format(load_path_G))
            self.load_network(load_path_G, self.netG, strict=self.opt_train['G_param_strict'])
        load_path_E = self.opt['path']['pretrained_netE']
        if self.opt_train['E_decay'] > 0:
            if load_path_E is not None:
                print('Loading model for E [{:s}] ...'.format(load_path_E))
                self.load_network(load_path_E, self.netE, strict=self.opt_train['E_param_strict'])
            else:
                # no E checkpoint: initialize E as an exact copy of G (decay=0)
                print('Copying model for E')
                self.update_E(0)
            self.netE.eval()
        load_path_D = self.opt['path']['pretrained_netD']
        if self.opt['is_train'] and load_path_D is not None:
            print('Loading model for D [{:s}] ...'.format(load_path_D))
            self.load_network(load_path_D, self.netD, strict=self.opt_train['D_param_strict'])

    # ----------------------------------------
    # load optimizerG and optimizerD
    # ----------------------------------------
    def load_optimizers(self):
        """Restore optimizer states when reuse is enabled and paths exist."""
        load_path_optimizerG = self.opt['path']['pretrained_optimizerG']
        if load_path_optimizerG is not None and self.opt_train['G_optimizer_reuse']:
            print('Loading optimizerG [{:s}] ...'.format(load_path_optimizerG))
            self.load_optimizer(load_path_optimizerG, self.G_optimizer)
        load_path_optimizerD = self.opt['path']['pretrained_optimizerD']
        if load_path_optimizerD is not None and self.opt_train['D_optimizer_reuse']:
            print('Loading optimizerD [{:s}] ...'.format(load_path_optimizerD))
            self.load_optimizer(load_path_optimizerD, self.D_optimizer)

    # ----------------------------------------
    # save model / optimizer(optional)
    # ----------------------------------------
    def save(self, iter_label):
        """Checkpoint G, D (and optionally E plus optimizer states) under iter_label."""
        self.save_network(self.save_dir, self.netG, 'G', iter_label)
        self.save_network(self.save_dir, self.netD, 'D', iter_label)
        if self.opt_train['E_decay'] > 0:
            self.save_network(self.save_dir, self.netE, 'E', iter_label)
        if self.opt_train['G_optimizer_reuse']:
            self.save_optimizer(self.save_dir, self.G_optimizer, 'optimizerG', iter_label)
        if self.opt_train['D_optimizer_reuse']:
            self.save_optimizer(self.save_dir, self.D_optimizer, 'optimizerD', iter_label)

    # ----------------------------------------
    # define loss
    # ----------------------------------------
    def define_loss(self):
        """Instantiate the pixel (G), perceptual (F) and adversarial (D) losses."""
        # ------------------------------------
        # 1) G_loss  (pixel-wise reconstruction loss)
        # ------------------------------------
        if self.opt_train['G_lossfn_weight'] > 0:
            G_lossfn_type = self.opt_train['G_lossfn_type']
            if G_lossfn_type == 'l1':
                self.G_lossfn = nn.L1Loss().to(self.device)
            elif G_lossfn_type == 'l2':
                self.G_lossfn = nn.MSELoss().to(self.device)
            elif G_lossfn_type == 'l2sum':
                self.G_lossfn = nn.MSELoss(reduction='sum').to(self.device)
            elif G_lossfn_type == 'ssim':
                self.G_lossfn = SSIMLoss().to(self.device)
            else:
                raise NotImplementedError('Loss type [{:s}] is not found.'.format(G_lossfn_type))
            self.G_lossfn_weight = self.opt_train['G_lossfn_weight']
        else:
            print('Do not use pixel loss.')
            self.G_lossfn = None
        # ------------------------------------
        # 2) F_loss  (VGG perceptual/feature loss)
        # ------------------------------------
        if self.opt_train['F_lossfn_weight'] > 0:
            F_feature_layer = self.opt_train['F_feature_layer']
            F_weights = self.opt_train['F_weights']
            F_lossfn_type = self.opt_train['F_lossfn_type']
            F_use_input_norm = self.opt_train['F_use_input_norm']
            F_use_range_norm = self.opt_train['F_use_range_norm']
            if self.opt['dist']:
                self.F_lossfn = PerceptualLoss(feature_layer=F_feature_layer, weights=F_weights, lossfn_type=F_lossfn_type, use_input_norm=F_use_input_norm, use_range_norm=F_use_range_norm).to(self.device)
            else:
                self.F_lossfn = PerceptualLoss(feature_layer=F_feature_layer, weights=F_weights, lossfn_type=F_lossfn_type, use_input_norm=F_use_input_norm, use_range_norm=F_use_range_norm)
                # non-distributed: wrap the VGG trunk separately so it can be
                # placed on the (possibly multi-GPU) device like the networks
                self.F_lossfn.vgg = self.model_to_device(self.F_lossfn.vgg)
                self.F_lossfn.lossfn = self.F_lossfn.lossfn.to(self.device)
            self.F_lossfn_weight = self.opt_train['F_lossfn_weight']
        else:
            print('Do not use feature loss.')
            self.F_lossfn = None
        # ------------------------------------
        # 3) D_loss  (adversarial loss)
        # ------------------------------------
        self.D_lossfn = GANLoss(self.opt_train['gan_type'], 1.0, 0.0).to(self.device)
        self.D_lossfn_weight = self.opt_train['D_lossfn_weight']
        # update D every `D_update_ratio` steps, after `D_init_iters` warm-up
        self.D_update_ratio = self.opt_train['D_update_ratio'] if self.opt_train['D_update_ratio'] else 1
        self.D_init_iters = self.opt_train['D_init_iters'] if self.opt_train['D_init_iters'] else 0

    # ----------------------------------------
    # define optimizer, G and D
    # ----------------------------------------
    def define_optimizer(self):
        """Create Adam optimizers for G (trainable params only) and D."""
        G_optim_params = []
        for k, v in self.netG.named_parameters():
            if v.requires_grad:
                G_optim_params.append(v)
            else:
                print('Params [{:s}] will not optimize.'.format(k))
        self.G_optimizer = Adam(G_optim_params, lr=self.opt_train['G_optimizer_lr'], weight_decay=0)
        self.D_optimizer = Adam(self.netD.parameters(), lr=self.opt_train['D_optimizer_lr'], weight_decay=0)

    # ----------------------------------------
    # define scheduler, only "MultiStepLR"
    # ----------------------------------------
    def define_scheduler(self):
        """Attach MultiStepLR schedulers for both optimizers."""
        self.schedulers.append(lr_scheduler.MultiStepLR(self.G_optimizer,
                                                        self.opt_train['G_scheduler_milestones'],
                                                        self.opt_train['G_scheduler_gamma']
                                                        ))
        self.schedulers.append(lr_scheduler.MultiStepLR(self.D_optimizer,
                                                        self.opt_train['D_scheduler_milestones'],
                                                        self.opt_train['D_scheduler_gamma']
                                                        ))

    """
    # ----------------------------------------
    # Optimization during training with data
    # Testing/evaluation
    # ----------------------------------------
    """

    # ----------------------------------------
    # feed L/H data
    # ----------------------------------------
    def feed_data(self, data, need_H=True):
        """Move the low-quality batch L (and optionally target H) to the device."""
        self.L = data['L'].to(self.device)
        if need_H:
            self.H = data['H'].to(self.device)

    # ----------------------------------------
    # feed L to netG and get E
    # ----------------------------------------
    def netG_forward(self):
        """Run the generator; the estimate is stored in ``self.E``."""
        self.E = self.netG(self.L)

    # ----------------------------------------
    # update parameters and get loss
    # ----------------------------------------
    def optimize_parameters(self, current_step):
        """Run one alternating G/D update on the current (L, H) batch.

        G is only updated every ``D_update_ratio`` steps and after
        ``D_init_iters`` warm-up iterations; D is updated every call.
        """
        # ------------------------------------
        # optimize G
        # ------------------------------------
        # freeze D so G's backward does not accumulate gradients into it
        for p in self.netD.parameters():
            p.requires_grad = False

        self.G_optimizer.zero_grad()
        self.netG_forward()
        loss_G_total = 0

        if current_step % self.D_update_ratio == 0 and current_step > self.D_init_iters:  # updata D first
            if self.opt_train['G_lossfn_weight'] > 0:
                G_loss = self.G_lossfn_weight * self.G_lossfn(self.E, self.H)
                loss_G_total += G_loss                 # 1) pixel loss
            if self.opt_train['F_lossfn_weight'] > 0:
                F_loss = self.F_lossfn_weight * self.F_lossfn(self.E.contiguous(), self.H.contiguous())
                loss_G_total += F_loss                 # 2) VGG feature loss

            if self.opt['train']['gan_type'] in ['gan', 'lsgan', 'wgan', 'softplusgan']:
                pred_g_fake = self.netD(self.E)
                D_loss = self.D_lossfn_weight * self.D_lossfn(pred_g_fake, True)
            elif self.opt['train']['gan_type'] == 'ragan':
                # relativistic average GAN: compare each logit to the mean of
                # the opposite class (real mean detached for the G update)
                pred_d_real = self.netD(self.H).detach()
                pred_g_fake = self.netD(self.E)
                D_loss = self.D_lossfn_weight * (
                    self.D_lossfn(pred_d_real - torch.mean(pred_g_fake, 0, True), False) +
                    self.D_lossfn(pred_g_fake - torch.mean(pred_d_real, 0, True), True)) / 2
            loss_G_total += D_loss                     # 3) GAN loss

            loss_G_total.backward()
            self.G_optimizer.step()

        # ------------------------------------
        # optimize D
        # ------------------------------------
        for p in self.netD.parameters():
            p.requires_grad = True

        self.D_optimizer.zero_grad()

        # In order to avoid the error in distributed training:
        # "Error detected in CudnnBatchNormBackward: RuntimeError: one of
        # the variables needed for gradient computation has been modified by
        # an inplace operation",
        # we separate the backwards for real and fake, and also detach the
        # tensor for calculating mean.
        if self.opt_train['gan_type'] in ['gan', 'lsgan', 'wgan', 'softplusgan']:
            # real
            pred_d_real = self.netD(self.H)                # 1) real data
            l_d_real = self.D_lossfn(pred_d_real, True)
            l_d_real.backward()
            # fake
            pred_d_fake = self.netD(self.E.detach().clone())  # 2) fake data, detach to avoid BP to G
            l_d_fake = self.D_lossfn(pred_d_fake, False)
            l_d_fake.backward()
        elif self.opt_train['gan_type'] == 'ragan':
            # real
            pred_d_fake = self.netD(self.E).detach()       # 1) fake data, detach to avoid BP to G
            pred_d_real = self.netD(self.H)                # 2) real data
            l_d_real = 0.5 * self.D_lossfn(pred_d_real - torch.mean(pred_d_fake, 0, True), True)
            l_d_real.backward()
            # fake
            pred_d_fake = self.netD(self.E.detach())
            l_d_fake = 0.5 * self.D_lossfn(pred_d_fake - torch.mean(pred_d_real.detach(), 0, True), False)
            l_d_fake.backward()

        self.D_optimizer.step()

        # ------------------------------------
        # record log
        # ------------------------------------
        if current_step % self.D_update_ratio == 0 and current_step > self.D_init_iters:
            if self.opt_train['G_lossfn_weight'] > 0:
                self.log_dict['G_loss'] = G_loss.item()
            if self.opt_train['F_lossfn_weight'] > 0:
                self.log_dict['F_loss'] = F_loss.item()
            self.log_dict['D_loss'] = D_loss.item()

        #self.log_dict['l_d_real'] = l_d_real.item()
        #self.log_dict['l_d_fake'] = l_d_fake.item()
        self.log_dict['D_real'] = torch.mean(pred_d_real.detach())
        self.log_dict['D_fake'] = torch.mean(pred_d_fake.detach())

        if self.opt_train['E_decay'] > 0:
            self.update_E(self.opt_train['E_decay'])

    # ----------------------------------------
    # test and inference
    # ----------------------------------------
    def test(self):
        """Run a gradient-free forward pass of G, restoring train mode after."""
        self.netG.eval()
        with torch.no_grad():
            self.netG_forward()
        self.netG.train()

    # ----------------------------------------
    # get log_dict
    # ----------------------------------------
    def current_log(self):
        """Return the ordered dict of the most recent per-step losses/stats."""
        return self.log_dict

    # ----------------------------------------
    # get L, E, H images
    # ----------------------------------------
    def current_visuals(self, need_H=True):
        """Return detached CPU copies of the first L, E (and optionally H) images."""
        out_dict = OrderedDict()
        out_dict['L'] = self.L.detach()[0].float().cpu()
        out_dict['E'] = self.E.detach()[0].float().cpu()
        if need_H:
            out_dict['H'] = self.H.detach()[0].float().cpu()
        return out_dict

    """
    # ----------------------------------------
    # Information of netG, netD and netF
    # ----------------------------------------
    """

    # ----------------------------------------
    # print network
    # ----------------------------------------
    def print_network(self):
        """Print architecture descriptions of G (and D when training)."""
        msg = self.describe_network(self.netG)
        print(msg)
        if self.is_train:
            msg = self.describe_network(self.netD)
            print(msg)

    # ----------------------------------------
    # print params
    # ----------------------------------------
    def print_params(self):
        """Print parameter statistics of G."""
        msg = self.describe_params(self.netG)
        print(msg)

    # ----------------------------------------
    # network information
    # ----------------------------------------
    def info_network(self):
        """Return architecture descriptions of G (and D when training) as a string."""
        msg = self.describe_network(self.netG)
        if self.is_train:
            msg += self.describe_network(self.netD)
        return msg

    # ----------------------------------------
    # params information
    # ----------------------------------------
    def info_params(self):
        """Return parameter statistics of G as a string."""
        msg = self.describe_params(self.netG)
        return msg
| 15,535 | 42.887006 | 205 | py |
MaskedDenoising | MaskedDenoising-main/models/network_unet.py | import torch
import torch.nn as nn
import models.basicblock as B
import numpy as np
'''
# ====================
# Residual U-Net
# ====================
citation:
@article{zhang2020plug,
title={Plug-and-Play Image Restoration with Deep Denoiser Prior},
author={Zhang, Kai and Li, Yawei and Zuo, Wangmeng and Zhang, Lei and Van Gool, Luc and Timofte, Radu},
journal={arXiv preprint},
year={2020}
}
# ====================
'''
class UNetRes(nn.Module):
    """Residual U-Net (DRUNet-style) denoiser.

    Three downsampling stages of residual conv blocks, a residual body, and
    three mirrored upsampling stages joined by additive skip connections.
    """
    def __init__(self, in_nc=3, out_nc=3, nc=[64, 128, 256, 512], nb=4, act_mode='R', downsample_mode='strideconv', upsample_mode='convtranspose', bias=True):
        super(UNetRes, self).__init__()

        def res_blocks(ch):
            # nb residual conv blocks at a fixed channel width
            return [B.ResBlock(ch, ch, bias=bias, mode='C' + act_mode + 'C') for _ in range(nb)]

        self.m_head = B.conv(in_nc, nc[0], bias=bias, mode='C')

        # select the downsampling operator
        if downsample_mode == 'avgpool':
            down = B.downsample_avgpool
        elif downsample_mode == 'maxpool':
            down = B.downsample_maxpool
        elif downsample_mode == 'strideconv':
            down = B.downsample_strideconv
        else:
            raise NotImplementedError('downsample mode [{:s}] is not found'.format(downsample_mode))

        self.m_down1 = B.sequential(*res_blocks(nc[0]), down(nc[0], nc[1], bias=bias, mode='2'))
        self.m_down2 = B.sequential(*res_blocks(nc[1]), down(nc[1], nc[2], bias=bias, mode='2'))
        self.m_down3 = B.sequential(*res_blocks(nc[2]), down(nc[2], nc[3], bias=bias, mode='2'))

        self.m_body = B.sequential(*res_blocks(nc[3]))

        # select the upsampling operator
        if upsample_mode == 'upconv':
            up = B.upsample_upconv
        elif upsample_mode == 'pixelshuffle':
            up = B.upsample_pixelshuffle
        elif upsample_mode == 'convtranspose':
            up = B.upsample_convtranspose
        else:
            raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))

        self.m_up3 = B.sequential(up(nc[3], nc[2], bias=bias, mode='2'), *res_blocks(nc[2]))
        self.m_up2 = B.sequential(up(nc[2], nc[1], bias=bias, mode='2'), *res_blocks(nc[1]))
        self.m_up1 = B.sequential(up(nc[1], nc[0], bias=bias, mode='2'), *res_blocks(nc[0]))

        self.m_tail = B.conv(nc[0], out_nc, bias=bias, mode='C')

    def forward(self, x0):
        """Encode, transform, and decode with additive encoder-decoder skips."""
        e1 = self.m_head(x0)
        e2 = self.m_down1(e1)
        e3 = self.m_down2(e2)
        e4 = self.m_down3(e3)
        d = self.m_body(e4)
        d = self.m_up3(d + e4)
        d = self.m_up2(d + e3)
        d = self.m_up1(d + e2)
        return self.m_tail(d + e1)
if __name__ == '__main__':
    # shape sanity check: push a random RGB image through the default network
    dummy = torch.rand(1, 3, 256, 256)
    model = UNetRes()
    model.eval()
    with torch.no_grad():
        out = model(dummy)
    print(out.size())

    # run models/network_unet.py
| 3,484 | 38.602273 | 170 | py |
MaskedDenoising | MaskedDenoising-main/models/network_srmd.py |
import torch.nn as nn
import models.basicblock as B
import torch
"""
# --------------------------------------------
# SRMD (15 conv layers)
# --------------------------------------------
Reference:
@inproceedings{zhang2018learning,
title={Learning a single convolutional super-resolution network for multiple degradations},
author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
pages={3262--3271},
year={2018}
}
http://openaccess.thecvf.com/content_cvpr_2018/papers/Zhang_Learning_a_Single_CVPR_2018_paper.pdf
"""
# --------------------------------------------
# SRMD (SRMD, in_nc = 3+15+1 = 19)
# SRMD (SRMDNF, in_nc = 3+15 = 18)
# --------------------------------------------
# --------------------------------------------
# SRMD (SRMD, in_nc = 3+15+1 = 19)
# SRMD (SRMDNF, in_nc = 3+15 = 18)
# --------------------------------------------
class SRMD(nn.Module):
    def __init__(self, in_nc=19, out_nc=3, nc=128, nb=12, upscale=4, act_mode='R', upsample_mode='pixelshuffle'):
        """Plain conv stack for super-resolution with multiple degradations.

        Args:
            in_nc: channel number of input (image channels + degradation maps,
                e.g. 3 + 15 [+ 1 noise level])
            out_nc: channel number of output
            nc: channel number of the hidden layers
            nb: total number of conv layers
            upscale: scale factor
            act_mode: batch norm + activation function; 'BR' means BN+ReLU
            upsample_mode: default 'pixelshuffle' = conv + pixelshuffle
        """
        super(SRMD, self).__init__()
        assert 'R' in act_mode or 'L' in act_mode, 'Examples of activation function: R, L, BR, BL, IR, IL'
        bias = True

        up_blocks = {
            'upconv': B.upsample_upconv,
            'pixelshuffle': B.upsample_pixelshuffle,
            'convtranspose': B.upsample_convtranspose,
        }
        if upsample_mode not in up_blocks:
            raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))
        upsample_block = up_blocks[upsample_mode]

        # head uses only the activation (last char of act_mode), body uses the
        # full mode string (possibly including a norm layer)
        layers = [B.conv(in_nc, nc, mode='C' + act_mode[-1], bias=bias)]
        layers += [B.conv(nc, nc, mode='C' + act_mode, bias=bias) for _ in range(nb - 2)]
        layers.append(upsample_block(nc, out_nc, mode=str(upscale), bias=bias))

        self.model = B.sequential(*layers)

    def forward(self, x):
        """Expects degradation maps already concatenated along the channel dim."""
        return self.model(x)
if __name__ == '__main__':
    from utils import utils_model
    model = SRMD(in_nc=18, out_nc=3, nc=64, nb=15, upscale=4, act_mode='R', upsample_mode='pixelshuffle')
    print(utils_model.describe_model(model))

    x = torch.randn((2, 3, 100, 100))
    k_pca = torch.randn(2, 15, 1, 1)
    # BUG FIX: forward() takes a single pre-concatenated tensor (the
    # two-argument forward is commented out in the class), so calling
    # model(x, k_pca) raised TypeError. Stretch the PCA kernel maps to the
    # image size and concatenate along the channel dim before the call.
    m = k_pca.repeat(1, 1, x.size(-2), x.size(-1))
    x = model(torch.cat((x, m), dim=1))
    print(x.shape)

    # run models/network_srmd.py
| 2,804 | 33.207317 | 113 | py |
MaskedDenoising | MaskedDenoising-main/models/network_discriminator.py | import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn.utils import spectral_norm
import models.basicblock as B
import functools
import numpy as np
"""
# --------------------------------------------
# Discriminator_PatchGAN
# Discriminator_UNet
# --------------------------------------------
"""
# --------------------------------------------
# PatchGAN discriminator
# If n_layers = 3, then the receptive field is 70x70
# --------------------------------------------
class Discriminator_PatchGAN(nn.Module):
    def __init__(self, input_nc=3, ndf=64, n_layers=3, norm_type='spectral'):
        '''PatchGAN discriminator, receptive field = 70x70 if n_layers = 3
        Args:
            input_nc: number of input channels
            ndf: base channel number
            n_layers: number of conv layer with stride 2
            norm_type:  'batch', 'instance', 'spectral', 'batchspectral', instancespectral'
        Returns:
            tensor: score map of patch-wise real/fake logits
        '''
        super(Discriminator_PatchGAN, self).__init__()
        self.n_layers = n_layers
        norm_layer = self.get_norm_layer(norm_type=norm_type)

        kw = 4
        padw = int(np.ceil((kw - 1.0) / 2))

        # first stage: conv + LeakyReLU, no normalization
        stages = [[self.use_spectral_norm(nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), norm_type),
                   nn.LeakyReLU(0.2, True)]]

        nf = ndf
        # stride-2 stages, doubling channels up to 512
        for _ in range(1, n_layers):
            nf_prev, nf = nf, min(nf * 2, 512)
            stages.append([self.use_spectral_norm(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw), norm_type),
                           norm_layer(nf),
                           nn.LeakyReLU(0.2, True)])

        # one stride-1 stage before the score head
        nf_prev, nf = nf, min(nf * 2, 512)
        stages.append([self.use_spectral_norm(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw), norm_type),
                       norm_layer(nf),
                       nn.LeakyReLU(0.2, True)])

        # 1-channel patch score head
        stages.append([self.use_spectral_norm(nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw), norm_type)])

        self.model = nn.Sequential()
        for idx, stage in enumerate(stages):
            self.model.add_module('child' + str(idx), nn.Sequential(*stage))

        self.model.apply(self.weights_init)

    def use_spectral_norm(self, module, norm_type='spectral'):
        """Wrap with spectral norm when requested by norm_type."""
        return spectral_norm(module) if 'spectral' in norm_type else module

    def get_norm_layer(self, norm_type='instance'):
        """Return a normalization-layer factory matching norm_type."""
        if 'batch' in norm_type:
            return functools.partial(nn.BatchNorm2d, affine=True)
        if 'instance' in norm_type:
            return functools.partial(nn.InstanceNorm2d, affine=False)
        return functools.partial(nn.Identity)

    def weights_init(self, m):
        """DCGAN-style init: N(0, 0.02) convs, N(1, 0.02)/zero-bias batch norms."""
        classname = m.__class__.__name__
        if 'Conv' in classname:
            m.weight.data.normal_(0.0, 0.02)
        elif 'BatchNorm2d' in classname:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    def forward(self, x):
        """Return the patch-wise score map."""
        return self.model(x)
class Discriminator_UNet(nn.Module):
    """Defines a U-Net discriminator with spectral normalization (SN)"""

    def __init__(self, input_nc=3, ndf=64):
        super(Discriminator_UNet, self).__init__()
        norm = spectral_norm
        # encoder: strided SN convs, channels x2 per level
        self.conv0 = nn.Conv2d(input_nc, ndf, kernel_size=3, stride=1, padding=1)
        self.conv1 = norm(nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False))
        self.conv2 = norm(nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False))
        self.conv3 = norm(nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False))
        # decoder: bilinear upsample + SN conv, channels /2 per level
        self.conv4 = norm(nn.Conv2d(ndf * 8, ndf * 4, 3, 1, 1, bias=False))
        self.conv5 = norm(nn.Conv2d(ndf * 4, ndf * 2, 3, 1, 1, bias=False))
        self.conv6 = norm(nn.Conv2d(ndf * 2, ndf, 3, 1, 1, bias=False))
        # extra refinement convs + 1-channel score head
        self.conv7 = norm(nn.Conv2d(ndf, ndf, 3, 1, 1, bias=False))
        self.conv8 = norm(nn.Conv2d(ndf, ndf, 3, 1, 1, bias=False))
        self.conv9 = nn.Conv2d(ndf, 1, 3, 1, 1)

        print('using the UNet discriminator')

    def forward(self, x):
        """Return a per-pixel realness score map at input resolution."""
        def act(t):
            return F.leaky_relu(t, negative_slope=0.2, inplace=True)

        def up2(t):
            return F.interpolate(t, scale_factor=2, mode='bilinear', align_corners=False)

        # encoder
        e0 = act(self.conv0(x))
        e1 = act(self.conv1(e0))
        e2 = act(self.conv2(e1))
        e3 = act(self.conv3(e2))
        # decoder with additive skips
        d2 = act(self.conv4(up2(e3))) + e2
        d1 = act(self.conv5(up2(d2))) + e1
        d0 = act(self.conv6(up2(d1))) + e0
        # extra
        out = act(self.conv7(d0))
        out = act(self.conv8(out))
        return self.conv9(out)
# --------------------------------------------
# VGG style Discriminator with 96x96 input
# --------------------------------------------
class Discriminator_VGG_96(nn.Module):
    """VGG-style discriminator for 96x96 inputs.

    Five (k3s1, k4s2) conv pairs shrink 96 -> 48 -> 24 -> 12 -> 6 -> 3,
    followed by a small MLP classifier producing a single score.
    """
    def __init__(self, in_nc=3, base_nc=64, ac_type='BL'):
        super(Discriminator_VGG_96, self).__init__()
        # first pair: plain conv head, then conv + norm/act
        layers = [B.conv(in_nc, base_nc, kernel_size=3, mode='C'),
                  B.conv(base_nc, base_nc, kernel_size=4, stride=2, mode='C' + ac_type)]
        # remaining pairs: widen (capped at 8x), then halve spatial size
        for c_in, c_out in [(base_nc, base_nc * 2),
                            (base_nc * 2, base_nc * 4),
                            (base_nc * 4, base_nc * 8),
                            (base_nc * 8, base_nc * 8)]:
            layers += [B.conv(c_in, c_out, kernel_size=3, stride=1, mode='C' + ac_type),
                       B.conv(c_out, c_out, kernel_size=4, stride=2, mode='C' + ac_type)]
        self.features = B.sequential(*layers)

        # classifier over the flattened 3x3 feature map
        self.classifier = nn.Sequential(
            nn.Linear(512 * 3 * 3, 100), nn.LeakyReLU(0.2, True), nn.Linear(100, 1))

    def forward(self, x):
        feat = self.features(x)
        return self.classifier(feat.view(feat.size(0), -1))
# --------------------------------------------
# VGG style Discriminator with 128x128 input
# --------------------------------------------
class Discriminator_VGG_128(nn.Module):
    """VGG-style discriminator for 128x128 inputs.

    Five (k3s1, k4s2) conv pairs shrink 128 -> 64 -> 32 -> 16 -> 8 -> 4,
    followed by a small MLP classifier producing a single score.
    """
    def __init__(self, in_nc=3, base_nc=64, ac_type='BL'):
        super(Discriminator_VGG_128, self).__init__()
        # first pair: plain conv head, then conv + norm/act
        layers = [B.conv(in_nc, base_nc, kernel_size=3, mode='C'),
                  B.conv(base_nc, base_nc, kernel_size=4, stride=2, mode='C' + ac_type)]
        # remaining pairs: widen (capped at 8x), then halve spatial size
        for c_in, c_out in [(base_nc, base_nc * 2),
                            (base_nc * 2, base_nc * 4),
                            (base_nc * 4, base_nc * 8),
                            (base_nc * 8, base_nc * 8)]:
            layers += [B.conv(c_in, c_out, kernel_size=3, stride=1, mode='C' + ac_type),
                       B.conv(c_out, c_out, kernel_size=4, stride=2, mode='C' + ac_type)]
        self.features = B.sequential(*layers)

        # classifier over the flattened 4x4 feature map
        self.classifier = nn.Sequential(nn.Linear(512 * 4 * 4, 100),
                                        nn.LeakyReLU(0.2, True),
                                        nn.Linear(100, 1))

    def forward(self, x):
        feat = self.features(x)
        return self.classifier(feat.view(feat.size(0), -1))
# --------------------------------------------
# VGG style Discriminator with 192x192 input
# --------------------------------------------
class Discriminator_VGG_192(nn.Module):
    """VGG-style discriminator for 192x192 inputs.

    Six (k3s1, k4s2) conv pairs shrink 192 -> 96 -> 48 -> 24 -> 12 -> 6 -> 3,
    followed by a small MLP classifier producing a single score.
    """
    def __init__(self, in_nc=3, base_nc=64, ac_type='BL'):
        super(Discriminator_VGG_192, self).__init__()
        # first pair: plain conv head, then conv + norm/act
        layers = [B.conv(in_nc, base_nc, kernel_size=3, mode='C'),
                  B.conv(base_nc, base_nc, kernel_size=4, stride=2, mode='C' + ac_type)]
        # remaining pairs: widen (capped at 8x), then halve spatial size
        for c_in, c_out in [(base_nc, base_nc * 2),
                            (base_nc * 2, base_nc * 4),
                            (base_nc * 4, base_nc * 8),
                            (base_nc * 8, base_nc * 8),
                            (base_nc * 8, base_nc * 8)]:
            layers += [B.conv(c_in, c_out, kernel_size=3, stride=1, mode='C' + ac_type),
                       B.conv(c_out, c_out, kernel_size=4, stride=2, mode='C' + ac_type)]
        self.features = B.sequential(*layers)

        # classifier over the flattened 3x3 feature map
        self.classifier = nn.Sequential(nn.Linear(512 * 3 * 3, 100),
                                        nn.LeakyReLU(0.2, True),
                                        nn.Linear(100, 1))

    def forward(self, x):
        feat = self.features(x)
        return self.classifier(feat.view(feat.size(0), -1))
# --------------------------------------------
# SN-VGG style Discriminator with 128x128 input
# --------------------------------------------
class Discriminator_VGG_128_SN(nn.Module):
    """VGG-style discriminator for 128x128 inputs with spectral normalization
    on every conv and linear layer.
    """
    def __init__(self):
        super(Discriminator_VGG_128_SN, self).__init__()
        self.lrelu = nn.LeakyReLU(0.2, True)
        # features: (k3s1, k4s2) pairs, 128 -> 64 -> 32 -> 16 -> 8 -> 4
        self.conv0 = spectral_norm(nn.Conv2d(3, 64, 3, 1, 1))
        self.conv1 = spectral_norm(nn.Conv2d(64, 64, 4, 2, 1))
        self.conv2 = spectral_norm(nn.Conv2d(64, 128, 3, 1, 1))
        self.conv3 = spectral_norm(nn.Conv2d(128, 128, 4, 2, 1))
        self.conv4 = spectral_norm(nn.Conv2d(128, 256, 3, 1, 1))
        self.conv5 = spectral_norm(nn.Conv2d(256, 256, 4, 2, 1))
        self.conv6 = spectral_norm(nn.Conv2d(256, 512, 3, 1, 1))
        self.conv7 = spectral_norm(nn.Conv2d(512, 512, 4, 2, 1))
        self.conv8 = spectral_norm(nn.Conv2d(512, 512, 3, 1, 1))
        self.conv9 = spectral_norm(nn.Conv2d(512, 512, 4, 2, 1))
        # classifier over the flattened 4x4 feature map
        self.linear0 = spectral_norm(nn.Linear(512 * 4 * 4, 100))
        self.linear1 = spectral_norm(nn.Linear(100, 1))

    def forward(self, x):
        for conv in (self.conv0, self.conv1, self.conv2, self.conv3, self.conv4,
                     self.conv5, self.conv6, self.conv7, self.conv8, self.conv9):
            x = self.lrelu(conv(x))
        x = x.view(x.size(0), -1)
        x = self.lrelu(self.linear0(x))
        return self.linear1(x)
if __name__ == '__main__':
    def _check(net, size):
        # forward a random batch and report the output size
        net.eval()
        with torch.no_grad():
            print(net(torch.rand(1, 3, size, size)).size())

    _check(Discriminator_VGG_96(), 96)
    _check(Discriminator_VGG_128(), 128)
    _check(Discriminator_VGG_192(), 192)
    _check(Discriminator_VGG_128_SN(), 128)

    # run models/network_discriminator.py
| 13,231 | 38.032448 | 147 | py |
MaskedDenoising | MaskedDenoising-main/models/network_vrt.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import os
import warnings
import math
import torch
import torch.nn as nn
import torchvision
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from distutils.version import LooseVersion
from torch.nn.modules.utils import _pair, _single
import numpy as np
from functools import reduce, lru_cache
from operator import mul
from einops import rearrange
from einops.layers.torch import Rearrange
class ModulatedDeformConv(nn.Module):
    """Modulated deformable convolution (DCNv2) parameter container.

    Holds the conv weight/bias and the nn.Conv2d-compatible metadata
    (``transposed``, ``output_padding``). NOTE: the actual forward pass
    (modulated_deform_conv) is commented out upstream, so this module only
    defines and initializes parameters.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 deformable_groups=1,
                 bias=True):
        super(ModulatedDeformConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.deformable_groups = deformable_groups
        self.with_bias = bias
        # enable compatibility with nn.Conv2d
        self.transposed = False
        self.output_padding = _single(0)

        self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels // groups, *self.kernel_size))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.init_weights()

    def init_weights(self):
        """Uniform init bounded by 1/sqrt(fan_in); zero bias."""
        fan_in = self.in_channels
        for k in self.kernel_size:
            fan_in *= k
        bound = 1. / math.sqrt(fan_in)
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.zero_()
class ModulatedDeformConvPack(ModulatedDeformConv):
    """A ModulatedDeformable Conv Encapsulation that acts as normal Conv layers.

    Adds a ``conv_offset`` branch that predicts, per deformable group and per
    kernel tap, two offset channels and one modulation-mask channel
    (hence the factor of 3 in its output width).

    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): Same as nn.Conv2d.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        stride (int or tuple[int]): Same as nn.Conv2d.
        padding (int or tuple[int]): Same as nn.Conv2d.
        dilation (int or tuple[int]): Same as nn.Conv2d.
        groups (int): Same as nn.Conv2d.
        bias (bool or str): If specified as `auto`, it will be decided by the
            norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
            False.
    """

    _version = 2

    def __init__(self, *args, **kwargs):
        super(ModulatedDeformConvPack, self).__init__(*args, **kwargs)

        # offset/mask predictor: 3 channels (dx, dy, mask) per group per tap
        self.conv_offset = nn.Conv2d(
            self.in_channels,
            self.deformable_groups * 3 * self.kernel_size[0] * self.kernel_size[1],
            kernel_size=self.kernel_size,
            stride=_pair(self.stride),
            padding=_pair(self.padding),
            dilation=_pair(self.dilation),
            bias=True)
        self.init_weights()

    def init_weights(self):
        """Base init plus zero-init of the offset branch (starts as plain conv)."""
        super(ModulatedDeformConvPack, self).init_weights()
        # guarded: the base __init__ calls init_weights() before conv_offset exists
        if hasattr(self, 'conv_offset'):
            self.conv_offset.weight.data.zero_()
            self.conv_offset.bias.data.zero_()
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
'The distribution of values may be incorrect.',
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
low = norm_cdf((a - mean) / std)
up = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [low, up], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * low - 1, 2 * up - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    r"""Fill the input Tensor with values drawn from a truncated normal
    distribution (no-grad, in place).

    From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
    Values are effectively drawn from :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with samples outside :math:`[a, b]` redrawn until within bounds. Works best
    when :math:`a \leq \text{mean} \leq b`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    # Thin public wrapper; the heavy lifting lives in _no_grad_trunc_normal_.
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py

    Each sample in the batch is independently zeroed with probability
    `drop_prob`; survivors are rescaled by 1/keep_prob so the expectation
    is unchanged. Identity at inference or when drop_prob == 0.
    """
    if not training or drop_prob == 0.:
        return x
    keep_prob = 1 - drop_prob
    # Broadcastable per-sample mask: (B, 1, 1, ...) regardless of x's rank.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = torch.rand(mask_shape, dtype=x.dtype, device=x.device).add_(keep_prob).floor_()
    return x.div(keep_prob) * mask
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py

    Args:
        drop_prob (float | None): Probability of dropping a sample's path.
            ``None`` is treated as 0 (identity); previously a ``None`` default
            raised ``TypeError`` (``1 - None``) the first time the module ran
            in training mode.
    """

    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        # Fix: coerce a None drop probability to 0.0 so the default-constructed
        # module is a safe identity in training mode instead of crashing.
        return drop_path(x, self.drop_prob if self.drop_prob is not None else 0., self.training)
def flow_warp(x, flow, interp_mode='bilinear', padding_mode='zeros', align_corners=True, use_pad_mask=False):
    """Warp an image or feature map with optical flow.
    Args:
        x (Tensor): Tensor with size (n, c, h, w).
        flow (Tensor): Tensor with size (n, h, w, 2), normal value.
        interp_mode (str): 'nearest' or 'bilinear' or 'nearest4'. Default: 'bilinear'.
        padding_mode (str): 'zeros' or 'border' or 'reflection'.
            Default: 'zeros'.
        align_corners (bool): Before pytorch 1.3, the default value is
            align_corners=True. After pytorch 1.3, the default value is
            align_corners=False. Here, we use the True as default.
        use_pad_mask (bool): only used for PWCNet; currently unused here.
    Returns:
        Tensor: Warped image or feature map.
    """
    n, _, h, w = x.size()
    # Base sampling grid in pixel coordinates. torch.arange (instead of
    # type_as) avoids an illegal-memory-access bug seen on TITAN RTX +
    # PyTorch 1.9.1 with meshgrid.
    grid_y, grid_x = torch.meshgrid(
        torch.arange(0, h, dtype=x.dtype, device=x.device),
        torch.arange(0, w, dtype=x.dtype, device=x.device))
    base_grid = torch.stack((grid_x, grid_y), 2).float()  # (h, w, 2): W(x), H(y)
    base_grid.requires_grad = False
    vgrid = base_grid + flow

    def _to_unit_range(coord, size):
        # Map pixel coordinates to grid_sample's [-1, 1] range.
        return 2.0 * coord / max(size - 1, 1) - 1.0

    if interp_mode == 'nearest4':
        # todo: bug — no gradient flows to the flow model in this branch, but
        # the result is good. Samples the four integer neighbours of each
        # target location with nearest-mode grid_sample and stacks them
        # along channels (floor/floor, floor/ceil, ceil/floor, ceil/ceil).
        x_floor = _to_unit_range(torch.floor(vgrid[:, :, :, 0]), w)
        x_ceil = _to_unit_range(torch.ceil(vgrid[:, :, :, 0]), w)
        y_floor = _to_unit_range(torch.floor(vgrid[:, :, :, 1]), h)
        y_ceil = _to_unit_range(torch.ceil(vgrid[:, :, :, 1]), h)
        corners = [(x_floor, y_floor), (x_floor, y_ceil), (x_ceil, y_floor), (x_ceil, y_ceil)]
        sampled = [
            F.grid_sample(x, torch.stack(corner, dim=3), mode='nearest',
                          padding_mode=padding_mode, align_corners=align_corners)
            for corner in corners
        ]
        return torch.cat(sampled, 1)

    vgrid_scaled = torch.stack(
        (_to_unit_range(vgrid[:, :, :, 0], w), _to_unit_range(vgrid[:, :, :, 1], h)), dim=3)
    # TODO, what if align_corners=False
    return F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode,
                         align_corners=align_corners)
class DCNv2PackFlowGuided(ModulatedDeformConvPack):
    """Flow-guided deformable alignment module.
    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): Same as nn.Conv2d.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        stride (int or tuple[int]): Same as nn.Conv2d.
        padding (int or tuple[int]): Same as nn.Conv2d.
        dilation (int or tuple[int]): Same as nn.Conv2d.
        groups (int): Same as nn.Conv2d.
        bias (bool or str): If specified as `auto`, it will be decided by the
            norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
            False.
        max_residue_magnitude (int): The maximum magnitude of the offset residue. Default: 10.
        pa_frames (int): The number of parallel warping frames. Default: 2.
    Ref:
        BasicVSR++: Improving Video Super-Resolution with Enhanced Propagation and Alignment.
    """

    def __init__(self, *args, **kwargs):
        # Pop the custom kwargs before the Conv-style base constructor sees them.
        self.max_residue_magnitude = kwargs.pop('max_residue_magnitude', 10)
        self.pa_frames = kwargs.pop('pa_frames', 2)
        super(DCNv2PackFlowGuided, self).__init__(*args, **kwargs)

        # Offset/mask predictor. Input: current features + flow-warped
        # neighbour features + the flows themselves. Output: 3 * 9 values per
        # deformable group (2 offset coords + 1 mask per 3x3 kernel position).
        self.conv_offset = nn.Sequential(
            nn.Conv2d((1+self.pa_frames//2) * self.in_channels + self.pa_frames, self.out_channels, 3, 1, 1),
            nn.LeakyReLU(negative_slope=0.1, inplace=True),
            nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1),
            nn.LeakyReLU(negative_slope=0.1, inplace=True),
            nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1),
            nn.LeakyReLU(negative_slope=0.1, inplace=True),
            nn.Conv2d(self.out_channels, 3 * 9 * self.deformable_groups, 3, 1, 1),
        )
        self.init_offset()

    def init_offset(self):
        # Deliberately skip ModulatedDeformConvPack.init_weights (it assumes
        # conv_offset is a single Conv2d, not a Sequential): call the
        # grandparent's init and zero only the last predictor layer, so the
        # module starts as a plain convolution guided purely by the flow.
        super(ModulatedDeformConvPack, self).init_weights()
        if hasattr(self, 'conv_offset'):
            self.conv_offset[-1].weight.data.zero_()
            self.conv_offset[-1].bias.data.zero_()

    def forward(self, x, x_flow_warpeds, x_current, flows):
        """Deformable alignment of `x`, guided by optical flow.

        Args:
            x: features to align.
            x_flow_warpeds (list[Tensor]): flow-warped neighbour features.
            x_current: features of the current frame.
            flows (list[Tensor]): optical flows (2-channel each).
        """
        out = self.conv_offset(torch.cat(x_flow_warpeds + [x_current] + flows, dim=1))
        o1, o2, mask = torch.chunk(out, 3, dim=1)

        # offset: bounded residue added on top of the (x/y-flipped) flow.
        offset = self.max_residue_magnitude * torch.tanh(torch.cat((o1, o2), dim=1))
        if self.pa_frames == 2:
            offset = offset + flows[0].flip(1).repeat(1, offset.size(1)//2, 1, 1)
        elif self.pa_frames == 4:
            offset1, offset2 = torch.chunk(offset, 2, dim=1)
            offset1 = offset1 + flows[0].flip(1).repeat(1, offset1.size(1) // 2, 1, 1)
            offset2 = offset2 + flows[1].flip(1).repeat(1, offset2.size(1) // 2, 1, 1)
            offset = torch.cat([offset1, offset2], dim=1)
        elif self.pa_frames == 6:
            # Fix: removed a redundant recomputation of `offset` (it repeated
            # the identical tanh expression already evaluated above).
            offset1, offset2, offset3 = torch.chunk(offset, 3, dim=1)
            offset1 = offset1 + flows[0].flip(1).repeat(1, offset1.size(1) // 2, 1, 1)
            offset2 = offset2 + flows[1].flip(1).repeat(1, offset2.size(1) // 2, 1, 1)
            offset3 = offset3 + flows[2].flip(1).repeat(1, offset3.size(1) // 2, 1, 1)
            offset = torch.cat([offset1, offset2, offset3], dim=1)

        # mask: modulation scalars in (0, 1).
        mask = torch.sigmoid(mask)

        return torchvision.ops.deform_conv2d(x, offset, self.weight, self.bias, self.stride, self.padding,
                                             self.dilation, mask)
class BasicModule(nn.Module):
    """Basic Module for SpyNet.

    One pyramid level: a stack of 7x7 convolutions mapping the 8-channel
    input (ref frame + warped supp frame + 2-channel flow) to a 2-channel
    flow residual. Layer indices inside the Sequential match the original
    layout so pretrained state dicts load unchanged.
    """

    def __init__(self):
        super(BasicModule, self).__init__()
        channels = (8, 32, 64, 32, 16, 2)
        layers = []
        for i, (c_in, c_out) in enumerate(zip(channels[:-1], channels[1:])):
            layers.append(nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=7, stride=1, padding=3))
            # No activation after the final (flow-output) convolution.
            if i < len(channels) - 2:
                layers.append(nn.ReLU(inplace=False))
        self.basic_module = nn.Sequential(*layers)

    def forward(self, tensor_input):
        return self.basic_module(tensor_input)
class SpyNet(nn.Module):
    """SpyNet architecture (coarse-to-fine optical flow estimation).
    Args:
        load_path (str): path for pretrained SpyNet. Default: None.
        return_levels (list[int]): return flows of different levels. Default: [5].
    """

    def __init__(self, load_path=None, return_levels=None):
        super(SpyNet, self).__init__()
        # Fix: use a None sentinel instead of a mutable default argument.
        self.return_levels = [5] if return_levels is None else return_levels
        # Six pyramid levels, each with its own flow estimator.
        self.basic_module = nn.ModuleList([BasicModule() for _ in range(6)])
        if load_path:
            if not os.path.exists(load_path):
                import requests
                url = 'https://github.com/JingyunLiang/VRT/releases/download/v0.0/spynet_sintel_final-3d2a1287.pth'
                r = requests.get(url, allow_redirects=True)
                print(f'downloading SpyNet pretrained model from {url}')
                os.makedirs(os.path.dirname(load_path), exist_ok=True)
                # Fix: close the file handle instead of leaking it.
                with open(load_path, 'wb') as f:
                    f.write(r.content)
            # NOTE(review): torch.load unpickles arbitrary objects — only load
            # checkpoints obtained from the trusted URL above.
            self.load_state_dict(torch.load(load_path, map_location=lambda storage, loc: storage)['params'])

        # ImageNet normalisation constants. Registered after load_state_dict:
        # the checkpoint's 'params' contain only basic_module weights.
        self.register_buffer('mean', torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer('std', torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

    def preprocess(self, tensor_input):
        """Normalise an RGB batch with ImageNet statistics."""
        tensor_output = (tensor_input - self.mean) / self.std
        return tensor_output

    def process(self, ref, supp, w, h, w_floor, h_floor):
        """Estimate flow coarse-to-fine; returns flows for self.return_levels.

        w/h are the caller's original sizes, w_floor/h_floor the padded sizes
        actually processed — output flows are rescaled accordingly.
        """
        flow_list = []

        ref = [self.preprocess(ref)]
        supp = [self.preprocess(supp)]

        # Build a 6-level pyramid (coarsest first after the inserts).
        for level in range(5):
            ref.insert(0, F.avg_pool2d(input=ref[0], kernel_size=2, stride=2, count_include_pad=False))
            supp.insert(0, F.avg_pool2d(input=supp[0], kernel_size=2, stride=2, count_include_pad=False))

        # Start from a zero flow at half the coarsest resolution.
        flow = ref[0].new_zeros(
            [ref[0].size(0), 2,
             int(math.floor(ref[0].size(2) / 2.0)),
             int(math.floor(ref[0].size(3) / 2.0))])

        for level in range(len(ref)):
            upsampled_flow = F.interpolate(input=flow, scale_factor=2, mode='bilinear', align_corners=True) * 2.0

            # Replicate-pad by one row/column when the upsampled flow is one
            # pixel short of the current pyramid level (odd sizes).
            if upsampled_flow.size(2) != ref[level].size(2):
                upsampled_flow = F.pad(input=upsampled_flow, pad=[0, 0, 0, 1], mode='replicate')
            if upsampled_flow.size(3) != ref[level].size(3):
                upsampled_flow = F.pad(input=upsampled_flow, pad=[0, 1, 0, 0], mode='replicate')

            # Residual refinement: estimator sees [ref, warped supp, flow].
            flow = self.basic_module[level](torch.cat([
                ref[level],
                flow_warp(
                    supp[level], upsampled_flow.permute(0, 2, 3, 1), interp_mode='bilinear', padding_mode='border'),
                upsampled_flow
            ], 1)) + upsampled_flow

            if level in self.return_levels:
                scale = 2**(5-level)  # level=5 (scale=1), level=4 (scale=2), level=3 (scale=4), level=2 (scale=8)
                flow_out = F.interpolate(input=flow, size=(h//scale, w//scale), mode='bilinear', align_corners=False)
                # Rescale flow magnitudes from the padded to the true size.
                flow_out[:, 0, :, :] *= float(w//scale) / float(w_floor//scale)
                flow_out[:, 1, :, :] *= float(h//scale) / float(h_floor//scale)
                flow_list.insert(0, flow_out)

        return flow_list

    def forward(self, ref, supp):
        """Compute flow from `supp` to `ref` (both (n, 3, h, w), same size)."""
        assert ref.size() == supp.size()
        h, w = ref.size(2), ref.size(3)
        # Round sizes up to multiples of 32 (5 pooling levels).
        w_floor = math.floor(math.ceil(w / 32.0) * 32.0)
        h_floor = math.floor(math.ceil(h / 32.0) * 32.0)

        ref = F.interpolate(input=ref, size=(h_floor, w_floor), mode='bilinear', align_corners=False)
        supp = F.interpolate(input=supp, size=(h_floor, w_floor), mode='bilinear', align_corners=False)

        flow_list = self.process(ref, supp, w, h, w_floor, h_floor)

        return flow_list[0] if len(flow_list) == 1 else flow_list
def window_partition(x, window_size):
    """ Partition the input into windows. Attention will be conducted within the windows.
    Args:
        x: (B, D, H, W, C)
        window_size (tuple[int]): window size
    Returns:
        windows: (B*num_windows, window_size*window_size, C)
    """
    B, D, H, W, C = x.shape
    wd, wh, ww = window_size
    # Split each spatial/temporal axis into (num_windows, window) pairs, then
    # bring the three window axes together and flatten them into tokens.
    x = x.view(B, D // wd, wd, H // wh, wh, W // ww, ww, C)
    windows = x.permute(0, 1, 3, 5, 2, 4, 6, 7).contiguous()
    return windows.view(-1, wd * wh * ww, C)
def window_reverse(windows, window_size, B, D, H, W):
    """ Reverse windows back to the original input. Attention was conducted within the windows.
    Args:
        windows: (B*num_windows, window_size, window_size, C)
        window_size (tuple[int]): Window size
        H (int): Height of image
        W (int): Width of image
    Returns:
        x: (B, D, H, W, C)
    """
    wd, wh, ww = window_size
    # Inverse of window_partition: unflatten the window grid, then interleave
    # the window axes back with their grid axes.
    x = windows.view(B, D // wd, H // wh, W // ww, wd, wh, ww, -1)
    x = x.permute(0, 1, 4, 2, 5, 3, 6, 7).contiguous()
    return x.view(B, D, H, W, -1)
def get_window_size(x_size, window_size, shift_size=None):
    """ Clamp window (and shift) sizes per dimension to the actual input size.

    When a dimension is no larger than its window, the window covers it fully
    and shifting along it becomes pointless (shift set to 0).
    """
    win = list(window_size)
    shift = None if shift_size is None else list(shift_size)
    for i, size in enumerate(x_size):
        if size <= window_size[i]:
            win[i] = size
            if shift is not None:
                shift[i] = 0
    if shift is None:
        return tuple(win)
    return tuple(win), tuple(shift)
@lru_cache()
def compute_mask(D, H, W, window_size, shift_size, device):
    """ Compute attnetion mask for input of size (D, H, W). @lru_cache caches each stage results. """
    img_mask = torch.zeros((1, D, H, W, 1), device=device)  # 1 Dp Hp Wp 1
    # For each axis: [main region, boundary band, shifted band] — regions that
    # must not attend to each other after the cyclic shift get distinct ids.
    axis_slices = [
        (slice(-ws), slice(-ws, -ss), slice(-ss, None))
        for ws, ss in zip(window_size, shift_size)
    ]
    cnt = 0
    for d in axis_slices[0]:
        for h in axis_slices[1]:
            for w in axis_slices[2]:
                img_mask[:, d, h, w, :] = cnt
                cnt += 1
    mask_windows = window_partition(img_mask, window_size).squeeze(-1)  # nW, ws[0]*ws[1]*ws[2]
    # Pairwise id difference: 0 => same region (allowed), else a large
    # negative bias that suppresses the attention logit.
    attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
    return attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
class Upsample(nn.Sequential):
    """Upsample module for video SR.
    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.
    Raises:
        ValueError: if `scale` is neither a power of two nor 3.
    """

    def __init__(self, scale, num_feat):
        # Fix: distutils (LooseVersion) was removed in Python 3.12; compare the
        # numeric components of torch.__version__ directly instead.
        import re
        torch_version = tuple(int(p) for p in re.findall(r'\d+', torch.__version__)[:3])
        assert torch_version >= (1, 8, 1), \
            'PyTorch version >= 1.8.1 to support 5D PixelShuffle.'

        class Transpose_Dim12(nn.Module):
            """ Transpose Dim1 and Dim2 of a tensor."""

            def __init__(self):
                super().__init__()

            def forward(self, x):
                # Swap channel and temporal dims so PixelShuffle acts on (H, W).
                return x.transpose(1, 2)

        m = []
        if (scale & (scale - 1)) == 0:  # scale = 2^n
            for _ in range(int(math.log(scale, 2))):
                m.append(nn.Conv3d(num_feat, 4 * num_feat, kernel_size=(1, 3, 3), padding=(0, 1, 1)))
                m.append(Transpose_Dim12())
                m.append(nn.PixelShuffle(2))
                m.append(Transpose_Dim12())
                m.append(nn.LeakyReLU(negative_slope=0.1, inplace=True))
            m.append(nn.Conv3d(num_feat, num_feat, kernel_size=(1, 3, 3), padding=(0, 1, 1)))
        elif scale == 3:
            m.append(nn.Conv3d(num_feat, 9 * num_feat, kernel_size=(1, 3, 3), padding=(0, 1, 1)))
            m.append(Transpose_Dim12())
            m.append(nn.PixelShuffle(3))
            m.append(Transpose_Dim12())
            m.append(nn.LeakyReLU(negative_slope=0.1, inplace=True))
            m.append(nn.Conv3d(num_feat, num_feat, kernel_size=(1, 3, 3), padding=(0, 1, 1)))
        else:
            raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
        super(Upsample, self).__init__(*m)
class Mlp_GEGLU(nn.Module):
    """ Multilayer perceptron with gated linear unit (GEGLU). Ref. "GLU Variants Improve Transformer".
    Args:
        x: (B, D, H, W, C)
    Returns:
        x: (B, D, H, W, C)
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        # Two parallel input projections: fc11 is passed through the
        # activation and gates the linear branch fc12.
        self.fc11 = nn.Linear(in_features, hidden_features)
        self.fc12 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        gated = self.act(self.fc11(x)) * self.fc12(x)
        return self.fc2(self.drop(gated))
class WindowAttention(nn.Module):
    """ Window based multi-head mutual attention and self attention.
    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The temporal length, height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        mut_attn (bool): If True, add mutual attention to the module. Default: True
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=False, qk_scale=None, mut_attn=True):
        super().__init__()
        self.dim = dim
        self.window_size = window_size
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.mut_attn = mut_attn

        # self attention with relative position bias:
        # one learnable bias per relative (d, h, w) offset and per head.
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1) * (2 * window_size[2] - 1),
                        num_heads))  # 2*Wd-1 * 2*Wh-1 * 2*Ww-1, nH
        self.register_buffer("relative_position_index", self.get_position_index(window_size))
        self.qkv_self = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.proj = nn.Linear(dim, dim)

        # mutual attention with sine position encoding
        if self.mut_attn:
            # Fixed (non-learned) 2D sine position encoding over (h, w).
            self.register_buffer("position_bias",
                                 self.get_sine_position_encoding(window_size[1:], dim // 2, normalize=True))
            self.qkv_mut = nn.Linear(dim, dim * 3, bias=qkv_bias)
            # Overwrites the projection above: with mutual attention enabled
            # the projection input is cat(mutual, self) -> 2 * dim features.
            self.proj = nn.Linear(2 * dim, dim)

        self.softmax = nn.Softmax(dim=-1)
        trunc_normal_(self.relative_position_bias_table, std=.02)

    def forward(self, x, mask=None):
        """ Forward function.
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, N, N) or None
        """

        # self attention
        B_, N, C = x.shape
        qkv = self.qkv_self(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # B_, nH, N, C
        x_out = self.attention(q, k, v, mask, (B_, N, C), relative_position_encoding=True)

        # mutual attention
        if self.mut_attn:
            # position_bias covers one (h, w) frame; repeated x2 along N —
            # NOTE(review): presumably each window stacks two frames along N
            # (cf. window_size[0] == 2 in the TMSAG mutual branch) — confirm.
            qkv = self.qkv_mut(x + self.position_bias.repeat(1, 2, 1)).reshape(B_, N, 3, self.num_heads,
                                                                               C // self.num_heads).permute(2, 0, 3, 1,
                                                                                                            4)
            (q1, q2), (k1, k2), (v1, v2) = torch.chunk(qkv[0], 2, dim=2), torch.chunk(qkv[1], 2, dim=2), torch.chunk(
                qkv[2], 2, dim=2)  # B_, nH, N/2, C
            # Cross-attend: each half queries the other half's keys/values.
            x1_aligned = self.attention(q2, k1, v1, mask, (B_, N // 2, C), relative_position_encoding=False)
            x2_aligned = self.attention(q1, k2, v2, mask, (B_, N // 2, C), relative_position_encoding=False)
            # Concatenate mutual and self attention outputs along channels (2*C).
            x_out = torch.cat([torch.cat([x1_aligned, x2_aligned], 1), x_out], 2)

        # projection back to dim channels
        x = self.proj(x_out)

        return x

    def attention(self, q, k, v, mask, x_shape, relative_position_encoding=True):
        """Scaled dot-product attention with optional relative position bias
        and optional additive window mask. q/k/v: (B_, nH, N, C/nH)."""
        B_, N, C = x_shape
        attn = (q * self.scale) @ k.transpose(-2, -1)

        if relative_position_encoding:
            relative_position_bias = self.relative_position_bias_table[
                self.relative_position_index[:N, :N].reshape(-1)].reshape(N, N, -1)  # Wd*Wh*Ww, Wd*Wh*Ww,nH
            attn = attn + relative_position_bias.permute(2, 0, 1).unsqueeze(0)  # B_, nH, N, N

        if mask is None:
            attn = self.softmax(attn)
        else:
            # Broadcast the per-window mask over batch and heads, then restore
            # the flattened (B_*nW) layout before softmax.
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask[:, :N, :N].unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)

        return x

    def get_position_index(self, window_size):
        ''' Get pair-wise relative position index for each token inside the window. '''
        coords_d = torch.arange(window_size[0])
        coords_h = torch.arange(window_size[1])
        coords_w = torch.arange(window_size[2])
        coords = torch.stack(torch.meshgrid(coords_d, coords_h, coords_w))  # 3, Wd, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 3, Wd*Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 3, Wd*Wh*Ww, Wd*Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wd*Wh*Ww, Wd*Wh*Ww, 3
        relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += window_size[1] - 1
        relative_coords[:, :, 2] += window_size[2] - 1
        # Mixed-radix flattening of the 3D relative offset into a single index
        # into relative_position_bias_table.
        relative_coords[:, :, 0] *= (2 * window_size[1] - 1) * (2 * window_size[2] - 1)
        relative_coords[:, :, 1] *= (2 * window_size[2] - 1)
        relative_position_index = relative_coords.sum(-1)  # Wd*Wh*Ww, Wd*Wh*Ww

        return relative_position_index

    def get_sine_position_encoding(self, HW, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        """ Get sine position encoding (DETR-style, fixed, not learned).
        Returns a (1, HW[0]*HW[1], 2*num_pos_feats) tensor. """
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")

        if scale is None:
            scale = 2 * math.pi

        not_mask = torch.ones([1, HW[0], HW[1]])
        # Cumulative sums give each pixel its (row, col) coordinate (1-based).
        y_embed = not_mask.cumsum(1, dtype=torch.float32)
        x_embed = not_mask.cumsum(2, dtype=torch.float32)
        if normalize:
            eps = 1e-6
            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * scale
            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * scale

        dim_t = torch.arange(num_pos_feats, dtype=torch.float32)
        dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)

        # BxCxHxW: interleave sin/cos over feature pairs, then concat y and x.
        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
        pos_embed = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)

        return pos_embed.flatten(2).permute(0, 2, 1).contiguous()
class TMSA(nn.Module):
    """ Temporal Mutual Self Attention (TMSA).
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        num_heads (int): Number of attention heads.
        window_size (tuple[int]): Window size.
        shift_size (tuple[int]): Shift size for mutual and self attention.
        mut_attn (bool): If True, use mutual and self attention. Default: True.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True.
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop_path (float, optional): Stochastic depth rate. Default: 0.0.
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm.
        use_checkpoint_attn (bool): If True, use torch.checkpoint for attention modules. Default: False.
        use_checkpoint_ffn (bool): If True, use torch.checkpoint for feed-forward modules. Default: False.
    """

    def __init__(self,
                 dim,
                 input_resolution,
                 num_heads,
                 window_size=(6, 8, 8),
                 shift_size=(0, 0, 0),
                 mut_attn=True,
                 mlp_ratio=2.,
                 qkv_bias=True,
                 qk_scale=None,
                 drop_path=0.,
                 act_layer=nn.GELU,
                 norm_layer=nn.LayerNorm,
                 use_checkpoint_attn=False,
                 use_checkpoint_ffn=False
                 ):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.use_checkpoint_attn = use_checkpoint_attn
        self.use_checkpoint_ffn = use_checkpoint_ffn

        # A shift must stay strictly inside its window in every dimension.
        assert 0 <= self.shift_size[0] < self.window_size[0], "shift_size must in 0-window_size"
        assert 0 <= self.shift_size[1] < self.window_size[1], "shift_size must in 0-window_size"
        assert 0 <= self.shift_size[2] < self.window_size[2], "shift_size must in 0-window_size"

        # Pre-norm transformer block: norm -> attention, norm -> GEGLU MLP.
        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(dim, window_size=self.window_size, num_heads=num_heads, qkv_bias=qkv_bias,
                                    qk_scale=qk_scale, mut_attn=mut_attn)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp_GEGLU(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer)

    def forward_part1(self, x, mask_matrix):
        """Attention half of the block (without the residual add).
        x: (B, D, H, W, C); mask_matrix: precomputed shifted-window mask."""
        B, D, H, W, C = x.shape
        # Shrink window/shift where the input is smaller than the window.
        window_size, shift_size = get_window_size((D, H, W), self.window_size, self.shift_size)

        x = self.norm1(x)

        # pad feature maps to multiples of window size (pad only at the end
        # of each dimension; F.pad's pairs run from the last dim backwards)
        pad_l = pad_t = pad_d0 = 0
        pad_d1 = (window_size[0] - D % window_size[0]) % window_size[0]
        pad_b = (window_size[1] - H % window_size[1]) % window_size[1]
        pad_r = (window_size[2] - W % window_size[2]) % window_size[2]
        x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b, pad_d0, pad_d1), mode='constant')

        _, Dp, Hp, Wp, _ = x.shape

        # cyclic shift (Swin-style): roll so cross-window interactions happen,
        # with mask_matrix suppressing attention across original boundaries.
        if any(i > 0 for i in shift_size):
            shifted_x = torch.roll(x, shifts=(-shift_size[0], -shift_size[1], -shift_size[2]), dims=(1, 2, 3))
            attn_mask = mask_matrix
        else:
            shifted_x = x
            attn_mask = None

        # partition windows
        x_windows = window_partition(shifted_x, window_size)  # B*nW, Wd*Wh*Ww, C

        # attention / shifted attention
        attn_windows = self.attn(x_windows, mask=attn_mask)  # B*nW, Wd*Wh*Ww, C

        # merge windows
        attn_windows = attn_windows.view(-1, *(window_size + (C,)))
        shifted_x = window_reverse(attn_windows, window_size, B, Dp, Hp, Wp)  # B D' H' W' C

        # reverse cyclic shift
        if any(i > 0 for i in shift_size):
            x = torch.roll(shifted_x, shifts=(shift_size[0], shift_size[1], shift_size[2]), dims=(1, 2, 3))
        else:
            x = shifted_x

        # crop the padding back off
        if pad_d1 > 0 or pad_r > 0 or pad_b > 0:
            x = x[:, :D, :H, :W, :]

        x = self.drop_path(x)

        return x

    def forward_part2(self, x):
        # MLP half of the block (without the residual add).
        return self.drop_path(self.mlp(self.norm2(x)))

    def forward(self, x, mask_matrix):
        """ Forward function.
        Args:
            x: Input feature, tensor size (B, D, H, W, C).
            mask_matrix: Attention mask for cyclic shift.
        """

        # attention (optionally gradient-checkpointed to save memory)
        if self.use_checkpoint_attn:
            x = x + checkpoint.checkpoint(self.forward_part1, x, mask_matrix)
        else:
            x = x + self.forward_part1(x, mask_matrix)

        # feed-forward (optionally gradient-checkpointed)
        if self.use_checkpoint_ffn:
            x = x + checkpoint.checkpoint(self.forward_part2, x)
        else:
            x = x + self.forward_part2(x)

        return x
class TMSAG(nn.Module):
    """ Temporal Mutual Self Attention Group (TMSAG).

    A stack of TMSA blocks with alternating zero / half-window shifts
    (Swin-style), sharing one cached attention mask per forward pass.

    Args:
        dim (int): Number of feature channels
        input_resolution (tuple[int]): Input resolution.
        depth (int): Depths of this stage.
        num_heads (int): Number of attention head.
        window_size (tuple[int]): Local window size. Default: (6,8,8).
        shift_size (tuple[int]): Shift size for mutual and self attention. Default: None.
        mut_attn (bool): If True, use mutual and self attention. Default: True.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 2.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        use_checkpoint_attn (bool): If True, use torch.checkpoint for attention modules. Default: False.
        use_checkpoint_ffn (bool): If True, use torch.checkpoint for feed-forward modules. Default: False.
    """

    def __init__(self,
                 dim,
                 input_resolution,
                 depth,
                 num_heads,
                 window_size=[6, 8, 8],
                 shift_size=None,
                 mut_attn=True,
                 mlp_ratio=2.,
                 qkv_bias=False,
                 qk_scale=None,
                 drop_path=0.,
                 norm_layer=nn.LayerNorm,
                 use_checkpoint_attn=False,
                 use_checkpoint_ffn=False
                 ):
        super().__init__()
        self.input_resolution = input_resolution
        self.window_size = window_size
        # Default shift: half the window along every dimension.
        self.shift_size = [w // 2 for w in window_size] if shift_size is None else shift_size

        # Even-indexed blocks are unshifted; odd-indexed blocks use the shift.
        self.blocks = nn.ModuleList()
        for i in range(depth):
            self.blocks.append(
                TMSA(dim=dim,
                     input_resolution=input_resolution,
                     num_heads=num_heads,
                     window_size=window_size,
                     shift_size=[0, 0, 0] if i % 2 == 0 else self.shift_size,
                     mut_attn=mut_attn,
                     mlp_ratio=mlp_ratio,
                     qkv_bias=qkv_bias,
                     qk_scale=qk_scale,
                     drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                     norm_layer=norm_layer,
                     use_checkpoint_attn=use_checkpoint_attn,
                     use_checkpoint_ffn=use_checkpoint_ffn))

    def forward(self, x):
        """ Forward function.
        Args:
            x: Input feature, tensor size (B, C, D, H, W).
        """
        B, C, D, H, W = x.shape
        window_size, shift_size = get_window_size((D, H, W), self.window_size, self.shift_size)
        x = rearrange(x, 'b c d h w -> b d h w c')
        # Mask is built for the padded (window-multiple) sizes; compute_mask
        # is lru_cached, so repeated calls with the same sizes are free.
        Dp, Hp, Wp = (int(np.ceil(s / w)) * w for s, w in zip((D, H, W), window_size))
        attn_mask = compute_mask(Dp, Hp, Wp, window_size, shift_size, x.device)
        for block in self.blocks:
            x = block(x, attn_mask)
        x = x.view(B, D, H, W, -1)
        return rearrange(x, 'b d h w c -> b c d h w')
class RTMSA(nn.Module):
    """ Residual Temporal Mutual Self Attention (RTMSA). Only used in stage 8.

    A self-attention-only TMSAG followed by a linear projection, wrapped in a
    residual connection.

    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True.
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm.
        use_checkpoint_attn (bool): If True, use torch.checkpoint for attention modules. Default: False.
        use_checkpoint_ffn (bool): If True, use torch.checkpoint for feed-forward modules. Default: False.
    """

    def __init__(self,
                 dim,
                 input_resolution,
                 depth,
                 num_heads,
                 window_size,
                 mlp_ratio=2.,
                 qkv_bias=True,
                 qk_scale=None,
                 drop_path=0.,
                 norm_layer=nn.LayerNorm,
                 use_checkpoint_attn=False,
                 use_checkpoint_ffn=None
                 ):
        super(RTMSA, self).__init__()
        self.dim = dim
        self.input_resolution = input_resolution

        # mut_attn=False: pure windowed self-attention in this stage.
        self.residual_group = TMSAG(dim=dim,
                                    input_resolution=input_resolution,
                                    depth=depth,
                                    num_heads=num_heads,
                                    window_size=window_size,
                                    mut_attn=False,
                                    mlp_ratio=mlp_ratio,
                                    qkv_bias=qkv_bias, qk_scale=qk_scale,
                                    drop_path=drop_path,
                                    norm_layer=norm_layer,
                                    use_checkpoint_attn=use_checkpoint_attn,
                                    use_checkpoint_ffn=use_checkpoint_ffn
                                    )
        self.linear = nn.Linear(dim, dim)

    def forward(self, x):
        # Move channels last (dim 1 <-> 4) for the linear layer, project,
        # restore the (B, C, D, H, W) layout, then add the residual.
        projected = self.linear(self.residual_group(x).transpose(1, 4))
        return x + projected.transpose(1, 4)
class Stage(nn.Module):
    """Residual Temporal Mutual Self Attention Group and Parallel Warping.
    Args:
        in_dim (int): Number of input channels.
        dim (int): Number of channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        mul_attn_ratio (float): Ratio of mutual attention layers. Default: 0.75.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        pa_frames (float): Number of warpped frames. Default: 2.
        deformable_groups (float): Number of deformable groups. Default: 16.
        reshape (str): Downscale (down), upscale (up) or keep the size (none).
        max_residue_magnitude (float): Maximum magnitude of the residual of optical flow.
        use_checkpoint_attn (bool): If True, use torch.checkpoint for attention modules. Default: False.
        use_checkpoint_ffn (bool): If True, use torch.checkpoint for feed-forward modules. Default: False.
    """
    def __init__(self,
                 in_dim,
                 dim,
                 input_resolution,
                 depth,
                 num_heads,
                 window_size,
                 mul_attn_ratio=0.75,
                 mlp_ratio=2.,
                 qkv_bias=True,
                 qk_scale=None,
                 drop_path=0.,
                 norm_layer=nn.LayerNorm,
                 pa_frames=2,
                 deformable_groups=16,
                 reshape=None,
                 max_residue_magnitude=10,
                 use_checkpoint_attn=False,
                 use_checkpoint_ffn=False
                 ):
        super(Stage, self).__init__()
        self.pa_frames = pa_frames

        # reshape the tensor: 'none' keeps spatial size (LayerNorm only);
        # 'down' folds 2x2 spatial neighborhoods into channels (pixel-unshuffle style);
        # 'up' unfolds channels back into a 2x upscaled spatial grid.
        if reshape == 'none':
            self.reshape = nn.Sequential(Rearrange('n c d h w -> n d h w c'),
                                         nn.LayerNorm(dim),
                                         Rearrange('n d h w c -> n c d h w'))
        elif reshape == 'down':
            self.reshape = nn.Sequential(Rearrange('n c d (h neih) (w neiw) -> n d h w (neiw neih c)', neih=2, neiw=2),
                                         nn.LayerNorm(4 * in_dim), nn.Linear(4 * in_dim, dim),
                                         Rearrange('n d h w c -> n c d h w'))
        elif reshape == 'up':
            self.reshape = nn.Sequential(Rearrange('n (neiw neih c) d h w -> n d (h neih) (w neiw) c', neih=2, neiw=2),
                                         nn.LayerNorm(in_dim // 4), nn.Linear(in_dim // 4, dim),
                                         Rearrange('n d h w c -> n c d h w'))

        # mutual and self attention: first group attends across frame pairs
        # (temporal window of 2), taking the first mul_attn_ratio of the depth
        self.residual_group1 = TMSAG(dim=dim,
                                     input_resolution=input_resolution,
                                     depth=int(depth * mul_attn_ratio),
                                     num_heads=num_heads,
                                     window_size=(2, window_size[1], window_size[2]),
                                     mut_attn=True,
                                     mlp_ratio=mlp_ratio,
                                     qkv_bias=qkv_bias,
                                     qk_scale=qk_scale,
                                     drop_path=drop_path,
                                     norm_layer=norm_layer,
                                     use_checkpoint_attn=use_checkpoint_attn,
                                     use_checkpoint_ffn=use_checkpoint_ffn
                                     )
        self.linear1 = nn.Linear(dim, dim)

        # only self attention: remaining blocks use the full temporal window.
        # NOTE(review): use_checkpoint_attn is hard-coded to True here, so the
        # constructor argument is ignored for this group — presumably a memory
        # trade-off; confirm this is intentional.
        self.residual_group2 = TMSAG(dim=dim,
                                     input_resolution=input_resolution,
                                     depth=depth - int(depth * mul_attn_ratio),
                                     num_heads=num_heads,
                                     window_size=window_size,
                                     mut_attn=False,
                                     mlp_ratio=mlp_ratio,
                                     qkv_bias=qkv_bias, qk_scale=qk_scale,
                                     drop_path=drop_path,
                                     norm_layer=norm_layer,
                                     use_checkpoint_attn=True,
                                     use_checkpoint_ffn=use_checkpoint_ffn
                                     )
        self.linear2 = nn.Linear(dim, dim)

        # parallel warping: flow-guided deformable conv to align neighboring
        # frames, then a GEGLU MLP to fuse (current, backward, forward) features
        self.pa_deform = DCNv2PackFlowGuided(dim, dim, 3, padding=1, deformable_groups=deformable_groups,
                                             max_residue_magnitude=max_residue_magnitude, pa_frames=pa_frames)
        self.pa_fuse = Mlp_GEGLU(dim * (1 + 2), dim * (1 + 2), dim)

    def forward(self, x, flows_backward, flows_forward):
        """Run attention groups then parallel warping.

        x: (n, c, d, h, w); flows_*: per-scale lists of flow tensors.
        Returns a tensor of the same layout with `dim` channels.
        """
        x = self.reshape(x)
        # transpose(1, 4) moves channels last for nn.Linear, then back
        x = self.linear1(self.residual_group1(x).transpose(1, 4)).transpose(1, 4) + x
        x = self.linear2(self.residual_group2(x).transpose(1, 4)).transpose(1, 4) + x
        x = x.transpose(1, 2)  # (n, d, c, h, w) for per-frame warping

        # dispatch on pa_frames (2, 4 or 6) to the matching alignment routine
        x_backward, x_forward = getattr(self, f'get_aligned_feature_{self.pa_frames}frames')(x, flows_backward, flows_forward)
        # fuse original + aligned neighbors along channels, back to (n, c, d, h, w)
        x = self.pa_fuse(torch.cat([x, x_backward, x_forward], 2).permute(0, 1, 3, 4, 2)).permute(0, 4, 1, 2, 3)

        return x

    def get_aligned_feature_2frames(self, x, flows_backward, flows_forward):
        '''Parallel feature warping for 2 frames.'''

        # backward: warp each frame i+1 towards frame i; boundary slot is zeros
        n = x.size(1)
        x_backward = [torch.zeros_like(x[:, -1, ...])]
        for i in range(n - 1, 0, -1):
            x_i = x[:, i, ...]
            flow = flows_backward[0][:, i - 1, ...]
            x_i_warped = flow_warp(x_i, flow.permute(0, 2, 3, 1), 'bilinear')  # frame i+1 aligned towards i
            x_backward.insert(0, self.pa_deform(x_i, [x_i_warped], x[:, i - 1, ...], [flow]))

        # forward: warp each frame i-1 towards frame i; boundary slot is zeros
        x_forward = [torch.zeros_like(x[:, 0, ...])]
        for i in range(0, n - 1):
            x_i = x[:, i, ...]
            flow = flows_forward[0][:, i, ...]
            x_i_warped = flow_warp(x_i, flow.permute(0, 2, 3, 1), 'bilinear')  # frame i-1 aligned towards i
            x_forward.append(self.pa_deform(x_i, [x_i_warped], x[:, i + 1, ...], [flow]))

        return [torch.stack(x_backward, 1), torch.stack(x_forward, 1)]

    def get_aligned_feature_4frames(self, x, flows_backward, flows_forward):
        '''Parallel feature warping for 4 frames.'''

        # backward: warp frames i+1 and i+2 towards i (zeros past the sequence end)
        n = x.size(1)
        x_backward = [torch.zeros_like(x[:, -1, ...])]
        for i in range(n, 1, -1):
            x_i = x[:, i - 1, ...]
            flow1 = flows_backward[0][:, i - 2, ...]
            if i == n:
                # no frame i+2 exists: substitute zero features and zero flow
                x_ii = torch.zeros_like(x[:, n - 2, ...])
                flow2 = torch.zeros_like(flows_backward[1][:, n - 3, ...])
            else:
                x_ii = x[:, i, ...]
                flow2 = flows_backward[1][:, i - 2, ...]

            x_i_warped = flow_warp(x_i, flow1.permute(0, 2, 3, 1), 'bilinear')  # frame i+1 aligned towards i
            x_ii_warped = flow_warp(x_ii, flow2.permute(0, 2, 3, 1), 'bilinear')  # frame i+2 aligned towards i
            x_backward.insert(0,
                self.pa_deform(torch.cat([x_i, x_ii], 1), [x_i_warped, x_ii_warped], x[:, i - 2, ...], [flow1, flow2]))

        # forward: warp frames i-1 and i-2 towards i (zeros before the sequence start)
        x_forward = [torch.zeros_like(x[:, 0, ...])]
        for i in range(-1, n - 2):
            x_i = x[:, i + 1, ...]
            flow1 = flows_forward[0][:, i + 1, ...]
            if i == -1:
                # no frame i-2 exists: substitute zero features and zero flow
                x_ii = torch.zeros_like(x[:, 1, ...])
                flow2 = torch.zeros_like(flows_forward[1][:, 0, ...])
            else:
                x_ii = x[:, i, ...]
                flow2 = flows_forward[1][:, i, ...]

            x_i_warped = flow_warp(x_i, flow1.permute(0, 2, 3, 1), 'bilinear')  # frame i-1 aligned towards i
            x_ii_warped = flow_warp(x_ii, flow2.permute(0, 2, 3, 1), 'bilinear')  # frame i-2 aligned towards i
            x_forward.append(
                self.pa_deform(torch.cat([x_i, x_ii], 1), [x_i_warped, x_ii_warped], x[:, i + 2, ...], [flow1, flow2]))

        return [torch.stack(x_backward, 1), torch.stack(x_forward, 1)]

    def get_aligned_feature_6frames(self, x, flows_backward, flows_forward):
        '''Parallel feature warping for 6 frames.'''

        # backward: warp frames i+1, i+2 and i+3 towards i; missing frames past
        # the end are replaced by zero features/flows (two boundary cases)
        n = x.size(1)
        x_backward = [torch.zeros_like(x[:, -1, ...])]
        for i in range(n + 1, 2, -1):
            x_i = x[:, i - 2, ...]
            flow1 = flows_backward[0][:, i - 3, ...]
            if i == n + 1:
                x_ii = torch.zeros_like(x[:, -1, ...])
                flow2 = torch.zeros_like(flows_backward[1][:, -1, ...])
                x_iii = torch.zeros_like(x[:, -1, ...])
                flow3 = torch.zeros_like(flows_backward[2][:, -1, ...])
            elif i == n:
                x_ii = x[:, i - 1, ...]
                flow2 = flows_backward[1][:, i - 3, ...]
                x_iii = torch.zeros_like(x[:, -1, ...])
                flow3 = torch.zeros_like(flows_backward[2][:, -1, ...])
            else:
                x_ii = x[:, i - 1, ...]
                flow2 = flows_backward[1][:, i - 3, ...]
                x_iii = x[:, i, ...]
                flow3 = flows_backward[2][:, i - 3, ...]

            x_i_warped = flow_warp(x_i, flow1.permute(0, 2, 3, 1), 'bilinear')  # frame i+1 aligned towards i
            x_ii_warped = flow_warp(x_ii, flow2.permute(0, 2, 3, 1), 'bilinear')  # frame i+2 aligned towards i
            x_iii_warped = flow_warp(x_iii, flow3.permute(0, 2, 3, 1), 'bilinear')  # frame i+3 aligned towards i
            x_backward.insert(0,
                              self.pa_deform(torch.cat([x_i, x_ii, x_iii], 1), [x_i_warped, x_ii_warped, x_iii_warped],
                                             x[:, i - 3, ...], [flow1, flow2, flow3]))

        # forward: warp frames i-1, i-2 and i-3 towards i; missing frames before
        # the start are replaced by zero features/flows (two boundary cases)
        x_forward = [torch.zeros_like(x[:, 0, ...])]
        for i in range(0, n - 1):
            x_i = x[:, i, ...]
            flow1 = flows_forward[0][:, i, ...]
            if i == 0:
                x_ii = torch.zeros_like(x[:, 0, ...])
                flow2 = torch.zeros_like(flows_forward[1][:, 0, ...])
                x_iii = torch.zeros_like(x[:, 0, ...])
                flow3 = torch.zeros_like(flows_forward[2][:, 0, ...])
            elif i == 1:
                x_ii = x[:, i - 1, ...]
                flow2 = flows_forward[1][:, i - 1, ...]
                x_iii = torch.zeros_like(x[:, 0, ...])
                flow3 = torch.zeros_like(flows_forward[2][:, 0, ...])
            else:
                x_ii = x[:, i - 1, ...]
                flow2 = flows_forward[1][:, i - 1, ...]
                x_iii = x[:, i - 2, ...]
                flow3 = flows_forward[2][:, i - 2, ...]

            x_i_warped = flow_warp(x_i, flow1.permute(0, 2, 3, 1), 'bilinear')  # frame i-1 aligned towards i
            x_ii_warped = flow_warp(x_ii, flow2.permute(0, 2, 3, 1), 'bilinear')  # frame i-2 aligned towards i
            x_iii_warped = flow_warp(x_iii, flow3.permute(0, 2, 3, 1), 'bilinear')  # frame i-3 aligned towards i
            x_forward.append(self.pa_deform(torch.cat([x_i, x_ii, x_iii], 1), [x_i_warped, x_ii_warped, x_iii_warped],
                                            x[:, i + 1, ...], [flow1, flow2, flow3]))

        return [torch.stack(x_backward, 1), torch.stack(x_forward, 1)]
class VRT(nn.Module):
    """ Video Restoration Transformer (VRT).
        A PyTorch impl of : `VRT: A Video Restoration Transformer`  -
          https://arxiv.org/pdf/2201.00000

    Args:
        upscale (int): Upscaling factor. Set as 1 for video deblurring, etc. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        img_size (int | tuple(int)): Size of input image. Default: [6, 64, 64].
        window_size (int | tuple(int)): Window size. Default: (6,8,8).
        depths (list[int]): Depths of each Transformer stage.
        indep_reconsts (list[int]): Layers that extract features of different frames independently.
        embed_dims (list[int]): Number of linear projection output channels.
        num_heads (list[int]): Number of attention head of each stage.
        mul_attn_ratio (float): Ratio of mutual attention layers. Default: 0.75.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 2.
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True.
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
        drop_path_rate (float): Stochastic depth rate. Default: 0.2.
        norm_layer (obj): Normalization layer. Default: nn.LayerNorm.
        spynet_path (str): Pretrained SpyNet model path.
        pa_frames (float): Number of warpped frames. Default: 2.
        deformable_groups (float): Number of deformable groups. Default: 16.
        recal_all_flows (bool): If True, derive (t,t+2) and (t,t+3) flows from (t,t+1). Default: False.
        nonblind_denoising (bool): If True, conduct experiments on non-blind denoising. Default: False.
        use_checkpoint_attn (bool): If True, use torch.checkpoint for attention modules. Default: False.
        use_checkpoint_ffn (bool): If True, use torch.checkpoint for feed-forward modules. Default: False.
        no_checkpoint_attn_blocks (list[int]): Layers without torch.checkpoint for attention modules.
        no_checkpoint_ffn_blocks (list[int]): Layers without torch.checkpoint for feed-forward modules.
    """

    def __init__(self,
                 upscale=4,
                 in_chans=3,
                 img_size=[6, 64, 64],
                 window_size=[6, 8, 8],
                 depths=[8, 8, 8, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4],
                 indep_reconsts=[11, 12],
                 embed_dims=[120, 120, 120, 120, 120, 120, 120, 180, 180, 180, 180, 180, 180],
                 num_heads=[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
                 mul_attn_ratio=0.75,
                 mlp_ratio=2.,
                 qkv_bias=True,
                 qk_scale=None,
                 drop_path_rate=0.2,
                 norm_layer=nn.LayerNorm,
                 spynet_path=None,
                 pa_frames=2,
                 deformable_groups=16,
                 recal_all_flows=False,
                 nonblind_denoising=False,
                 use_checkpoint_attn=False,
                 use_checkpoint_ffn=False,
                 no_checkpoint_attn_blocks=[],
                 no_checkpoint_ffn_blocks=[],
                 ):
        super().__init__()
        self.in_chans = in_chans
        self.upscale = upscale
        self.pa_frames = pa_frames
        self.recal_all_flows = recal_all_flows
        self.nonblind_denoising = nonblind_denoising

        # conv_first: input is the frame plus 2x4 warped copies of it
        # (nearest4 warping quadruples the channels, backward + forward),
        # plus one extra channel for the noise level map when non-blind denoising
        self.conv_first = nn.Conv3d(in_chans*(1+2*4)+1 if self.nonblind_denoising else in_chans*(1+2*4),
                                    embed_dims[0], kernel_size=(1, 3, 3), padding=(0, 1, 1))

        # main body
        self.spynet = SpyNet(spynet_path, [2, 3, 4, 5])
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
        # U-shaped scale schedule: three downsamples then three upsamples
        reshapes = ['none', 'down', 'down', 'down', 'up', 'up', 'up']
        scales = [1, 2, 4, 8, 4, 2, 1]
        use_checkpoint_attns = [False if i in no_checkpoint_attn_blocks else use_checkpoint_attn for i in
                                range(len(depths))]
        use_checkpoint_ffns = [False if i in no_checkpoint_ffn_blocks else use_checkpoint_ffn for i in
                               range(len(depths))]

        # stage 1- 7
        # NOTE(review): for i == 0, in_dim=embed_dims[-1]; this looks harmless
        # because the 'none' reshape branch of Stage does not use in_dim, but
        # confirm against the Stage implementation.
        for i in range(7):
            setattr(self, f'stage{i + 1}',
                    Stage(
                        in_dim=embed_dims[i - 1],
                        dim=embed_dims[i],
                        input_resolution=(img_size[0], img_size[1] // scales[i], img_size[2] // scales[i]),
                        depth=depths[i],
                        num_heads=num_heads[i],
                        mul_attn_ratio=mul_attn_ratio,
                        window_size=window_size,
                        mlp_ratio=mlp_ratio,
                        qkv_bias=qkv_bias,
                        qk_scale=qk_scale,
                        drop_path=dpr[sum(depths[:i]):sum(depths[:i + 1])],
                        norm_layer=norm_layer,
                        pa_frames=pa_frames,
                        deformable_groups=deformable_groups,
                        reshape=reshapes[i],
                        max_residue_magnitude=10 / scales[i],
                        use_checkpoint_attn=use_checkpoint_attns[i],
                        use_checkpoint_ffn=use_checkpoint_ffns[i],
                    )
                    )

        # stage 8: first a channel projection, then self-attention-only blocks;
        # layers listed in indep_reconsts use a temporal window of 1 so each
        # frame is reconstructed independently
        self.stage8 = nn.ModuleList(
            [nn.Sequential(
                Rearrange('n c d h w ->  n d h w c'),
                nn.LayerNorm(embed_dims[6]),
                nn.Linear(embed_dims[6], embed_dims[7]),
                Rearrange('n d h w c -> n c d h w')
            )]
        )
        for i in range(7, len(depths)):
            self.stage8.append(
                RTMSA(dim=embed_dims[i],
                      input_resolution=img_size,
                      depth=depths[i],
                      num_heads=num_heads[i],
                      window_size=[1, window_size[1], window_size[2]] if i in indep_reconsts else window_size,
                      mlp_ratio=mlp_ratio,
                      qkv_bias=qkv_bias, qk_scale=qk_scale,
                      drop_path=dpr[sum(depths[:i]):sum(depths[:i + 1])],
                      norm_layer=norm_layer,
                      use_checkpoint_attn=use_checkpoint_attns[i],
                      use_checkpoint_ffn=use_checkpoint_ffns[i]
                      )
            )

        self.norm = norm_layer(embed_dims[-1])
        self.conv_after_body = nn.Linear(embed_dims[-1], embed_dims[0])

        # reconstruction
        num_feat = 64
        if self.upscale == 1:
            # for video deblurring, etc.
            self.conv_last = nn.Conv3d(embed_dims[0], in_chans, kernel_size=(1, 3, 3), padding=(0, 1, 1))
        else:
            # for video sr
            self.conv_before_upsample = nn.Sequential(
                nn.Conv3d(embed_dims[0], num_feat, kernel_size=(1, 3, 3), padding=(0, 1, 1)),
                nn.LeakyReLU(inplace=True))
            self.upsample = Upsample(upscale, num_feat)
            self.conv_last = nn.Conv3d(num_feat, in_chans, kernel_size=(1, 3, 3), padding=(0, 1, 1))

    def forward(self, x):
        """Restore a video clip.

        Args:
            x: (N, D, C, H, W) input frames; for non-blind denoising the extra
               channels beyond in_chans are the noise level map.
        Returns:
            Restored frames, residually added to the (possibly upsampled) input.
        """
        # x: (N, D, C, H, W)

        # obtain noise level map
        if self.nonblind_denoising:
            x, noise_level_map = x[:, :, :self.in_chans, :, :], x[:, :, self.in_chans:, :, :]

        x_lq = x.clone()  # keep the low-quality input for the global residual

        # calculate flows
        flows_backward, flows_forward = self.get_flows(x)

        # warp input: concatenate neighbor frames aligned towards each frame
        x_backward, x_forward = self.get_aligned_image_2frames(x,  flows_backward[0], flows_forward[0])
        x = torch.cat([x, x_backward, x_forward], 2)

        # concatenate noise level map
        if self.nonblind_denoising:
            x = torch.cat([x, noise_level_map], 2)

        # main network
        if self.upscale == 1:
            # video deblurring, etc.
            x = self.conv_first(x.transpose(1, 2))
            x = x + self.conv_after_body(
                self.forward_features(x, flows_backward, flows_forward).transpose(1, 4)).transpose(1, 4)
            x = self.conv_last(x).transpose(1, 2)
            return x + x_lq
        else:
            # video sr
            x = self.conv_first(x.transpose(1, 2))
            x = x + self.conv_after_body(
                self.forward_features(x, flows_backward, flows_forward).transpose(1, 4)).transpose(1, 4)
            x = self.conv_last(self.upsample(self.conv_before_upsample(x))).transpose(1, 2)
            _, _, C, H, W = x.shape
            # trilinear interpolation of the input provides the residual base at target size
            return x + torch.nn.functional.interpolate(x_lq, size=(C, H, W), mode='trilinear', align_corners=False)

    def get_flows(self, x):
        ''' Get flows for 2 frames, 4 frames or 6 frames.'''

        if self.pa_frames == 2:
            flows_backward, flows_forward = self.get_flow_2frames(x)
        elif self.pa_frames == 4:
            # compose longer-range flows from adjacent-frame flows
            flows_backward_2frames, flows_forward_2frames = self.get_flow_2frames(x)
            flows_backward_4frames, flows_forward_4frames = self.get_flow_4frames(flows_forward_2frames, flows_backward_2frames)
            flows_backward = flows_backward_2frames + flows_backward_4frames
            flows_forward = flows_forward_2frames + flows_forward_4frames
        elif self.pa_frames == 6:
            flows_backward_2frames, flows_forward_2frames = self.get_flow_2frames(x)
            flows_backward_4frames, flows_forward_4frames = self.get_flow_4frames(flows_forward_2frames, flows_backward_2frames)
            flows_backward_6frames, flows_forward_6frames = self.get_flow_6frames(flows_forward_2frames, flows_backward_2frames, flows_forward_4frames, flows_backward_4frames)
            flows_backward = flows_backward_2frames + flows_backward_4frames + flows_backward_6frames
            flows_forward = flows_forward_2frames + flows_forward_4frames + flows_forward_6frames

        return flows_backward, flows_forward

    def get_flow_2frames(self, x):
        '''Get flow between frames t and t+1 from x.'''

        b, n, c, h, w = x.size()
        x_1 = x[:, :-1, :, :, :].reshape(-1, c, h, w)
        x_2 = x[:, 1:, :, :, :].reshape(-1, c, h, w)

        # backward: SpyNet returns flows at 4 pyramid levels (full, 1/2, 1/4, 1/8)
        flows_backward = self.spynet(x_1, x_2)
        flows_backward = [flow.view(b, n-1, 2, h // (2 ** i), w // (2 ** i)) for flow, i in
                          zip(flows_backward, range(4))]

        # forward
        flows_forward = self.spynet(x_2, x_1)
        flows_forward = [flow.view(b, n-1, 2, h // (2 ** i), w // (2 ** i)) for flow, i in
                         zip(flows_forward, range(4))]

        return flows_backward, flows_forward

    def get_flow_4frames(self, flows_forward, flows_backward):
        '''Get flow between t and t+2 from (t,t+1) and (t+1,t+2).'''

        # backward: chain flow(i+1->i) with warped flow(i+2->i+1)
        d = flows_forward[0].shape[1]
        flows_backward2 = []
        for flows in flows_backward:
            flow_list = []
            for i in range(d - 1, 0, -1):
                flow_n1 = flows[:, i - 1, :, :, :]  # flow from i+1 to i
                flow_n2 = flows[:, i, :, :, :]  # flow from i+2 to i+1
                flow_list.insert(0, flow_n1 + flow_warp(flow_n2, flow_n1.permute(0, 2, 3, 1)))  # flow from i+2 to i
            flows_backward2.append(torch.stack(flow_list, 1))

        # forward: chain flow(i-1->i) with warped flow(i-2->i-1)
        flows_forward2 = []
        for flows in flows_forward:
            flow_list = []
            for i in range(1, d):
                flow_n1 = flows[:, i, :, :, :]  # flow from i-1 to i
                flow_n2 = flows[:, i - 1, :, :, :]  # flow from i-2 to i-1
                flow_list.append(flow_n1 + flow_warp(flow_n2, flow_n1.permute(0, 2, 3, 1)))  # flow from i-2 to i
            flows_forward2.append(torch.stack(flow_list, 1))

        return flows_backward2, flows_forward2

    def get_flow_6frames(self, flows_forward, flows_backward, flows_forward2, flows_backward2):
        '''Get flow between t and t+3 from (t,t+2) and (t+2,t+3).'''

        # backward: chain two-step flow with warped one-step flow
        d = flows_forward2[0].shape[1]
        flows_backward3 = []
        for flows, flows2 in zip(flows_backward, flows_backward2):
            flow_list = []
            for i in range(d - 1, 0, -1):
                flow_n1 = flows2[:, i - 1, :, :, :]  # flow from i+2 to i
                flow_n2 = flows[:, i + 1, :, :, :]  # flow from i+3 to i+2
                flow_list.insert(0, flow_n1 + flow_warp(flow_n2, flow_n1.permute(0, 2, 3, 1)))  # flow from i+3 to i
            flows_backward3.append(torch.stack(flow_list, 1))

        # forward
        flows_forward3 = []
        for flows, flows2 in zip(flows_forward, flows_forward2):
            flow_list = []
            for i in range(2, d + 1):
                flow_n1 = flows2[:, i - 1, :, :, :]  # flow from i-2 to i
                flow_n2 = flows[:, i - 2, :, :, :]  # flow from i-3 to i-2
                flow_list.append(flow_n1 + flow_warp(flow_n2, flow_n1.permute(0, 2, 3, 1)))  # flow from i-3 to i
            flows_forward3.append(torch.stack(flow_list, 1))

        return flows_backward3, flows_forward3

    def get_aligned_image_2frames(self, x, flows_backward, flows_forward):
        '''Parallel feature warping for 2 frames.'''

        # backward: 'nearest4' warping keeps the 4 nearest neighbors, hence the
        # 4x channel repeat for the zero boundary frame
        n = x.size(1)
        x_backward = [torch.zeros_like(x[:, -1, ...]).repeat(1, 4, 1, 1)]
        for i in range(n - 1, 0, -1):
            x_i = x[:, i, ...]
            flow = flows_backward[:, i - 1, ...]
            x_backward.insert(0, flow_warp(x_i, flow.permute(0, 2, 3, 1), 'nearest4'))  # frame i+1 aligned towards i

        # forward
        x_forward = [torch.zeros_like(x[:, 0, ...]).repeat(1, 4, 1, 1)]
        for i in range(0, n - 1):
            x_i = x[:, i, ...]
            flow = flows_forward[:, i, ...]
            x_forward.append(flow_warp(x_i, flow.permute(0, 2, 3, 1), 'nearest4'))  # frame i-1 aligned towards i

        return [torch.stack(x_backward, 1), torch.stack(x_forward, 1)]

    def forward_features(self, x, flows_backward, flows_forward):
        '''Main network for feature extraction.'''

        # U-shaped body: stages 1-4 descend the scale pyramid (flows strided by
        # scale level), stages 5-7 ascend with skip connections to x3/x2/x1
        x1 = self.stage1(x, flows_backward[0::4], flows_forward[0::4])
        x2 = self.stage2(x1, flows_backward[1::4], flows_forward[1::4])
        x3 = self.stage3(x2, flows_backward[2::4], flows_forward[2::4])
        x4 = self.stage4(x3, flows_backward[3::4], flows_forward[3::4])
        x = self.stage5(x4, flows_backward[2::4], flows_forward[2::4])
        x = self.stage6(x + x3, flows_backward[1::4], flows_forward[1::4])
        x = self.stage7(x + x2, flows_backward[0::4], flows_forward[0::4])
        x = x + x1

        # refinement at full resolution
        for layer in self.stage8:
            x = layer(x)

        # final LayerNorm applied channels-last
        x = rearrange(x, 'n c d h w -> n d h w c')
        x = self.norm(x)
        x = rearrange(x, 'n d h w c -> n c d h w')

        return x
if __name__ == '__main__':
    # Smoke test: build a VRT model on CPU and push one random clip through it.
    device = torch.device('cpu')
    upscale = 4
    window_size = 8
    # Crop the (downscaled) input so H and W are multiples of the window size.
    height = (256 // upscale // window_size) * window_size
    width = (256 // upscale // window_size) * window_size

    model = VRT(
        upscale=4,
        img_size=[6, 64, 64],
        window_size=[6, 8, 8],
        depths=[8, 8, 8, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4],
        indep_reconsts=[11, 12],
        embed_dims=[120, 120, 120, 120, 120, 120, 120, 180, 180, 180, 180, 180, 180],
        num_heads=[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],
        spynet_path=None,
        pa_frames=2,
        deformable_groups=12,
    ).to(device)
    print(model)

    n_params_m = sum(p.numel() for p in model.parameters()) / 10 ** 6
    print('{:>16s} : {:<.4f} [M]'.format('#Params', n_params_m))

    clip = torch.randn((2, 12, 3, height, width)).to(device)
    out = model(clip)
    print(out.shape)
| 69,614 | 43.482428 | 175 | py |
MaskedDenoising | MaskedDenoising-main/models/network_swinir.py | # -----------------------------------------------------------------------------------
# SwinIR: Image Restoration Using Swin Transformer, https://arxiv.org/abs/2108.10257
# Originally Written by Ze Liu, Modified by Jingyun Liang.
# -----------------------------------------------------------------------------------
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from entmax import entmax15
import random
import numpy as np
def stable_softmax(t, dim = -1):
    """Numerically stable softmax along `dim`.

    Subtracts the per-slice maximum (detached, so it carries no gradient)
    before calling F.softmax; the shift leaves the softmax value unchanged
    but avoids overflow in exp() for large inputs.
    """
    shifted = t - t.amax(dim = dim, keepdim = True).detach()
    return F.softmax(shifted, dim = dim)
class Mlp(nn.Module):
    """Two-layer feed-forward network: Linear -> activation -> dropout -> Linear -> dropout.

    Hidden and output widths default to the input width when not given.
    """

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
def window_partition(x, window_size):
    """Split a feature map into non-overlapping square windows.

    Args:
        x: (B, H, W, C) tensor; H and W must be divisible by window_size.
        window_size (int): side length of each window.
    Returns:
        (num_windows*B, window_size, window_size, C) tensor, windows ordered
        row-major over the window grid, batch-major overall.
    """
    B, H, W, C = x.shape
    grid = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    return grid.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
def window_reverse(windows, window_size, H, W):
    """Inverse of window_partition: reassemble windows into a feature map.

    Args:
        windows: (num_windows*B, window_size, window_size, C) tensor.
        window_size (int): side length of each window.
        H (int): target image height.
        W (int): target image width.
    Returns:
        (B, H, W, C) tensor; B is recovered from the window count.
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    grid = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    return grid.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
class WindowAttention(nn.Module):
    r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.

    This is a modified SwinIR attention: it adds optional talking heads, head
    scaling, attention-on-attention, a configurable normalization function
    (softmax / entmax15 / stable_softmax), and random input masking (shared
    across heads, or one independent mask per head when mask_is_diff).

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.,
                 talking_heads=False, use_attn_fn=False, head_scale=False, on_attn=False,
                 **kwargs):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        # define a parameter table of relative position bias
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        self.register_buffer("relative_position_index", relative_position_index)

        # =====================================================================================
        # attention normalization function selected by string; with the default
        # use_attn_fn=False no function is set and forward() applies none.
        # NOTE(review): when use_attn_fn is falsy the attention scores are never
        # normalized (no softmax at all) — confirm this is intentional.
        self.use_attn_fn = use_attn_fn
        if self.use_attn_fn == 'softmax':
            self.attn_fn = F.softmax
        elif self.use_attn_fn == 'entmax15':
            self.attn_fn = entmax15
        elif self.use_attn_fn == 'stable_softmax':
            self.attn_fn = stable_softmax

        # talking heads: 1x1 convs mixing heads before and after normalization
        self.talking_heads = talking_heads
        if talking_heads:
            self.pre_softmax_talking_heads = nn.Conv2d(self.num_heads, self.num_heads, 1, bias = False)
            self.post_softmax_talking_heads = nn.Conv2d(self.num_heads, self.num_heads, 1, bias = False)

        # head scaling: learned per-head gain on the attention output
        self.head_scale = head_scale
        if head_scale:
            self.head_scale_params = nn.Parameter(torch.ones(1, self.num_heads, 1, 1))

        # masking configuration (overridable via **kwargs):
        # use_mask: randomly zero tokens before QKV; mask ratio drawn uniformly
        # from [mask_ratio1, mask_ratio2] percent; mask_is_diff: one independent
        # mask (and one QKV projection) per head; type: 'stand' concatenates
        # heads, 'sum' adds them.
        self.use_mask = False
        self.mask_ratio1 = 60
        self.mask_ratio2 = 90
        self.mask_is_diff = False
        self.type = 'stand'
        if 'use_mask' in kwargs:
            self.use_mask = kwargs['use_mask']
        if 'mask_ratio1' in kwargs:
            self.mask_ratio1 = kwargs['mask_ratio1']
        if 'mask_ratio2' in kwargs:
            self.mask_ratio2 = kwargs['mask_ratio2']
        if 'mask_is_diff' in kwargs:
            self.mask_is_diff = kwargs['mask_is_diff']
        if 'type' in kwargs:
            self.type = kwargs['type']
            if self.type == None:
                self.type = 'stand'
        # =====================================================================================

        if self.mask_is_diff:
            # one reduced-width QKV projection per head (hard-coded for 6 heads;
            # NOTE(review): qkv1..qkv6 assume num_heads == 6 — verify config)
            self.qkv1 = nn.Linear(dim, dim * 3 // self.num_heads, bias=qkv_bias)
            self.qkv2 = nn.Linear(dim, dim * 3 // self.num_heads, bias=qkv_bias)
            self.qkv3 = nn.Linear(dim, dim * 3 // self.num_heads, bias=qkv_bias)
            self.qkv4 = nn.Linear(dim, dim * 3 // self.num_heads, bias=qkv_bias)
            self.qkv5 = nn.Linear(dim, dim * 3 // self.num_heads, bias=qkv_bias)
            self.qkv6 = nn.Linear(dim, dim * 3 // self.num_heads, bias=qkv_bias)
        else:
            self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)

        self.attn_drop = nn.Dropout(attn_drop)

        # attention on attention: project to 2*dim then gate with GLU
        self.attn_on_attn = on_attn
        if self.attn_on_attn:
            self.proj = nn.Sequential(nn.Linear(dim, dim * 2, bias = False), nn.GLU())
        else:
            if self.type == 'stand':
                self.proj = nn.Linear(dim, dim)
            elif self.type == 'sum':
                # heads are summed, so the projection input is one head's width
                self.proj = nn.Linear(dim // self.num_heads, dim)

        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=.02)

    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape

        # attention mask: randomly zero input tokens (training-time masking);
        # returns a list of per-head masked inputs when mask_is_diff
        if self.use_mask:
            x = self.mask_image(x)

        if self.mask_is_diff:
            # per-head QKV on per-head masked inputs; head axis built by concat
            qkv1 = self.qkv1(x[0]).reshape(B_, N, 3, C // self.num_heads).permute(2, 0, 1, 3)
            qkv2 = self.qkv2(x[1]).reshape(B_, N, 3, C // self.num_heads).permute(2, 0, 1, 3)
            qkv3 = self.qkv3(x[2]).reshape(B_, N, 3, C // self.num_heads).permute(2, 0, 1, 3)
            qkv4 = self.qkv4(x[3]).reshape(B_, N, 3, C // self.num_heads).permute(2, 0, 1, 3)
            qkv5 = self.qkv5(x[4]).reshape(B_, N, 3, C // self.num_heads).permute(2, 0, 1, 3)
            qkv6 = self.qkv6(x[5]).reshape(B_, N, 3, C // self.num_heads).permute(2, 0, 1, 3)
            qkv = torch.cat([qkv1.unsqueeze(2), qkv2.unsqueeze(2), qkv3.unsqueeze(2),
                             qkv4.unsqueeze(2), qkv5.unsqueeze(2), qkv6.unsqueeze(2)], dim=2)
        else:
            qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        if self.talking_heads:
            attn = self.pre_softmax_talking_heads(attn)

        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            # add the (0/-inf) shift-window mask per window, then normalize
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            if self.use_attn_fn:
                attn = self.attn_fn(attn, dim=-1)
        else:
            if self.use_attn_fn:
                attn = self.attn_fn(attn, dim=-1)

        attn = self.attn_drop(attn)

        if self.talking_heads:
            attn = self.post_softmax_talking_heads(attn)

        x = attn @ v

        if self.head_scale:
            x = x * self.head_scale_params

        # 'stand': concatenate heads back to C channels; 'sum': add heads
        if self.type == 'stand':
            x = x.transpose(1, 2).reshape(B_, N, C)
        elif self.type == 'sum':
            x = x.sum(dim=1)

        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def extra_repr(self) -> str:
        return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'

    def flops(self, N):
        # calculate flops for 1 window with token length of N
        flops = 0
        # qkv = self.qkv(x)
        flops += N * self.dim * 3 * self.dim
        # attn = (q @ k.transpose(-2, -1))
        flops += self.num_heads * N * (self.dim // self.num_heads) * N
        #  x = (attn @ v)
        flops += self.num_heads * N * N * (self.dim // self.num_heads)
        # x = self.proj(x)
        flops += N * self.dim * self.dim
        return flops

    def mask_image(self, image, x_size=None):
        """Randomly zero whole token rows of `image` (B_, N, C).

        Keep probability is drawn per call from [mask_ratio1, mask_ratio2]%.
        Returns one masked tensor, or a list of num_heads independently masked
        tensors when mask_is_diff. `x_size` is currently unused.
        """
        # attention mask
        if not self.mask_is_diff:
            prob_ = random.randint(self.mask_ratio1, self.mask_ratio2) / 100
            # per-(window, token) Bernoulli keep mask, broadcast over channels
            mask1 = np.random.choice([0, 1], size=(image.shape[0], image.shape[1]), p=[prob_, 1 - prob_])
            mask1 = torch.from_numpy(mask1).to(image.device).unsqueeze(-1)
            noise_image1 = torch.mul(image, mask1)
            return noise_image1
        elif self.mask_is_diff:
            # independent mask (and ratio draw) per attention head
            mask_images = []
            for i in range(self.num_heads):
                prob_ = random.randint(self.mask_ratio1, self.mask_ratio2) / 100
                mask1 = np.random.choice([0, 1], size=(image.shape[0], image.shape[1]), p=[prob_, 1 - prob_])
                mask1 = torch.from_numpy(mask1).to(image.device).unsqueeze(-1)
                noise_image1 = torch.mul(image, mask1)
                mask_images.append(noise_image1)
            return mask_images
class SwinTransformerBlock(nn.Module):
r""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resulotion.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm,
talking_heads=False, use_attn_fn=False, head_scale=False, on_attn=False,
**kwargs):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop,
talking_heads=talking_heads, use_attn_fn=use_attn_fn, head_scale=head_scale, on_attn=on_attn,
**kwargs)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if self.shift_size > 0:
attn_mask = self.calculate_mask(self.input_resolution)
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
def calculate_mask(self, x_size):
# calculate attention mask for SW-MSA
H, W = x_size
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
return attn_mask
    def forward(self, x, x_size):
        """Apply (shifted-)window attention and the MLP, each with a residual.

        Args:
            x: (B, H*W, C) token tensor.
            x_size (tuple[int]): current spatial size (H, W); may differ from
                ``self.input_resolution`` at inference time.

        Returns:
            Tensor of shape (B, H*W, C).
        """
        H, W = x_size
        B, L, C = x.shape
        # assert L == H * W, "input feature has wrong size"
        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)
        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x
        # partition windows
        x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
        # W-MSA/SW-MSA (to be compatible for testing on images whose shapes are the multiple of window size
        if self.input_resolution == x_size:
            attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
        else:
            # Input size differs from the training resolution, so the cached
            # mask no longer applies; rebuild it for the actual size.
            attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))
        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)
        # FFN
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
def flops(self):
flops = 0
H, W = self.input_resolution
# norm1
flops += self.dim * H * W
# W-MSA/SW-MSA
nW = H * W / self.window_size / self.window_size
flops += nW * self.attn.flops(self.window_size * self.window_size)
# mlp
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
# norm2
flops += self.dim * H * W
return flops
class PatchMerging(nn.Module):
    r""" Patch Merging Layer.
    Downsamples a token grid by 2x along each spatial axis while doubling the
    channel count: the four interleaved sub-grids are concatenated to 4C
    features per output token, normalized, then projected to 2C.
    Args:
        input_resolution (tuple[int]): Resolution of input feature.
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm
    """
    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x):
        """
        x: B, H*W, C  ->  B, H/2*W/2, 2*C
        """
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
        grid = x.view(B, H, W, C)
        # Gather the four interleaved sub-grids: (even,even), (odd,even),
        # (even,odd), (odd,odd) rows/columns — order matters for the weights.
        corners = [grid[:, dh::2, dw::2, :] for dh, dw in ((0, 0), (1, 0), (0, 1), (1, 1))]
        merged = torch.cat(corners, -1)          # B H/2 W/2 4*C
        merged = merged.view(B, -1, 4 * C)       # B H/2*W/2 4*C
        return self.reduction(self.norm(merged))

    def extra_repr(self) -> str:
        return f"input_resolution={self.input_resolution}, dim={self.dim}"

    def flops(self):
        H, W = self.input_resolution
        count = H * W * self.dim                                  # norm
        count += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim  # reduction
        return count
class BasicLayer(nn.Module):
    """ A basic Swin Transformer layer for one stage.
    Stacks ``depth`` SwinTransformerBlocks, alternating regular (W-MSA) and
    shifted (SW-MSA) window attention, optionally followed by a downsample.
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """
    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
                 talking_heads=False, use_attn_fn=False, head_scale=False, on_attn=False,
                 **kwargs):
        super().__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        def make_block(idx):
            # Even blocks use plain window attention; odd blocks shift the
            # window grid by half a window.
            return SwinTransformerBlock(
                dim=dim, input_resolution=input_resolution,
                num_heads=num_heads, window_size=window_size,
                shift_size=0 if idx % 2 == 0 else window_size // 2,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop, attn_drop=attn_drop,
                drop_path=drop_path[idx] if isinstance(drop_path, list) else drop_path,
                norm_layer=norm_layer,
                talking_heads=talking_heads, use_attn_fn=use_attn_fn,
                head_scale=head_scale, on_attn=on_attn,
                **kwargs)

        self.blocks = nn.ModuleList(make_block(i) for i in range(depth))
        # Optional patch-merging layer at the end of the stage.
        if downsample is not None:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None

    def forward(self, x, x_size):
        """Run all blocks (optionally gradient-checkpointed), then downsample."""
        for blk in self.blocks:
            x = checkpoint.checkpoint(blk, x, x_size) if self.use_checkpoint else blk(x, x_size)
        if self.downsample is not None:
            x = self.downsample(x)
        return x

    def extra_repr(self) -> str:
        return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"

    def flops(self):
        total = sum(blk.flops() for blk in self.blocks)
        if self.downsample is not None:
            total += self.downsample.flops()
        return total
class RSTB(nn.Module):
    """Residual Swin Transformer Block (RSTB).
    A BasicLayer followed by a convolutional block, wrapped with a residual
    connection (the '0conv' variant skips the conv entirely).
    Args:
        dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
        depth (int): Number of blocks.
        num_heads (int): Number of attention heads.
        window_size (int): Local window size.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
        img_size: Input image size.
        patch_size: Patch size.
        resi_connection: The convolutional block before residual connection
            ('1conv' / '3conv' / '6conv' / '0conv').
    """
    def __init__(self, dim, input_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
                 img_size=224, patch_size=4, resi_connection='1conv',
                 talking_heads=False, use_attn_fn=False, head_scale=False, on_attn=False,
                 **kwargs):
        super(RSTB, self).__init__()
        self.dim = dim
        self.input_resolution = input_resolution
        self.residual_group = BasicLayer(dim=dim,
                                         input_resolution=input_resolution,
                                         depth=depth,
                                         num_heads=num_heads,
                                         window_size=window_size,
                                         mlp_ratio=mlp_ratio,
                                         qkv_bias=qkv_bias, qk_scale=qk_scale,
                                         drop=drop, attn_drop=attn_drop,
                                         drop_path=drop_path,
                                         norm_layer=norm_layer,
                                         downsample=downsample,
                                         use_checkpoint=use_checkpoint,
                                         talking_heads=talking_heads, use_attn_fn=use_attn_fn,
                                         head_scale=head_scale, on_attn=on_attn,
                                         **kwargs)

        def _bottleneck():
            # dim -> dim/4 -> dim/4 -> dim conv stack with LeakyReLU in
            # between — saves parameters and memory vs. a single 3x3 conv.
            return [nn.Conv2d(dim, dim // 4, 3, 1, 1),
                    nn.LeakyReLU(negative_slope=0.2, inplace=True),
                    nn.Conv2d(dim // 4, dim // 4, 1, 1, 0),
                    nn.LeakyReLU(negative_slope=0.2, inplace=True),
                    nn.Conv2d(dim // 4, dim, 3, 1, 1)]

        self.resi_connection = resi_connection
        if resi_connection == '1conv':
            self.conv = nn.Conv2d(dim, dim, 3, 1, 1)
        elif resi_connection == '3conv':
            self.conv = nn.Sequential(*_bottleneck())
        elif resi_connection == '6conv':
            # Two bottleneck stacks joined by a LeakyReLU.
            self.conv = nn.Sequential(*(_bottleneck()
                                        + [nn.LeakyReLU(negative_slope=0.2, inplace=True)]
                                        + _bottleneck()))
        # '0conv' deliberately defines no conv — forward skips it.
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
            norm_layer=None)
        self.patch_unembed = PatchUnEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
            norm_layer=None)

    def forward(self, x, x_size):
        """Residual-wrapped transformer group (+ conv unless '0conv')."""
        out = self.residual_group(x, x_size)
        if self.resi_connection == '0conv':
            return out + x
        out = self.patch_unembed(out, x_size)
        out = self.patch_embed(self.conv(out))
        return out + x

    def flops(self):
        H, W = self.input_resolution
        total = self.residual_group.flops()
        total += H * W * self.dim * self.dim * 9  # 3x3 conv after the group
        total += self.patch_embed.flops()
        total += self.patch_unembed.flops()
        return total
class PatchEmbed(nn.Module):
    r""" Image to Patch Embedding
    Flattens a (B, C, Ph, Pw) feature map into a (B, Ph*Pw, C) token sequence,
    optionally applying a normalization layer afterwards.
    Args:
        img_size (int): Image size. Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """
    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        grid = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = grid
        self.num_patches = grid[0] * grid[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        self.norm = norm_layer(embed_dim) if norm_layer is not None else None

    def forward(self, x):
        tokens = x.flatten(2).transpose(1, 2)  # B Ph*Pw C
        return tokens if self.norm is None else self.norm(tokens)

    def flops(self):
        H, W = self.img_size
        # Only the optional norm costs anything; flatten/transpose are views.
        return H * W * self.embed_dim if self.norm is not None else 0
class PatchUnEmbed(nn.Module):
    r""" Image to Patch Unembedding
    Inverse of PatchEmbed: reshapes a (B, H*W, C) token sequence back into a
    (B, C, H, W) feature map.
    Args:
        img_size (int): Image size. Default: 224.
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """
    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        grid = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
        self.img_size = img_size
        self.patch_size = patch_size
        self.patches_resolution = grid
        self.num_patches = grid[0] * grid[1]
        self.in_chans = in_chans
        self.embed_dim = embed_dim

    def forward(self, x, x_size):
        batch = x.shape[0]
        return x.transpose(1, 2).view(batch, self.embed_dim, x_size[0], x_size[1])  # B C H W

    def flops(self):
        # Pure reshape — no arithmetic.
        return 0
class Upsample(nn.Sequential):
    """Upsample module built from conv + pixel-shuffle stages.
    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.
    """
    def __init__(self, scale, num_feat):
        layers = []
        if (scale & (scale - 1)) == 0:
            # Power of two: stack log2(scale) x2 pixel-shuffle stages.
            for _ in range(int(math.log(scale, 2))):
                layers.append(nn.Conv2d(num_feat, 4 * num_feat, 3, 1, 1))
                layers.append(nn.PixelShuffle(2))
        elif scale == 3:
            layers.append(nn.Conv2d(num_feat, 9 * num_feat, 3, 1, 1))
            layers.append(nn.PixelShuffle(3))
        else:
            raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
        super(Upsample, self).__init__(*layers)
class UpsampleOneStep(nn.Sequential):
    """UpsampleOneStep module (the difference with Upsample is that it always only has 1conv + 1pixelshuffle)
    Used in lightweight SR to save parameters.
    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.
    """
    def __init__(self, scale, num_feat, num_out_ch, input_resolution=None):
        # Attributes set before super().__init__ so flops() can use them.
        self.num_feat = num_feat
        self.input_resolution = input_resolution
        modules = [nn.Conv2d(num_feat, (scale ** 2) * num_out_ch, 3, 1, 1),
                   nn.PixelShuffle(scale)]
        super(UpsampleOneStep, self).__init__(*modules)

    def flops(self):
        H, W = self.input_resolution
        # Single 3x3 conv producing scale^2 * num_out_ch channels.
        return H * W * self.num_feat * 3 * 9
class SwinIR(nn.Module):
    r""" SwinIR
    A PyTorch impl of : `SwinIR: Image Restoration Using Swin Transformer`, based on Swin Transformer.
    Args:
        img_size (int | tuple(int)): Input image size. Default 64
        patch_size (int | tuple(int)): Patch size. Default: 1
        in_chans (int): Number of input image channels. Default: 3
        embed_dim (int): Patch embedding dimension. Default: 96
        depths (tuple(int)): Depth of each Swin Transformer layer.
        num_heads (tuple(int)): Number of attention heads in different layers.
        window_size (int): Window size. Default: 7
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
        drop_rate (float): Dropout rate. Default: 0
        attn_drop_rate (float): Attention dropout rate. Default: 0
        drop_path_rate (float): Stochastic depth rate. Default: 0.1
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
        patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
        upscale: Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction
        img_range: Image range. 1. or 255.
        upsampler: The reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
        resi_connection: The convolutional block before residual connection. '1conv'/'3conv'
    """

    def __init__(self, img_size=64, patch_size=1, in_chans=3,
                 embed_dim=96, depths=[6, 6, 6, 6], num_heads=[6, 6, 6, 6],
                 window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
                 use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv',
                 talking_heads=False, use_attn_fn=False, head_scale=False, on_attn=False, opt=None,
                 **kwargs):
        super(SwinIR, self).__init__()
        num_in_ch = in_chans
        num_out_ch = in_chans
        num_feat = 64
        self.img_range = img_range
        # Per-channel mean subtracted before and added back after the network
        # (fixed RGB statistics; zero for non-RGB inputs).
        if in_chans == 3:
            rgb_mean = (0.4488, 0.4371, 0.4040)
            self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
        else:
            self.mean = torch.zeros(1, 1, 1, 1)
        self.upscale = upscale
        self.upsampler = upsampler
        self.window_size = window_size
        #####################################################################################################
        ######################################### Feature Embedding #########################################
        # NOTE: 1x1 conv here (the original SwinIR used a 3x3 conv_first).
        self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 1)
        #####################################################################################################
        #################################### deep feature extraction ########################################
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        self.num_features = embed_dim
        self.mlp_ratio = mlp_ratio
        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution
        # merge non-overlapping patches into image
        self.patch_unembed = PatchUnEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        # absolute position embedding
        if self.ape:
            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
            trunc_normal_(self.absolute_pos_embed, std=.02)
        self.pos_drop = nn.Dropout(p=drop_rate)
        # stochastic depth
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
        # build Residual Swin Transformer blocks (RSTB)
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = RSTB(dim=embed_dim,
                         input_resolution=(patches_resolution[0],
                                           patches_resolution[1]),
                         depth=depths[i_layer],
                         num_heads=num_heads[i_layer],
                         window_size=window_size,
                         mlp_ratio=self.mlp_ratio,
                         qkv_bias=qkv_bias, qk_scale=qk_scale,
                         drop=drop_rate, attn_drop=attn_drop_rate,
                         drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],  # no impact on SR results
                         norm_layer=norm_layer,
                         downsample=None,
                         use_checkpoint=use_checkpoint,
                         img_size=img_size,
                         patch_size=patch_size,
                         resi_connection=resi_connection,
                         talking_heads=talking_heads,
                         use_attn_fn=use_attn_fn,
                         head_scale=head_scale,
                         on_attn=on_attn,
                         **kwargs,
                         )
            self.layers.append(layer)
        self.norm = norm_layer(self.num_features)
        # build the last conv layer in deep feature extraction
        if resi_connection == '1conv':
            self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 3, 1, 1)
        elif resi_connection == '0conv':
            self.conv_after_body = nn.Conv2d(embed_dim, embed_dim, 1)
        elif resi_connection == '3conv':
            # to save parameters and memory
            self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
                                                 nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                                 nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
                                                 nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                                 nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))
        elif resi_connection == '6conv':
            # to save parameters and memory
            self.conv_after_body = nn.Sequential(nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
                                                 nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                                 nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
                                                 nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                                 nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1),
                                                 nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                                 nn.Conv2d(embed_dim, embed_dim // 4, 3, 1, 1),
                                                 nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                                 nn.Conv2d(embed_dim // 4, embed_dim // 4, 1, 1, 0),
                                                 nn.LeakyReLU(negative_slope=0.2, inplace=True),
                                                 nn.Conv2d(embed_dim // 4, embed_dim, 3, 1, 1))
        #####################################################################################################
        ################################ 3, high quality image reconstruction ################################
        if self.upsampler == 'pixelshuffle':
            # for classical SR
            self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
                                                      nn.LeakyReLU(inplace=True))
            self.upsample = Upsample(upscale, num_feat)
            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
        elif self.upsampler == 'pixelshuffledirect':
            # for lightweight SR (to save parameters)
            self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
                                            (patches_resolution[0], patches_resolution[1]))
        elif self.upsampler == 'nearest+conv':
            # for real-world SR (less artifacts)
            assert self.upscale == 4, 'only support x4 now.'
            self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1),
                                                      nn.LeakyReLU(inplace=True))
            self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
            self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
            self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
        else:
            # for image denoising and JPEG compression artifact reduction
            self.conv_last = nn.Conv2d(embed_dim, num_out_ch, 3, 1, 1)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Truncated-normal init for Linear weights; LayerNorm to identity."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'absolute_pos_embed'}

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        return {'relative_position_bias_table'}

    def check_image_size(self, x):
        """Reflect-pad H and W up to the next multiple of window_size."""
        _, _, h, w = x.size()
        mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
        mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
        x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
        return x

    def forward_features(self, x):
        """Deep feature extraction: patch-embed, RSTB stack, norm, un-embed."""
        x_size = (x.shape[2], x.shape[3])
        x = self.patch_embed(x)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)
        for layer in self.layers:
            x = layer(x, x_size)
        x = self.norm(x)  # B L C
        x = self.patch_unembed(x, x_size)
        return x

    def forward(self, x):
        """Restore (and optionally upscale) the input image batch.

        Input/output are NCHW; output spatial size is input size * upscale.
        """
        H, W = x.shape[2:]
        x = self.check_image_size(x)
        self.mean = self.mean.type_as(x)
        x = (x - self.mean) * self.img_range
        if self.upsampler == 'pixelshuffle':
            # for classical SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.conv_before_upsample(x)
            x = self.conv_last(self.upsample(x))
        elif self.upsampler == 'pixelshuffledirect':
            # for lightweight SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.upsample(x)
        elif self.upsampler == 'nearest+conv':
            # for real-world SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.conv_before_upsample(x)
            x = self.lrelu(self.conv_up1(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
            x = self.lrelu(self.conv_up2(torch.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
            x = self.conv_last(self.lrelu(self.conv_hr(x)))
        else:
            # for image denoising and JPEG compression artifact reduction
            x_first = self.conv_first(x)
            # NOTE: unlike the original SwinIR, no global residual here —
            # the network predicts the clean image directly.
            # res = self.conv_after_body(self.forward_features(x_first)) + x_first
            # x = x + self.conv_last(res)
            res = self.conv_after_body(self.forward_features(x_first))
            x = self.conv_last(res)
        x = x / self.img_range + self.mean
        # Crop away the reflect-padding introduced by check_image_size.
        return x[:, :, :H*self.upscale, :W*self.upscale]

    def flops(self):
        """Rough FLOPs estimate.

        NOTE: the first/last terms still assume a 3-channel 3x3 conv_first as
        in the original SwinIR, so values are approximate for this variant.
        """
        flops = 0
        H, W = self.patches_resolution
        flops += H * W * 3 * self.embed_dim * 9
        flops += self.patch_embed.flops()
        for layer in self.layers:
            flops += layer.flops()
        flops += H * W * 3 * self.embed_dim * self.embed_dim
        # Fix: only 'pixelshuffledirect' builds an upsample module that
        # implements flops(); previously this line raised AttributeError for
        # every other upsampler configuration ('' / 'pixelshuffle' /
        # 'nearest+conv').
        if hasattr(self, 'upsample') and hasattr(self.upsample, 'flops'):
            flops += self.upsample.flops()
        return flops
if __name__ == '__main__':
    # Smoke test: build a lightweight SR model and run one forward pass.
    upscale = 4
    window_size = 8
    # Round the (downscaled) input dimensions up to a multiple of window_size.
    height = (1024 // upscale // window_size + 1) * window_size
    width = (720 // upscale // window_size + 1) * window_size
    # NOTE(review): the model is built with upscale=2 while `upscale` above is
    # 4 — presumably intentional for a quick shape check, but verify.
    model = SwinIR(upscale=2, img_size=(height, width),
                   window_size=window_size, img_range=1., depths=[6, 6, 6, 6],
                   embed_dim=60, num_heads=[6, 6, 6, 6], mlp_ratio=2, upsampler='pixelshuffledirect')
    print(model)
    print(height, width, model.flops() / 1e9)
    x = torch.randn((1, 3, height, width))
    x = model(x)
    print(x.shape)
| 46,138 | 42.733649 | 134 | py |
MaskedDenoising | MaskedDenoising-main/models/network_imdn.py | import math
import torch.nn as nn
import models.basicblock as B
"""
# --------------------------------------------
# simplified information multi-distillation
# network (IMDN) for SR
# --------------------------------------------
References:
@inproceedings{hui2019lightweight,
title={Lightweight Image Super-Resolution with Information Multi-distillation Network},
author={Hui, Zheng and Gao, Xinbo and Yang, Yunchu and Wang, Xiumei},
booktitle={Proceedings of the 27th ACM International Conference on Multimedia (ACM MM)},
pages={2024--2032},
year={2019}
}
@inproceedings{zhang2019aim,
title={AIM 2019 Challenge on Constrained Super-Resolution: Methods and Results},
author={Kai Zhang and Shuhang Gu and Radu Timofte and others},
booktitle={IEEE International Conference on Computer Vision Workshops},
year={2019}
}
# --------------------------------------------
"""
# --------------------------------------------
# modified version, https://github.com/Zheng222/IMDN
# first place solution for AIM 2019 challenge
# --------------------------------------------
class IMDN(nn.Module):
    def __init__(self, in_nc=3, out_nc=3, nc=64, nb=8, upscale=4, act_mode='L', upsample_mode='pixelshuffle', negative_slope=0.05):
        """
        in_nc: channel number of input
        out_nc: channel number of output
        nc: channel number
        nb: number of residual blocks
        upscale: up-scale factor
        act_mode: activation function
        upsample_mode: 'upconv' | 'pixelshuffle' | 'convtranspose'
        """
        super(IMDN, self).__init__()
        assert 'R' in act_mode or 'L' in act_mode, 'Examples of activation function: R, L, BR, BL, IR, IL'
        # Shallow feature extraction.
        head = B.conv(in_nc, nc, mode='C')
        # nb IMD blocks followed by a fusing conv, all inside a residual shortcut.
        body = [B.IMDBlock(nc, nc, mode='C'+act_mode, negative_slope=negative_slope) for _ in range(nb)]
        body.append(B.conv(nc, nc, mode='C'))
        # Dispatch table replaces the if/elif chain over upsample modes.
        upsamplers = {
            'upconv': B.upsample_upconv,
            'pixelshuffle': B.upsample_pixelshuffle,
            'convtranspose': B.upsample_convtranspose,
        }
        upsample_block = upsamplers.get(upsample_mode)
        if upsample_block is None:
            raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))
        tail = upsample_block(nc, out_nc, mode=str(upscale))
        self.model = B.sequential(head, B.ShortcutBlock(B.sequential(*body)), *tail)

    def forward(self, x):
        """Run the full IMDN pipeline on an NCHW batch."""
        return self.model(x)
| 2,513 | 36.522388 | 131 | py |
MaskedDenoising | MaskedDenoising-main/models/model_vrt.py | from collections import OrderedDict
import torch
import torch.nn as nn
from torch.optim import lr_scheduler
from torch.optim import Adam
from models.select_network import define_G
from models.model_plain import ModelPlain
from models.loss import CharbonnierLoss
from models.loss_ssim import SSIMLoss
from utils.utils_model import test_mode
from utils.utils_regularizers import regularizer_orth, regularizer_clip
class ModelVRT(ModelPlain):
    """Train video restoration with pixel loss.

    Extends ModelPlain with VRT-specific behaviour: optionally freezing
    parameters whose names match ``fix_keys`` for the first ``fix_iter``
    iterations, and tiled inference over long or large videos.
    """
    def __init__(self, opt):
        super(ModelVRT, self).__init__(opt)
        # Number of iterations during which `fix_keys` parameters stay frozen
        # (0 disables the mechanism entirely).
        self.fix_iter = self.opt_train.get('fix_iter', 0)
        self.fix_keys = self.opt_train.get('fix_keys', [])
        # One-shot flag so the freezing loop in optimize_parameters runs once.
        self.fix_unflagged = True

    # ----------------------------------------
    # define optimizer
    # ----------------------------------------
    def define_optimizer(self):
        """Create the Adam optimizer; parameters matching `fix_keys` can get
        their learning rate scaled by `fix_lr_mul` via a separate param group."""
        self.fix_keys = self.opt_train.get('fix_keys', [])
        if self.opt_train.get('fix_iter', 0) and len(self.fix_keys) > 0:
            fix_lr_mul = self.opt_train['fix_lr_mul']
            print(f'Multiple the learning rate for keys: {self.fix_keys} with {fix_lr_mul}.')
            if fix_lr_mul == 1:
                G_optim_params = self.netG.parameters()
            else: # separate flow params and normal params for different lr
                normal_params = []
                flow_params = []
                for name, param in self.netG.named_parameters():
                    if any([key in name for key in self.fix_keys]):
                        flow_params.append(param)
                    else:
                        normal_params.append(param)
                G_optim_params = [
                    { # add normal params first
                        'params': normal_params,
                        'lr': self.opt_train['G_optimizer_lr']
                    },
                    {
                        'params': flow_params,
                        'lr': self.opt_train['G_optimizer_lr'] * fix_lr_mul
                    },
                ]
            if self.opt_train['G_optimizer_type'] == 'adam':
                self.G_optimizer = Adam(G_optim_params, lr=self.opt_train['G_optimizer_lr'],
                                        betas=self.opt_train['G_optimizer_betas'],
                                        weight_decay=self.opt_train['G_optimizer_wd'])
            else:
                raise NotImplementedError
        else:
            super(ModelVRT, self).define_optimizer()

    # ----------------------------------------
    # update parameters and get loss
    # ----------------------------------------
    def optimize_parameters(self, current_step):
        """One optimization step; freezes/unfreezes `fix_keys` params around
        the `fix_iter` boundary before delegating to ModelPlain."""
        if self.fix_iter:
            if self.fix_unflagged and current_step < self.fix_iter:
                # First call inside the warm-up window: freeze matching params.
                print(f'Fix keys: {self.fix_keys} for the first {self.fix_iter} iters.')
                self.fix_unflagged = False
                for name, param in self.netG.named_parameters():
                    if any([key in name for key in self.fix_keys]):
                        param.requires_grad_(False)
            elif current_step == self.fix_iter:
                # Warm-up over: unfreeze everything.
                print(f'Train all the parameters from {self.fix_iter} iters.')
                self.netG.requires_grad_(True)
        super(ModelVRT, self).optimize_parameters(current_step)

    # ----------------------------------------
    # test / inference
    # ----------------------------------------
    def test(self):
        """Run inference on self.L into self.E, with optional temporal padding
        (`pad_seq`), self-ensemble over the time-flipped clip (`flip_seq`),
        and center-frame-only output."""
        n = self.L.size(1)
        self.netG.eval()
        pad_seq = self.opt_train.get('pad_seq', False)
        flip_seq = self.opt_train.get('flip_seq', False)
        self.center_frame_only = self.opt_train.get('center_frame_only', False)
        if pad_seq:
            # Duplicate the last frame so the clip length fits the model.
            n = n + 1
            self.L = torch.cat([self.L, self.L[:, -1:, :, :, :]], dim=1)
        if flip_seq:
            # Append the time-reversed clip for a temporal self-ensemble.
            self.L = torch.cat([self.L, self.L.flip(1)], dim=1)
        with torch.no_grad():
            self.E = self._test_video(self.L)
        if flip_seq:
            # Average the forward and (re-reversed) backward predictions.
            output_1 = self.E[:, :n, :, :, :]
            output_2 = self.E[:, n:, :, :, :].flip(1)
            self.E = 0.5 * (output_1 + output_2)
        if pad_seq:
            n = n - 1
            self.E = self.E[:, :n, :, :, :]
        if self.center_frame_only:
            self.E = self.E[:, n // 2, :, :, :]
        self.netG.train()

    def _test_video(self, lq):
        '''test the video as a whole or as clips (divided temporally). '''
        num_frame_testing = self.opt['val'].get('num_frame_testing', 0)
        if num_frame_testing:
            # test as multiple clips if out-of-memory
            sf = self.opt['scale']
            num_frame_overlapping = self.opt['val'].get('num_frame_overlapping', 2)
            not_overlap_border = False
            b, d, c, h, w = lq.size()
            # Non-blind denoising carries the noise level as an extra channel;
            # the output has one channel fewer than the input in that case.
            c = c - 1 if self.opt['netG'].get('nonblind_denoising', False) else c
            stride = num_frame_testing - num_frame_overlapping
            # Clip start indices; the final clip is anchored to the video end.
            d_idx_list = list(range(0, d-num_frame_testing, stride)) + [max(0, d-num_frame_testing)]
            # E accumulates predictions, W the per-frame weights for averaging.
            E = torch.zeros(b, d, c, h*sf, w*sf)
            W = torch.zeros(b, d, 1, 1, 1)
            for d_idx in d_idx_list:
                lq_clip = lq[:, d_idx:d_idx+num_frame_testing, ...]
                out_clip = self._test_clip(lq_clip)
                out_clip_mask = torch.ones((b, min(num_frame_testing, d), 1, 1, 1))
                if not_overlap_border:
                    # Zero out half the overlapping frames so each frame is
                    # counted by exactly one clip at the clip borders.
                    if d_idx < d_idx_list[-1]:
                        out_clip[:, -num_frame_overlapping//2:, ...] *= 0
                        out_clip_mask[:, -num_frame_overlapping//2:, ...] *= 0
                    if d_idx > d_idx_list[0]:
                        out_clip[:, :num_frame_overlapping//2, ...] *= 0
                        out_clip_mask[:, :num_frame_overlapping//2, ...] *= 0
                E[:, d_idx:d_idx+num_frame_testing, ...].add_(out_clip)
                W[:, d_idx:d_idx+num_frame_testing, ...].add_(out_clip_mask)
            # Weighted average over overlapping clips.
            output = E.div_(W)
        else:
            # test as one clip (the whole video) if you have enough memory
            window_size = self.opt['netG'].get('window_size', [6,8,8])
            d_old = lq.size(1)
            # Temporally pad (by reflection) up to a multiple of the temporal
            # window size, then crop the prediction back.
            d_pad = (d_old// window_size[0]+1)*window_size[0] - d_old
            lq = torch.cat([lq, torch.flip(lq[:, -d_pad:, ...], [1])], 1)
            output = self._test_clip(lq)
            output = output[:, :d_old, :, :, :]
        return output

    def _test_clip(self, lq):
        ''' test the clip as a whole or as patches. '''
        sf = self.opt['scale']
        window_size = self.opt['netG'].get('window_size', [6,8,8])
        size_patch_testing = self.opt['val'].get('size_patch_testing', 0)
        assert size_patch_testing % window_size[-1] == 0, 'testing patch size should be a multiple of window_size.'
        if size_patch_testing:
            # divide the clip to patches (spatially only, tested patch by patch)
            overlap_size = 20
            not_overlap_border = True
            # test patch by patch
            b, d, c, h, w = lq.size()
            # See _test_video: non-blind denoising drops the noise-map channel.
            c = c - 1 if self.opt['netG'].get('nonblind_denoising', False) else c
            stride = size_patch_testing - overlap_size
            # Patch top-left corners; last row/column anchored to the border.
            h_idx_list = list(range(0, h-size_patch_testing, stride)) + [max(0, h-size_patch_testing)]
            w_idx_list = list(range(0, w-size_patch_testing, stride)) + [max(0, w-size_patch_testing)]
            E = torch.zeros(b, d, c, h*sf, w*sf)
            W = torch.zeros_like(E)
            for h_idx in h_idx_list:
                for w_idx in w_idx_list:
                    in_patch = lq[..., h_idx:h_idx+size_patch_testing, w_idx:w_idx+size_patch_testing]
                    # Prefer the EMA network netE when available.
                    if hasattr(self, 'netE'):
                        out_patch = self.netE(in_patch).detach().cpu()
                    else:
                        out_patch = self.netG(in_patch).detach().cpu()
                    out_patch_mask = torch.ones_like(out_patch)
                    if not_overlap_border:
                        # Discard half the overlap band on interior edges so
                        # averaged seams only blend near-center predictions.
                        if h_idx < h_idx_list[-1]:
                            out_patch[..., -overlap_size//2:, :] *= 0
                            out_patch_mask[..., -overlap_size//2:, :] *= 0
                        if w_idx < w_idx_list[-1]:
                            out_patch[..., :, -overlap_size//2:] *= 0
                            out_patch_mask[..., :, -overlap_size//2:] *= 0
                        if h_idx > h_idx_list[0]:
                            out_patch[..., :overlap_size//2, :] *= 0
                            out_patch_mask[..., :overlap_size//2, :] *= 0
                        if w_idx > w_idx_list[0]:
                            out_patch[..., :, :overlap_size//2] *= 0
                            out_patch_mask[..., :, :overlap_size//2] *= 0
                    E[..., h_idx*sf:(h_idx+size_patch_testing)*sf, w_idx*sf:(w_idx+size_patch_testing)*sf].add_(out_patch)
                    W[..., h_idx*sf:(h_idx+size_patch_testing)*sf, w_idx*sf:(w_idx+size_patch_testing)*sf].add_(out_patch_mask)
            output = E.div_(W)
        else:
            _, _, _, h_old, w_old = lq.size()
            # Reflect-pad H and W up to multiples of the spatial window sizes,
            # run the whole clip, then crop back to the original size.
            h_pad = (h_old// window_size[1]+1)*window_size[1] - h_old
            w_pad = (w_old// window_size[2]+1)*window_size[2] - w_old
            lq = torch.cat([lq, torch.flip(lq[:, :, :, -h_pad:, :], [3])], 3)
            lq = torch.cat([lq, torch.flip(lq[:, :, :, :, -w_pad:], [4])], 4)
            if hasattr(self, 'netE'):
                output = self.netE(lq).detach().cpu()
            else:
                output = self.netG(lq).detach().cpu()
            output = output[:, :, :, :h_old*sf, :w_old*sf]
        return output

    # ----------------------------------------
    # load the state_dict of the network
    # ----------------------------------------
    def load_network(self, load_path, network, strict=True, param_key='params'):
        """Load a checkpoint into `network`, unwrapping DataParallel and
        extracting `param_key` if the file stores a wrapped state dict."""
        network = self.get_bare_model(network)
        state_dict = torch.load(load_path)
        if param_key in state_dict.keys():
            state_dict = state_dict[param_key]
        self._print_different_keys_loading(network, state_dict, strict)
        network.load_state_dict(state_dict, strict=strict)

    def _print_different_keys_loading(self, crt_net, load_net, strict=True):
        """Report key mismatches between the model and the checkpoint; with
        strict=False, rename size-mismatched checkpoint keys so they are
        ignored by load_state_dict instead of raising."""
        crt_net = self.get_bare_model(crt_net)
        crt_net = crt_net.state_dict()
        crt_net_keys = set(crt_net.keys())
        load_net_keys = set(load_net.keys())
        if crt_net_keys != load_net_keys:
            print('Current net - loaded net:')
            for v in sorted(list(crt_net_keys - load_net_keys)):
                print(f'  {v}')
            print('Loaded net - current net:')
            for v in sorted(list(load_net_keys - crt_net_keys)):
                print(f'  {v}')
        # check the size for the same keys
        if not strict:
            common_keys = crt_net_keys & load_net_keys
            for k in common_keys:
                if crt_net[k].size() != load_net[k].size():
                    print(f'Size different, ignore [{k}]: crt_net: '
                          f'{crt_net[k].shape}; load_net: {load_net[k].shape}')
                    # Move the entry to a key the model does not have so
                    # load_state_dict(strict=False) skips it.
                    load_net[k + '.ignore'] = load_net.pop(k)
| 11,246 | 42.42471 | 127 | py |
MaskedDenoising | MaskedDenoising-main/models/network_usrnet.py | import torch
import torch.nn as nn
import models.basicblock as B
import numpy as np
from utils import utils_image as util
"""
# --------------------------------------------
# Kai Zhang (cskaizhang@gmail.com)
@inproceedings{zhang2020deep,
title={Deep unfolding network for image super-resolution},
author={Zhang, Kai and Van Gool, Luc and Timofte, Radu},
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
pages={0--0},
year={2020}
}
# --------------------------------------------
"""
"""
# --------------------------------------------
# basic functions
# --------------------------------------------
"""
def splits(a, sf):
    '''Partition ``a`` into sf x sf distinct blocks.

    Args:
        a: NxCxWxHx2 tensor
        sf: split factor

    Returns:
        NxCx(W/sf)x(H/sf)x2x(sf^2) tensor stacking the blocks on a new axis.
    '''
    rows = torch.stack(torch.chunk(a, sf, dim=2), dim=5)
    return torch.cat(torch.chunk(rows, sf, dim=3), dim=5)
def c2c(x):
    """Convert a numpy complex array into a float32 torch tensor with a
    trailing real/imag dimension."""
    parts = np.stack([np.float32(x.real), np.float32(x.imag)], axis=-1)
    return torch.from_numpy(parts)
def r2c(x):
    """Promote a real tensor to complex (..., 2) format by appending a zero
    imaginary channel."""
    return torch.stack((x, torch.zeros_like(x)), dim=-1)
def cdiv(x, y):
    """Complex division of two (..., 2) real/imag tensors."""
    a, b = x[..., 0], x[..., 1]
    c, d = y[..., 0], y[..., 1]
    denom = c**2 + d**2
    real = (a*c + b*d) / denom
    imag = (b*c - a*d) / denom
    return torch.stack((real, imag), dim=-1)
def crdiv(x, y):
    """Divide a complex (..., 2) tensor by a real tensor or scalar."""
    return torch.stack((x[..., 0] / y, x[..., 1] / y), dim=-1)
def csum(x, y):
    """Add a real value to the real channel of a complex (..., 2) tensor."""
    return torch.stack((x[..., 0] + y, x[..., 1]), dim=-1)
def cabs(x):
    """Modulus of a complex (..., 2) tensor."""
    return torch.sqrt(x[..., 0]**2 + x[..., 1]**2)
def cabs2(x):
    """Squared modulus of a complex (..., 2) tensor."""
    re, im = x[..., 0], x[..., 1]
    return re**2 + im**2
def cmul(t1, t2):
    '''Complex multiplication of two (..., 2) real/imag tensors.

    Args:
        t1: NxCxHxWx2, complex tensor
        t2: NxCxHxWx2

    Returns:
        NxCxHxWx2 product tensor
    '''
    a, b = t1[..., 0], t1[..., 1]
    c, d = t2[..., 0], t2[..., 1]
    return torch.stack((a * c - b * d, a * d + b * c), dim=-1)
def cconj(t, inplace=False):
    '''Complex conjugate of a (..., 2) tensor.

    Args:
        t: NxCxHxWx2
        inplace: when True, negate the imaginary channel of ``t`` itself.

    Returns:
        NxCxHxWx2 conjugated tensor (a clone unless ``inplace``).
    '''
    out = t if inplace else t.clone()
    out[..., 1] *= -1
    return out
def rfft(t):
    """Real-to-complex 2-D DFT over the last two dims.

    ``torch.rfft`` was removed in PyTorch 1.8; this uses ``torch.fft.fft2``
    and converts to the legacy two-sided (..., 2) real/imag layout so
    downstream code indexing ``[..., 0]`` / ``[..., 1]`` keeps working.
    """
    return torch.view_as_real(torch.fft.fft2(t))
def irfft(t):
    """Complex-to-real inverse 2-D DFT; input uses the legacy (..., 2)
    real/imag layout (replacement for the removed ``torch.irfft`` with
    ``onesided=False``)."""
    return torch.fft.ifft2(torch.view_as_complex(t.contiguous())).real
def fft(t):
    """Complex-to-complex 2-D DFT in the legacy (..., 2) real/imag layout
    (replacement for the removed ``torch.fft`` function)."""
    return torch.view_as_real(torch.fft.fft2(torch.view_as_complex(t.contiguous())))
def ifft(t):
    """Complex-to-complex inverse 2-D DFT in the legacy (..., 2) real/imag
    layout (replacement for the removed ``torch.ifft`` function)."""
    return torch.view_as_real(torch.fft.ifft2(torch.view_as_complex(t.contiguous())))
def p2o(psf, shape):
    '''
    Convert point-spread function to optical transfer function.
    otf = p2o(psf) computes the Fast Fourier Transform (FFT) of the
    point-spread function (PSF) array and creates the optical transfer
    function (OTF) array that is not influenced by the PSF off-centering.
    Args:
        psf: NxCxhxw
        shape: [H, W]
    Returns:
        otf: NxCxHxWx2
    '''
    # zero-pad the PSF to the target spatial size
    otf = torch.zeros(psf.shape[:-2] + shape).type_as(psf)
    otf[...,:psf.shape[2],:psf.shape[3]].copy_(psf)
    # circularly shift so the PSF center sits at the origin
    for axis, axis_size in enumerate(psf.shape[2:]):
        otf = torch.roll(otf, -int(axis_size / 2), dims=axis+2)
    # torch.rfft was removed in PyTorch 1.8; fft2 + view_as_real reproduces
    # the old two-sided (..., 2) real/imag layout
    otf = torch.view_as_real(torch.fft.fft2(otf))
    # zero out numerically-insignificant imaginary parts (rounding noise)
    n_ops = torch.sum(torch.tensor(psf.shape).type_as(psf) * torch.log2(torch.tensor(psf.shape).type_as(psf)))
    otf[..., 1][torch.abs(otf[..., 1]) < n_ops*2.22e-16] = torch.tensor(0).type_as(psf)
    return otf
def upsample(x, sf=3):
    '''s-fold upsampler: place each input pixel on an sf-spaced grid and
    fill the remaining entries with zeros.

    x: tensor image, NxCxWxH
    '''
    n, c, h, w = x.shape
    z = torch.zeros((n, c, h * sf, w * sf)).type_as(x)
    z[..., ::sf, ::sf] = x
    return z
def downsample(x, sf=3):
    '''s-fold downsampler: keep the upper-left pixel of each distinct
    sf x sf patch, discard the rest.

    x: tensor image, NxCxWxH
    '''
    return x[..., ::sf, ::sf]
def downsample_np(x, sf=3):
    """Numpy counterpart of ``downsample``: keep every sf-th pixel along the
    first two axes (HxWx...)."""
    return x[::sf, ::sf, ...]
"""
# --------------------------------------------
# (1) Prior module; ResUNet: act as a non-blind denoiser
# x_k = P(z_k, beta_k)
# --------------------------------------------
"""
class ResUNet(nn.Module):
    """Prior module of USRNet: a residual U-Net acting as a non-blind
    denoiser, x_k = P(z_k, beta_k).

    Args:
        in_nc: input channels (image channels plus the noise-level map).
        out_nc: output image channels.
        nc: channel widths of the four scales.
        nb: number of residual blocks per scale.
        act_mode: activation spec interpreted by models.basicblock.
        downsample_mode: 'avgpool' | 'maxpool' | 'strideconv'.
        upsample_mode: 'upconv' | 'pixelshuffle' | 'convtranspose'.
    """
    def __init__(self, in_nc=4, out_nc=3, nc=[64, 128, 256, 512], nb=2, act_mode='R', downsample_mode='strideconv', upsample_mode='convtranspose'):
        super(ResUNet, self).__init__()
        self.m_head = B.conv(in_nc, nc[0], bias=False, mode='C')
        # downsample: pick the between-scale reduction block
        if downsample_mode == 'avgpool':
            downsample_block = B.downsample_avgpool
        elif downsample_mode == 'maxpool':
            downsample_block = B.downsample_maxpool
        elif downsample_mode == 'strideconv':
            downsample_block = B.downsample_strideconv
        else:
            raise NotImplementedError('downsample mode [{:s}] is not found'.format(downsample_mode))
        # encoder: nb residual blocks followed by a stride-2 reduction per scale
        self.m_down1 = B.sequential(*[B.ResBlock(nc[0], nc[0], bias=False, mode='C'+act_mode+'C') for _ in range(nb)], downsample_block(nc[0], nc[1], bias=False, mode='2'))
        self.m_down2 = B.sequential(*[B.ResBlock(nc[1], nc[1], bias=False, mode='C'+act_mode+'C') for _ in range(nb)], downsample_block(nc[1], nc[2], bias=False, mode='2'))
        self.m_down3 = B.sequential(*[B.ResBlock(nc[2], nc[2], bias=False, mode='C'+act_mode+'C') for _ in range(nb)], downsample_block(nc[2], nc[3], bias=False, mode='2'))
        self.m_body = B.sequential(*[B.ResBlock(nc[3], nc[3], bias=False, mode='C'+act_mode+'C') for _ in range(nb)])
        # upsample: pick the between-scale expansion block
        if upsample_mode == 'upconv':
            upsample_block = B.upsample_upconv
        elif upsample_mode == 'pixelshuffle':
            upsample_block = B.upsample_pixelshuffle
        elif upsample_mode == 'convtranspose':
            upsample_block = B.upsample_convtranspose
        else:
            raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))
        # decoder: a x2 expansion followed by nb residual blocks per scale
        self.m_up3 = B.sequential(upsample_block(nc[3], nc[2], bias=False, mode='2'), *[B.ResBlock(nc[2], nc[2], bias=False, mode='C'+act_mode+'C') for _ in range(nb)])
        self.m_up2 = B.sequential(upsample_block(nc[2], nc[1], bias=False, mode='2'), *[B.ResBlock(nc[1], nc[1], bias=False, mode='C'+act_mode+'C') for _ in range(nb)])
        self.m_up1 = B.sequential(upsample_block(nc[1], nc[0], bias=False, mode='2'), *[B.ResBlock(nc[0], nc[0], bias=False, mode='C'+act_mode+'C') for _ in range(nb)])
        self.m_tail = B.conv(nc[0], out_nc, bias=False, mode='C')
    def forward(self, x):
        """Denoise ``x``; spatial size is preserved (padding is cropped)."""
        # pad H and W up to multiples of 8 (three stride-2 stages)
        h, w = x.size()[-2:]
        paddingBottom = int(np.ceil(h/8)*8-h)
        paddingRight = int(np.ceil(w/8)*8-w)
        x = nn.ReplicationPad2d((0, paddingRight, 0, paddingBottom))(x)
        x1 = self.m_head(x)
        x2 = self.m_down1(x1)
        x3 = self.m_down2(x2)
        x4 = self.m_down3(x3)
        x = self.m_body(x4)
        # decoder with additive skip connections from the encoder
        x = self.m_up3(x+x4)
        x = self.m_up2(x+x3)
        x = self.m_up1(x+x2)
        x = self.m_tail(x+x1)
        # crop back to the original spatial size
        x = x[..., :h, :w]
        return x
"""
# --------------------------------------------
# (2) Data module, closed-form solution
# It is a trainable-parameter-free module ^_^
# z_k = D(x_{k-1}, s, k, y, alpha_k)
# some can be pre-calculated
# --------------------------------------------
"""
class DataNet(nn.Module):
    """Data-consistency module with a closed-form FFT-domain solution.

    Trainable-parameter-free: z_k = D(x_{k-1}, s, k, y, alpha_k). The FFT
    quantities (FB, FBC, F2B, FBFy) are pre-computed once per image.
    """
    def __init__(self):
        super(DataNet, self).__init__()
    def forward(self, x, FB, FBC, F2B, FBFy, alpha, sf):
        """One closed-form data step; all FFT tensors use the legacy
        (..., 2) real/imag layout."""
        # torch.rfft/irfft were removed in PyTorch 1.8; torch.fft with
        # view_as_real/view_as_complex reproduces the legacy layout.
        FR = FBFy + torch.view_as_real(torch.fft.fft2(alpha*x))
        x1 = cmul(FB, FR)
        # aggregate the sf x sf aliased blocks (distinct-block trick)
        FBR = torch.mean(splits(x1, sf), dim=-1, keepdim=False)
        invW = torch.mean(splits(F2B, sf), dim=-1, keepdim=False)
        invWBR = cdiv(FBR, csum(invW, alpha))
        FCBinvWBR = cmul(FBC, invWBR.repeat(1, 1, sf, sf, 1))
        FX = (FR-FCBinvWBR)/alpha.unsqueeze(-1)
        Xest = torch.fft.ifft2(torch.view_as_complex(FX.contiguous())).real
        return Xest
"""
# --------------------------------------------
# (3) Hyper-parameter module
# --------------------------------------------
"""
class HyPaNet(nn.Module):
    """Hyper-parameter module: maps (sigma, sf) maps to strictly positive
    per-iteration hyper-parameters via a 1x1-conv MLP with Softplus output."""
    def __init__(self, in_nc=2, out_nc=8, channel=64):
        super(HyPaNet, self).__init__()
        layers = [
            nn.Conv2d(in_nc, channel, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel, channel, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel, out_nc, 1, padding=0, bias=True),
            nn.Softplus(),
        ]
        self.mlp = nn.Sequential(*layers)
    def forward(self, x):
        # small epsilon keeps the predicted hyper-parameters strictly positive
        return self.mlp(x) + 1e-6
"""
# --------------------------------------------
# main USRNet
# deep unfolding super-resolution network
# --------------------------------------------
"""
class USRNet(nn.Module):
    """Deep unfolding super-resolution network (USRNet).

    Alternates ``n_iter`` times between the closed-form data module ``d``
    and the learned denoiser prior ``p``; the hyper-parameter module ``h``
    predicts the per-iteration alpha/beta values from (sigma, sf).
    """
    def __init__(self, n_iter=8, h_nc=64, in_nc=4, out_nc=3, nc=[64, 128, 256, 512], nb=2, act_mode='R', downsample_mode='strideconv', upsample_mode='convtranspose'):
        super(USRNet, self).__init__()
        self.d = DataNet()
        self.p = ResUNet(in_nc=in_nc, out_nc=out_nc, nc=nc, nb=nb, act_mode=act_mode, downsample_mode=downsample_mode, upsample_mode=upsample_mode)
        self.h = HyPaNet(in_nc=2, out_nc=n_iter*2, channel=h_nc)
        self.n = n_iter
    def forward(self, x, k, sf, sigma):
        '''
        x: tensor, NxCxWxH
        k: tensor, Nx(1,3)xwxh
        sf: integer, 1
        sigma: tensor, Nx1x1x1
        '''
        # initialization & pre-calculation of the FFT-domain quantities
        w, h = x.shape[-2:]
        FB = p2o(k, (w*sf, h*sf))
        FBC = cconj(FB, inplace=False)
        F2B = r2c(cabs2(FB))
        STy = upsample(x, sf=sf)
        # NOTE(review): torch.rfft was removed in PyTorch 1.8 — this line
        # requires an older torch build; confirm the pinned version.
        FBFy = cmul(FBC, torch.rfft(STy, 2, onesided=False))
        # nearest-neighbor upscaled input is the initial estimate
        x = nn.functional.interpolate(x, scale_factor=sf, mode='nearest')
        # hyper-parameter, alpha & beta: first n channels alpha, last n beta
        ab = self.h(torch.cat((sigma, torch.tensor(sf).type_as(sigma).expand_as(sigma)), dim=1))
        # unfolding: alternate data step (alpha_i) and prior step (beta_i)
        for i in range(self.n):
            x = self.d(x, FB, FBC, F2B, FBFy, ab[:, i:i+1, ...], sf)
            x = self.p(torch.cat((x, ab[:, i+self.n:i+self.n+1, ...].repeat(1, 1, x.size(2), x.size(3))), dim=1))
        return x
| 10,347 | 28.994203 | 172 | py |
MaskedDenoising | MaskedDenoising-main/models/network_rrdb.py | import math
import torch.nn as nn
import models.basicblock as B
"""
# --------------------------------------------
# SR network with Residual in Residual Dense Block (RRDB)
# "ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks"
# --------------------------------------------
"""
class RRDB(nn.Module):
    """
    SR network built from Residual-in-Residual Dense Blocks (ESRGAN).

    gc: number of growth channels inside each RRDB
    nb: number of RRDB blocks
    """
    def __init__(self, in_nc=3, out_nc=3, nc=64, nb=23, gc=32, upscale=4, act_mode='L', upsample_mode='upconv'):
        super(RRDB, self).__init__()
        assert 'R' in act_mode or 'L' in act_mode, 'Examples of activation function: R, L, BR, BL, IR, IL'
        # number of x2 upsampling stages (upscale 3 uses a single x3 stage)
        n_upscale = int(math.log(upscale, 2))
        if upscale == 3:
            n_upscale = 1
        m_head = B.conv(in_nc, nc, mode='C')
        # bug fix: honor the `gc` argument instead of hard-coding gc=32
        m_body = [B.RRDB(nc, gc=gc, mode='C'+act_mode) for _ in range(nb)]
        m_body.append(B.conv(nc, nc, mode='C'))
        if upsample_mode == 'upconv':
            upsample_block = B.upsample_upconv
        elif upsample_mode == 'pixelshuffle':
            upsample_block = B.upsample_pixelshuffle
        elif upsample_mode == 'convtranspose':
            upsample_block = B.upsample_convtranspose
        else:
            raise NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))
        if upscale == 3:
            m_uper = upsample_block(nc, nc, mode='3'+act_mode)
        else:
            m_uper = [upsample_block(nc, nc, mode='2'+act_mode) for _ in range(n_upscale)]
        H_conv0 = B.conv(nc, nc, mode='C'+act_mode)
        H_conv1 = B.conv(nc, out_nc, mode='C')
        m_tail = B.sequential(H_conv0, H_conv1)
        # trunk with a long skip connection around the RRDB body
        self.model = B.sequential(m_head, B.ShortcutBlock(B.sequential(*m_body)), *m_uper, m_tail)
    def forward(self, x):
        x = self.model(x)
        return x
| 1,828 | 32.254545 | 112 | py |
MaskedDenoising | MaskedDenoising-main/models/op/upfirdn2d.py | import os
import torch
from torch.autograd import Function
from torch.utils.cpp_extension import load, _import_module_from_library
module_path = os.path.dirname(__file__)
upfirdn2d_op = load(
'upfirdn2d',
sources=[
os.path.join(module_path, 'upfirdn2d.cpp'),
os.path.join(module_path, 'upfirdn2d_kernel.cu'),
],
)
#upfirdn2d_op = _import_module_from_library('upfirdn2d', '/tmp/torch_extensions/upfirdn2d', True)
class UpFirDn2dBackward(Function):
    # Backward pass of UpFirDn2d expressed as its own autograd Function so
    # that the double-backward (gradgrad) pass can also run through the
    # compiled upfirdn2d CUDA kernel.
    @staticmethod
    def forward(
        ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
    ):

        up_x, up_y = up
        down_x, down_y = down
        g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad

        # kernel expects (N*minor, H, W, 1) layout
        grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)

        # the gradient of upfirdn is upfirdn with up/down swapped, the
        # flipped kernel (grad_kernel) and the gradient padding g_pad
        grad_input = upfirdn2d_op.upfirdn2d(
            grad_output,
            grad_kernel,
            down_x,
            down_y,
            up_x,
            up_y,
            g_pad_x0,
            g_pad_x1,
            g_pad_y0,
            g_pad_y1,
        )
        grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])

        ctx.save_for_backward(kernel)

        pad_x0, pad_x1, pad_y0, pad_y1 = pad

        # stash the forward geometry for the gradgrad pass
        ctx.up_x = up_x
        ctx.up_y = up_y
        ctx.down_x = down_x
        ctx.down_y = down_y
        ctx.pad_x0 = pad_x0
        ctx.pad_x1 = pad_x1
        ctx.pad_y0 = pad_y0
        ctx.pad_y1 = pad_y1
        ctx.in_size = in_size
        ctx.out_size = out_size

        return grad_input

    @staticmethod
    def backward(ctx, gradgrad_input):
        kernel, = ctx.saved_tensors

        gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)

        # re-run the original forward geometry on the incoming gradgrad
        gradgrad_out = upfirdn2d_op.upfirdn2d(
            gradgrad_input,
            kernel,
            ctx.up_x,
            ctx.up_y,
            ctx.down_x,
            ctx.down_y,
            ctx.pad_x0,
            ctx.pad_x1,
            ctx.pad_y0,
            ctx.pad_y1,
        )
        # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3])
        gradgrad_out = gradgrad_out.view(
            ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
        )

        return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
    # Autograd wrapper around the compiled upfirdn2d CUDA kernel
    # (upsample by zero-insertion, FIR filter, downsample).
    @staticmethod
    def forward(ctx, input, kernel, up, down, pad):
        up_x, up_y = up
        down_x, down_y = down
        pad_x0, pad_x1, pad_y0, pad_y1 = pad

        kernel_h, kernel_w = kernel.shape
        batch, channel, in_h, in_w = input.shape
        ctx.in_size = input.shape

        # kernel expects (N*C, H, W, 1) layout
        input = input.reshape(-1, in_h, in_w, 1)

        # save both the kernel and its flipped version for the backward pass
        ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))

        # output spatial size after upsample + pad + filter + downsample
        out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
        out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
        ctx.out_size = (out_h, out_w)

        ctx.up = (up_x, up_y)
        ctx.down = (down_x, down_y)
        ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)

        # padding for the gradient (transposed) upfirdn in backward
        g_pad_x0 = kernel_w - pad_x0 - 1
        g_pad_y0 = kernel_h - pad_y0 - 1
        g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
        g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1

        ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)

        out = upfirdn2d_op.upfirdn2d(
            input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
        )
        # out = out.view(major, out_h, out_w, minor)
        out = out.view(-1, channel, out_h, out_w)

        return out

    @staticmethod
    def backward(ctx, grad_output):
        kernel, grad_kernel = ctx.saved_tensors

        grad_input = UpFirDn2dBackward.apply(
            grad_output,
            kernel,
            grad_kernel,
            ctx.up,
            ctx.down,
            ctx.pad,
            ctx.g_pad,
            ctx.in_size,
            ctx.out_size,
        )

        return grad_input, None, None, None, None
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
    """Apply the upfirdn (upsample, FIR filter, downsample) operation via
    the custom CUDA kernel, with symmetric up/down factors and padding."""
    up_xy = (up, up)
    down_xy = (down, down)
    pad_xyxy = (pad[0], pad[1], pad[0], pad[1])
    return UpFirDn2d.apply(input, kernel, up_xy, down_xy, pad_xyxy)
def upfirdn2d_native(
    input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
):
    """Pure-PyTorch reference implementation of upfirdn2d.

    input: (batch, in_h, in_w, minor) tensor; kernel: (kernel_h, kernel_w).
    Upsamples by zero-insertion, pads (negative padding crops), correlates
    with the flipped kernel (i.e. convolves with the kernel), then
    downsamples by striding.
    """
    # bug fix: `F` was referenced without being imported anywhere in the file
    import torch.nn.functional as F

    _, in_h, in_w, minor = input.shape
    kernel_h, kernel_w = kernel.shape

    out = input.view(-1, in_h, 1, in_w, 1, minor)
    # zero-insertion upsampling
    out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
    out = out.view(-1, in_h * up_y, in_w * up_x, minor)

    out = F.pad(
        out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
    )
    # negative padding means cropping
    out = out[
        :,
        max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
        max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
        :,
    ]

    out = out.permute(0, 3, 1, 2)
    out = out.reshape(
        [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
    )
    # cross-correlation with the flipped kernel == convolution with the kernel
    w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
    out = F.conv2d(out, w)
    out = out.reshape(
        -1,
        minor,
        in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
        in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
    )
    out = out.permute(0, 2, 3, 1)

    return out[:, ::down_y, ::down_x, :]
| 5,313 | 27.116402 | 108 | py |
MaskedDenoising | MaskedDenoising-main/models/op/fused_act.py | import os
import torch
from torch import nn
from torch.autograd import Function
from torch.utils.cpp_extension import load, _import_module_from_library
module_path = os.path.dirname(__file__)
fused = load(
'fused',
sources=[
os.path.join(module_path, 'fused_bias_act.cpp'),
os.path.join(module_path, 'fused_bias_act_kernel.cu'),
],
)
#fused = _import_module_from_library('fused', '/tmp/torch_extensions/fused', True)
class FusedLeakyReLUFunctionBackward(Function):
    # Backward pass of the fused bias + leaky-ReLU op as its own autograd
    # Function so that double-backward can also run through the CUDA kernel.
    @staticmethod
    def forward(ctx, grad_output, out, negative_slope, scale):
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale

        empty = grad_output.new_empty(0)

        # mode flags (3, 1) select the gradient path of the CUDA kernel
        grad_input = fused.fused_bias_act(
            grad_output, empty, out, 3, 1, negative_slope, scale
        )

        # reduce over all dims except the channel dim to get the bias grad
        dim = [0]

        if grad_input.ndim > 2:
            dim += list(range(2, grad_input.ndim))

        grad_bias = grad_input.sum(dim).detach()

        return grad_input, grad_bias

    @staticmethod
    def backward(ctx, gradgrad_input, gradgrad_bias):
        out, = ctx.saved_tensors
        gradgrad_out = fused.fused_bias_act(
            gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale
        )

        return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
    # Autograd wrapper for the fused bias-add + leaky-ReLU + scale CUDA op.
    @staticmethod
    def forward(ctx, input, bias, negative_slope, scale):
        empty = input.new_empty(0)
        # mode flags (3, 0) select the forward path of the CUDA kernel
        out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)
        # the output (not the input) is saved: the gradient only needs the
        # sign of the activation result
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale

        return out

    @staticmethod
    def backward(ctx, grad_output):
        out, = ctx.saved_tensors

        grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
            grad_output, out, ctx.negative_slope, ctx.scale
        )

        return grad_input, grad_bias, None, None
class FusedLeakyReLU(nn.Module):
    """Leaky ReLU with a learnable per-channel bias and output scaling,
    fused into a single CUDA op."""

    def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
        super().__init__()
        self.bias = nn.Parameter(torch.zeros(channel))
        self.negative_slope = negative_slope
        self.scale = scale

    def forward(self, input):
        return fused_leaky_relu(
            input, self.bias, self.negative_slope, self.scale
        )
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
    """Functional entry point for the fused bias + leaky-ReLU CUDA op."""
    apply_op = FusedLeakyReLUFunction.apply
    return apply_op(input, bias, negative_slope, scale)
| 2,492 | 27.011236 | 83 | py |
MaskedDenoising | MaskedDenoising-main/utils/utils_matconvnet.py | # -*- coding: utf-8 -*-
import numpy as np
import torch
from collections import OrderedDict
# import scipy.io as io
import hdf5storage
"""
# --------------------------------------------
# Convert matconvnet SimpleNN model into pytorch model
# --------------------------------------------
# Kai Zhang (cskaizhang@gmail.com)
# https://github.com/cszn
# 28/Nov/2019
# --------------------------------------------
"""
def weights2tensor(x, squeeze=False, in_features=None, out_features=None):
    """Modified version of https://github.com/albanie/pytorch-mcn

    Adjust the memory layout of MatConvNet weights (column-major, HxWxCinxCout)
    and load them as a torch tensor in PyTorch layout (Cout, Cin, H, W).

    Args:
        x (ndarray): a numpy array corresponding to a set of network weights
            stored in column major order.
        squeeze (bool) [False]: whether to squeeze the tensor (i.e. remove
            singletons from the trailing dimensions). After converting to
            pytorch layout (C_out, C_in, H, W), a shape of (A, B, 1, 1)
            is reshaped to a matrix with shape (A, B).
        in_features (int :: None): used to reshape weights for a linear block.
        out_features (int :: None): used to reshape weights for a linear block.

    Returns:
        torch.tensor: a permuted set of weights matching the pytorch layout
        convention.
    """
    if x.ndim == 4:
        # HxWxCinxCout -> CoutxCinxHxW
        x = x.transpose((3, 2, 0, 1))
        # for FFDNet, pixel-shuffle layer
#        if x.shape[1]==13:
#            x=x[:,[0,2,1,3,  4,6,5,7, 8,10,9,11, 12],:,:]
#        if x.shape[0]==12:
#            x=x[[0,2,1,3, 4,6,5,7, 8,10,9,11],:,:,:]
#        if x.shape[1]==5:
#            x=x[:,[0,2,1,3, 4],:,:]
#        if x.shape[0]==4:
#            x=x[[0,2,1,3],:,:,:]
        ## for SRMD, pixel-shuffle layer
#        if x.shape[0]==12:
#            x=x[[0,2,1,3, 4,6,5,7, 8,10,9,11],:,:,:]
#        if x.shape[0]==27:
#            x=x[[0,3,6,1,4,7,2,5,8, 0+9,3+9,6+9,1+9,4+9,7+9,2+9,5+9,8+9, 0+18,3+18,6+18,1+18,4+18,7+18,2+18,5+18,8+18],:,:,:]
#        if x.shape[0]==48:
#            x=x[[0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15, 0+16,4+16,8+16,12+16,1+16,5+16,9+16,13+16,2+16,6+16,10+16,14+16,3+16,7+16,11+16,15+16, 0+32,4+32,8+32,12+32,1+32,5+32,9+32,13+32,2+32,6+32,10+32,14+32,3+32,7+32,11+32,15+32],:,:,:]
    elif x.ndim == 3:  # add by Kai
        # promote HxWxC to a 4-D tensor, then permute as above
        x = x[:,:,:,None]
        x = x.transpose((3, 2, 0, 1))
    elif x.ndim == 2:
        # column vector -> flat 1-D bias
        if x.shape[1] == 1:
            x = x.flatten()
    if squeeze:
        if in_features and out_features:
            x = x.reshape((out_features, in_features))
        x = np.squeeze(x)
    # ascontiguousarray: transposes above produce non-contiguous views,
    # which torch.from_numpy cannot wrap directly
    return torch.from_numpy(np.ascontiguousarray(x))
def save_model(network, save_path):
    """Serialize ``network``'s state_dict to ``save_path``, moving every
    tensor to CPU first so the checkpoint loads on any device."""
    state_dict = network.state_dict()
    for name, tensor in state_dict.items():
        state_dict[name] = tensor.cpu()
    torch.save(state_dict, save_path)
# Script entry: convert the 25 IRCNN color denoisers stored in the
# MatConvNet file models/modelcolor.mat into one PyTorch checkpoint.
if __name__ == '__main__':
#    from utils import utils_logger
#    import logging
#    utils_logger.logger_info('a', 'a.log')
#    logger = logging.getLogger('a')
#
#    mcn = hdf5storage.loadmat('/model_zoo/matfile/FFDNet_Clip_gray.mat')
    mcn = hdf5storage.loadmat('models/modelcolor.mat')
    #logger.info(mcn['CNNdenoiser'][0][0][0][1][0][0][0][0])
    mat_net = OrderedDict()
    # one sub-dict of converted conv weights/biases per denoiser network
    for idx in range(25):
        mat_net[str(idx)] = OrderedDict()
        count = -1
        print(idx)
        # walk the layers of network `idx`, keeping only the conv layers
        for i in range(13):
            if mcn['CNNdenoiser'][0][idx][0][i][0][0][0][0] == 'conv':
                count += 1
                w = mcn['CNNdenoiser'][0][idx][0][i][0][1][0][0]
                # print(w.shape)
                w = weights2tensor(w)
                # print(w.shape)
                b = mcn['CNNdenoiser'][0][idx][0][i][0][1][0][1]
                b = weights2tensor(b)
                print(b.shape)
                # key layout matches an nn.Sequential of conv/act pairs
                mat_net[str(idx)]['model.{:d}.weight'.format(count*2)] = w
                mat_net[str(idx)]['model.{:d}.bias'.format(count*2)] = b
    torch.save(mat_net, 'model_zoo/modelcolor.pth')
# from models.network_dncnn import IRCNN as net
# network = net(in_nc=3, out_nc=3, nc=64)
# state_dict = network.state_dict()
#
# #show_kv(state_dict)
#
# for i in range(len(mcn['net'][0][0][0])):
# print(mcn['net'][0][0][0][i][0][0][0][0])
#
# count = -1
# mat_net = OrderedDict()
# for i in range(len(mcn['net'][0][0][0])):
# if mcn['net'][0][0][0][i][0][0][0][0] == 'conv':
#
# count += 1
# w = mcn['net'][0][0][0][i][0][1][0][0]
# print(w.shape)
# w = weights2tensor(w)
# print(w.shape)
#
# b = mcn['net'][0][0][0][i][0][1][0][1]
# b = weights2tensor(b)
# print(b.shape)
#
# mat_net['model.{:d}.weight'.format(count*2)] = w
# mat_net['model.{:d}.bias'.format(count*2)] = b
#
# torch.save(mat_net, 'E:/pytorch/KAIR_ongoing/model_zoo/ffdnet_gray_clip.pth')
#
#
#
# crt_net = torch.load('E:/pytorch/KAIR_ongoing/model_zoo/imdn_x4.pth')
# def show_kv(net):
# for k, v in net.items():
# print(k)
#
# show_kv(crt_net)
# from models.network_dncnn import DnCNN as net
# network = net(in_nc=2, out_nc=1, nc=64, nb=20, act_mode='R')
# from models.network_srmd import SRMD as net
# #network = net(in_nc=1, out_nc=1, nc=64, nb=15, act_mode='R')
# network = net(in_nc=19, out_nc=3, nc=128, nb=12, upscale=4, act_mode='R', upsample_mode='pixelshuffle')
#
# from models.network_rrdb import RRDB as net
# network = net(in_nc=3, out_nc=3, nc=64, nb=23, gc=32, upscale=4, act_mode='L', upsample_mode='upconv')
#
# state_dict = network.state_dict()
# for key, param in state_dict.items():
# print(key)
# from models.network_imdn import IMDN as net
# network = net(in_nc=3, out_nc=3, nc=64, nb=8, upscale=4, act_mode='L', upsample_mode='pixelshuffle')
# state_dict = network.state_dict()
# mat_net = OrderedDict()
# for ((key, param),(key2, param2)) in zip(state_dict.items(), crt_net.items()):
# mat_net[key] = param2
# torch.save(mat_net, 'model_zoo/imdn_x4_1.pth')
#
# net_old = torch.load('net_old.pth')
# def show_kv(net):
# for k, v in net.items():
# print(k)
#
# show_kv(net_old)
# from models.network_dpsr import MSRResNet_prior as net
# model = net(in_nc=4, out_nc=3, nc=96, nb=16, upscale=4, act_mode='R', upsample_mode='pixelshuffle')
# state_dict = network.state_dict()
# net_new = OrderedDict()
# for ((key, param),(key_old, param_old)) in zip(state_dict.items(), net_old.items()):
# net_new[key] = param_old
# torch.save(net_new, 'net_new.pth')
# print(key)
# print(param.size())
# run utils/utils_matconvnet.py
| 6,804 | 33.368687 | 239 | py |
MaskedDenoising | MaskedDenoising-main/utils/utils_sisr.py | # -*- coding: utf-8 -*-
from utils import utils_image as util
import random
import scipy
import scipy.stats as ss
import scipy.io as io
from scipy import ndimage
from scipy.interpolate import interp2d
import numpy as np
import torch
"""
# --------------------------------------------
# Super-Resolution
# --------------------------------------------
#
# Kai Zhang (cskaizhang@gmail.com)
# https://github.com/cszn
# modified by Kai Zhang (github: https://github.com/cszn)
# 03/03/2020
# --------------------------------------------
"""
"""
# --------------------------------------------
# anisotropic Gaussian kernels
# --------------------------------------------
"""
def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
    """Generate an anisotropic Gaussian kernel.

    Args:
        ksize: kernel size, e.g. 15
        theta: rotation angle in [0, pi]
        l1: eigenvalue scaling in [0.1, 50]
        l2: eigenvalue scaling in [0.1, l1]

    With l1 == l2 the kernel is isotropic.

    Returns:
        k: the normalized kernel
    """
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta), np.cos(theta)]])
    v = rot @ np.array([1., 0.])
    # orthogonal basis built from the rotated direction and its normal
    basis = np.array([[v[0], v[1]], [v[1], -v[0]]])
    scales = np.array([[l1, 0], [0, l2]])
    cov = basis @ scales @ np.linalg.inv(basis)
    return gm_blur_kernel(mean=[0, 0], cov=cov, size=ksize)
def gm_blur_kernel(mean, cov, size=15):
    """Evaluate a Gaussian pdf with the given mean/covariance on a
    (size x size) grid and normalize it to sum to one."""
    center = size / 2.0 + 0.5
    kernel = np.zeros([size, size])
    for row in range(size):
        for col in range(size):
            offset = [col - center + 1, row - center + 1]
            kernel[row, col] = ss.multivariate_normal.pdf(offset, mean=mean, cov=cov)
    return kernel / np.sum(kernel)
"""
# --------------------------------------------
# calculate PCA projection matrix
# --------------------------------------------
"""
def get_pca_matrix(x, dim_pca=15):
    """
    Args:
        x: 225x10000 matrix (one flattened kernel per column)
        dim_pca: number of principal components to keep, e.g. 15

    Returns:
        pca_matrix: dim_pca x 225 projection matrix
    """
    cov = x @ x.T
    w, v = scipy.linalg.eigh(cov)
    # eigh returns eigenvalues ascending, so the top components come last
    return v[:, -dim_pca:].T
def show_pca(x):
    """Visualize each row of a PCA projection matrix (e.g. 15x225) as a
    square kernel surface."""
    side = int(np.sqrt(x.shape[1]))
    for row in x:
        util.surf(np.reshape(row, (side, -1), order="F"))
def cal_pca_matrix(path='PCA_matrix.mat', ksize=15, l_max=12.0, dim_pca=15, num_samples=500):
    """Sample random anisotropic Gaussian kernels, compute their PCA
    projection matrix, save it to ``path`` (MATLAB format), and return it."""
    kernels = np.zeros([ksize*ksize, num_samples], dtype=np.float32)
    for i in range(num_samples):
        # random angle and eigenvalue scalings (l2 <= l1)
        theta = np.pi*np.random.rand(1)
        l1 = 0.1+l_max*np.random.rand(1)
        l2 = 0.1+(l1-0.1)*np.random.rand(1)
        k = anisotropic_Gaussian(ksize=ksize, theta=theta[0], l1=l1[0], l2=l2[0])
        kernels[:, i] = k.flatten(order='F')
    pca_matrix = get_pca_matrix(kernels, dim_pca=dim_pca)
    io.savemat(path, {'p': pca_matrix})
    return pca_matrix
"""
# --------------------------------------------
# shifted anisotropic Gaussian kernels
# --------------------------------------------
"""
def shifted_anisotropic_Gaussian(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
    """Sample a random, shifted anisotropic Gaussian kernel.

    Modified version of https://github.com/assafshocher/BlindSR_dataset_generator
    (typical ranges: min_var = 0.175 * sf, max_var = 2.5 * sf).
    """
    # random eigenvalues and rotation angle for the covariance matrix
    lambda_1 = min_var + np.random.rand() * (max_var - min_var)
    lambda_2 = min_var + np.random.rand() * (max_var - min_var)
    theta = np.random.rand() * np.pi
    noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
    # covariance from its eigen decomposition
    rotation = np.array([[np.cos(theta), -np.sin(theta)],
                         [np.sin(theta), np.cos(theta)]])
    covariance = rotation @ np.diag([lambda_1, lambda_2]) @ rotation.T
    inv_cov = np.linalg.inv(covariance)[None, None, :, :]
    # shift the mean so the kernel stays aligned with the downsampled grid
    mu = (k_size // 2 - 0.5 * (scale_factor - 1))[None, None, :, None]
    # evaluate the (unnormalized) Gaussian at every kernel position
    xx, yy = np.meshgrid(range(k_size[0]), range(k_size[1]))
    grid = np.stack([xx, yy], 2)[:, :, :, None]
    delta = grid - mu
    delta_t = delta.transpose(0, 1, 3, 2)
    raw = np.exp(-0.5 * np.squeeze(delta_t @ inv_cov @ delta)) * (1 + noise)
    # normalize to unit sum
    return raw / np.sum(raw)
def gen_kernel(k_size=np.array([25, 25]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=12., noise_level=0):
    """Sample a random shifted anisotropic Gaussian kernel with a random
    scale factor drawn from {1, 2, 3, 4}.

    Modified version of https://github.com/assafshocher/BlindSR_dataset_generator
    (typical ranges: min_var = 0.175 * sf, max_var = 2.5 * sf).
    """
    sf = random.choice([1, 2, 3, 4])
    scale_factor = np.array([sf, sf])
    # random eigenvalues and rotation angle for the covariance matrix
    lambda_1 = min_var + np.random.rand() * (max_var - min_var)
    lambda_2 = min_var + np.random.rand() * (max_var - min_var)
    theta = np.random.rand() * np.pi
    noise = 0  # multiplicative noise disabled (original draw commented out)
    # covariance from its eigen decomposition
    rotation = np.array([[np.cos(theta), -np.sin(theta)],
                         [np.sin(theta), np.cos(theta)]])
    covariance = rotation @ np.diag([lambda_1, lambda_2]) @ rotation.T
    inv_cov = np.linalg.inv(covariance)[None, None, :, :]
    # shift the mean so the kernel stays aligned with the downsampled grid
    mu = (k_size // 2 - 0.5 * (scale_factor - 1))[None, None, :, None]
    # evaluate the (unnormalized) Gaussian at every kernel position
    xx, yy = np.meshgrid(range(k_size[0]), range(k_size[1]))
    grid = np.stack([xx, yy], 2)[:, :, :, None]
    delta = grid - mu
    delta_t = delta.transpose(0, 1, 3, 2)
    raw = np.exp(-0.5 * np.squeeze(delta_t @ inv_cov @ delta)) * (1 + noise)
    # normalize to unit sum
    return raw / np.sum(raw)
"""
# --------------------------------------------
# degradation models
# --------------------------------------------
"""
def bicubic_degradation(x, sf=3):
    '''Bicubicly downsample an image.

    Args:
        x: HxWxC image, [0, 1]
        sf: down-scale factor

    Return:
        bicubicly downsampled LR image
    '''
    return util.imresize_np(x, scale=1/sf)
def srmd_degradation(x, k, sf=3):
    ''' blur + bicubic downsampling
    Args:
        x: HxWxC image, [0, 1]
        k: hxw, double
        sf: down-scale factor
    Return:
        downsampled LR image
    Reference:
        @inproceedings{zhang2018learning,
          title={Learning a single convolutional super-resolution network for multiple degradations},
          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
          pages={3262--3271},
          year={2018}
        }
    '''
    # `scipy.ndimage.filters` was deprecated and later removed;
    # `ndimage.convolve` is the identical function. 'wrap' = circular blur.
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')  # 'nearest' | 'mirror'
    x = bicubic_degradation(x, sf=sf)
    return x
def dpsr_degradation(x, k, sf=3):
    ''' bicubic downsampling + blur
    Args:
        x: HxWxC image, [0, 1]
        k: hxw, double
        sf: down-scale factor
    Return:
        downsampled LR image
    Reference:
        @inproceedings{zhang2019deep,
          title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
          pages={1671--1681},
          year={2019}
        }
    '''
    x = bicubic_degradation(x, sf=sf)
    # `scipy.ndimage.filters` was deprecated and later removed;
    # `ndimage.convolve` is the identical function. 'wrap' = circular blur.
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    return x
def classical_degradation(x, k, sf=3):
    ''' blur + downsampling
    Args:
        x: HxWxC image, [0, 1]/[0, 255]
        k: hxw, double
        sf: down-scale factor
    Return:
        downsampled LR image
    '''
    # `scipy.ndimage.filters` was deprecated and later removed;
    # `ndimage.convolve` is the identical function. 'wrap' = circular blur.
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    #x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
    # keep the upper-left pixel of each sf x sf patch
    st = 0
    return x[st::sf, st::sf, ...]
def modcrop_np(img, sf):
    '''Crop the two leading spatial dims so each is divisible by sf.

    Args:
        img: numpy image, WxH or WxHxC
        sf: scale factor
    Return:
        cropped copy of the image
    '''
    w, h = img.shape[:2]
    cropped = np.copy(img)
    return cropped[:w - w % sf, :h - h % sf, ...]
'''
# =================
# Numpy
# =================
'''
def shift_pixel(x, sf, upper_left=True):
    """shift pixel for super-resolution with different scale factors
    Args:
        x: WxHxC or WxH, image or kernel
        sf: scale factor
        upper_left: shift direction (True shifts towards the upper-left corner)
    Return:
        the shifted array (same spatial shape as x)
    """
    # scipy.interpolate.interp2d was removed in SciPy 1.14;
    # RegularGridInterpolator with 'linear' reproduces the old bilinear result.
    from scipy.interpolate import RegularGridInterpolator

    h, w = x.shape[:2]
    shift = (sf - 1) * 0.5
    xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
    if upper_left:
        x1 = xv + shift
        y1 = yv + shift
    else:
        x1 = xv - shift
        y1 = yv - shift
    # clip so every query point stays inside the grid (edge replication)
    x1 = np.clip(x1, 0, w - 1)
    y1 = np.clip(y1, 0, h - 1)
    # build the HxW grid of (row, col) query coordinates
    yy, xx = np.meshgrid(y1, x1, indexing='ij')
    pts = np.stack([yy, xx], axis=-1)

    def _interp(plane):
        # bilinear interpolation of one 2-D plane at the shifted grid
        return RegularGridInterpolator((yv, xv), plane, method='linear')(pts)

    if x.ndim == 2:
        x = _interp(x)
    if x.ndim == 3:
        for i in range(x.shape[-1]):
            x[:, :, i] = _interp(x[:, :, i])
    return x
'''
# =================
# pytorch
# =================
'''
def splits(a, sf):
    '''Regroup an NxCxWxHx2 tensor into its sf*sf strided sub-grids.

    a: tensor NxCxWxHx2
    sf: scale factor
    out: tensor NxCx(W/sf)x(H/sf)x2x(sf^2)
    '''
    stacked_w = torch.stack(torch.chunk(a, sf, dim=2), dim=5)
    return torch.cat(torch.chunk(stacked_w, sf, dim=3), dim=5)
def c2c(x):
    """Complex numpy array -> float32 torch tensor with trailing (real, imag) dim."""
    re = np.float32(x.real)
    im = np.float32(x.imag)
    return torch.from_numpy(np.stack([re, im], axis=-1))
def r2c(x):
    """Promote a real tensor to [..., 2] complex layout with zero imaginary part."""
    return torch.stack((x, torch.zeros_like(x)), dim=-1)
def cdiv(x, y):
    """Element-wise complex division of two [..., 2] (real, imag) tensors."""
    a, b = x[..., 0], x[..., 1]
    c, d = y[..., 0], y[..., 1]
    denom = c * c + d * d
    real = (a * c + b * d) / denom
    imag = (b * c - a * d) / denom
    return torch.stack((real, imag), dim=-1)
def csum(x, y):
    """Add real value/tensor y to the real part of complex tensor x ([..., 2])."""
    return torch.stack((x[..., 0] + y, x[..., 1]), dim=-1)
def cabs(x):
    """Magnitude of a complex tensor stored as [..., 2] (real, imag)."""
    return (x[..., 0] ** 2 + x[..., 1] ** 2) ** 0.5
def cmul(t1, t2):
    '''Element-wise complex multiplication.

    t1: NxCxHxWx2
    output: NxCxHxWx2
    '''
    a, b = t1[..., 0], t1[..., 1]
    c, d = t2[..., 0], t2[..., 1]
    # (a + bi)(c + di) = (ac - bd) + (ad + bc)i
    return torch.stack((a * c - b * d, a * d + b * c), dim=-1)
def cconj(t, inplace=False):
    '''Complex conjugation of a [..., 2] (real, imag) tensor.

    t: NxCxHxWx2
    output: NxCxHxWx2 (a copy unless inplace=True)
    '''
    out = t if inplace else t.clone()
    out[..., 1] = -out[..., 1]
    return out
def rfft(t):
    """2-D FFT of a real tensor, returned with a trailing (real, imag) dim.

    torch.rfft(t, 2, onesided=False) was removed in PyTorch 1.8;
    torch.fft.fft2 + view_as_real reproduces the old layout exactly.
    """
    return torch.view_as_real(torch.fft.fft2(t))
def irfft(t):
    """Real part of the 2-D inverse FFT of a [..., 2] (real, imag) tensor.

    torch.irfft(t, 2, onesided=False) was removed in PyTorch 1.8; the trailing
    dim of size 2 is reinterpreted as a complex dtype via view_as_complex.
    """
    return torch.fft.ifft2(torch.view_as_complex(t.contiguous())).real
def fft(t):
    """2-D FFT of a [..., 2] (real, imag) tensor, same layout on output.

    The old function-form torch.fft(t, 2) was removed when torch.fft became a
    module (PyTorch 1.8); view_as_complex/view_as_real bridge the layouts.
    """
    return torch.view_as_real(torch.fft.fft2(torch.view_as_complex(t.contiguous())))
def ifft(t):
    """2-D inverse FFT of a [..., 2] (real, imag) tensor, same layout on output.

    Replaces the removed function-form torch.ifft(t, 2) (PyTorch < 1.8).
    """
    return torch.view_as_real(torch.fft.ifft2(torch.view_as_complex(t.contiguous())))
def p2o(psf, shape):
    '''Convert a point-spread function to its optical transfer function.

    Args:
        psf: NxCxhxw
        shape: (H, W) target spatial size (tuple, concatenated to psf.shape[:-2])
    Returns:
        otf: NxCxHxWx2, with (real, imag) in the trailing dim
    '''
    # zero-pad the PSF to the target spatial size
    otf = torch.zeros(psf.shape[:-2] + shape).type_as(psf)
    otf[...,:psf.shape[2],:psf.shape[3]].copy_(psf)
    # circularly shift so the PSF center moves to the (0, 0) corner
    for axis, axis_size in enumerate(psf.shape[2:]):
        otf = torch.roll(otf, -int(axis_size / 2), dims=axis+2)
    # torch.rfft was removed in PyTorch 1.8; fft2 + view_as_real matches
    # the old onesided=False output layout
    otf = torch.view_as_real(torch.fft.fft2(otf))
    # zero imaginary parts that are within FFT roundoff error
    n_ops = torch.sum(torch.tensor(psf.shape).type_as(psf) * torch.log2(torch.tensor(psf.shape).type_as(psf)))
    otf[...,1][torch.abs(otf[...,1])<n_ops*2.22e-16] = torch.tensor(0).type_as(psf)
    return otf
'''
# =================
PyTorch
# =================
'''
def INVLS_pytorch(FB, FBC, F2B, FR, tau, sf=2):
    '''Closed-form least-squares data solution in the Fourier domain.

    FB: NxCxWxHx2   (OTF of the blur kernel, complex-as-last-dim)
    F2B: NxCxWxHx2  (|FB|^2)
    Port of the MATLAB routine:
        x1 = FB.*FR;
        FBR = BlockMM(nr,nc,Nb,m,x1);
        invW = BlockMM(nr,nc,Nb,m,F2B);
        invWBR = FBR./(invW + tau*Nb);
        fun = @(block_struct) block_struct.data.*invWBR;
        FCBinvWBR = blockproc(FBC,[nr,nc],fun);
        FX = (FR-FCBinvWBR)/tau;
        Xest = real(ifft2(FX));
    '''
    x1 = cmul(FB, FR)
    # block-wise mean over the sf*sf aliased sub-grids (replaces BlockMM)
    FBR = torch.mean(splits(x1, sf), dim=-1, keepdim=False)
    invW = torch.mean(splits(F2B, sf), dim=-1, keepdim=False)
    invWBR = cdiv(FBR, csum(invW, tau))
    FCBinvWBR = cmul(FBC, invWBR.repeat(1,1,sf,sf,1))
    FX = (FR-FCBinvWBR)/tau
    # torch.irfft was removed in PyTorch 1.8; take the real part of ifft2
    # over the complex view of the trailing (real, imag) dim
    Xest = torch.fft.ifft2(torch.view_as_complex(FX.contiguous())).real
    return Xest
def real2complex(x):
    """Append a zero imaginary channel, producing [..., 2] complex layout."""
    zeros = torch.zeros_like(x)
    return torch.stack((x, zeros), dim=-1)
def modcrop(img, sf):
    '''Crop the trailing two dims so both are divisible by sf.

    img: tensor image, NxCxWxH or CxWxH or WxH
    sf: scale factor
    '''
    w, h = img.shape[-2:]
    cropped = img.clone()
    return cropped[..., :w - w % sf, :h - h % sf]
def upsample(x, sf=3, center=False):
    '''Zero-interleaved sf-fold upsampling.

    x: tensor image, NxCxWxH
    center: place samples at the cell center instead of the corner
    '''
    start = (sf - 1) // 2 if center else 0
    n, c, w, h = x.shape
    out = torch.zeros((n, c, w * sf, h * sf)).type_as(x)
    out[..., start::sf, start::sf].copy_(x)
    return out
def downsample(x, sf=3, center=False):
    """Strided sf-fold downsampling of an NxCxWxH tensor (inverse of upsample)."""
    start = (sf - 1) // 2 if center else 0
    return x[..., start::sf, start::sf]
def circular_pad(x, pad):
    """Periodically pad the last two dims of x[N, 1, W, H] by `pad` on each side."""
    out = torch.cat((x, x[:, :, 0:pad, :]), dim=2)            # wrap first rows to the bottom
    out = torch.cat((out, out[:, :, :, 0:pad]), dim=3)        # wrap first cols to the right
    out = torch.cat((out[:, :, -2 * pad:-pad, :], out), dim=2)  # wrap tail rows to the top
    out = torch.cat((out[:, :, :, -2 * pad:-pad], out), dim=3)  # wrap tail cols to the left
    return out
def pad_circular(input, padding):
    # type: (Tensor, List[int]) -> Tensor
    """Circularly pad every spatial dim of a batched tensor.

    Arguments
    :param input: tensor of shape :math:`(N, C_{\text{in}}, H, [W, D]))`
    :param padding: (tuple): m-elem tuple where m is the degree of convolution
    Returns
    :return: tensor of shape :math:`(N, C_{\text{in}}, [D + 2 * padding[0],
                                     H + 2 * padding[1]], W + 2 * padding[2]))`
    """
    # dims 0/1 are batch and channel; pad each remaining (spatial) dim.
    # NOTE(review): dim_pad_circular subtracts 1 from its `dimension` arg, so
    # passing `dimension + offset` here targets actual dims 2, 3, ... — verify
    # against dim_pad_circular before changing either function.
    offset = 3
    for dimension in range(input.dim() - offset + 1):
        input = dim_pad_circular(input, padding[dimension], dimension + offset)
    return input
def dim_pad_circular(input, padding, dimension):
    # type: (Tensor, int, int) -> Tensor
    """Circularly pad `input` along axis (dimension - 1) by `padding` on both sides."""
    axis = dimension - 1
    lead = [slice(None)] * axis
    # append the first `padding` entries to the tail ...
    input = torch.cat([input, input[tuple(lead + [slice(0, padding)])]], dim=axis)
    # ... then prepend the entries that originally sat `padding` before the end
    input = torch.cat([input[tuple(lead + [slice(-2 * padding, -padding)])], input], dim=axis)
    return input
def imfilter(x, k):
    '''Circular ("wrap") 2-D filtering via grouped conv2d.

    x: image, NxcxHxW
    k: kernel, cx1xhxw
    '''
    pad_h = (k.shape[-2] - 1) // 2
    pad_w = (k.shape[-1] - 1) // 2
    padded = pad_circular(x, padding=(pad_h, pad_w))
    return torch.nn.functional.conv2d(padded, k, groups=x.shape[1])
def G(x, k, sf=3, center=False):
    '''Forward degradation operator: circular blur then strided downsampling.

    x: image, NxcxHxW
    k: kernel, cx1xhxw
    sf: scale factor
    center: sample the first pixel (False) or the middle one (True)
    Matlab function:
        tmp = imfilter(x,h,'circular');
        y = downsample2(tmp,K);
    '''
    blurred = imfilter(x, k)
    return downsample(blurred, sf=sf, center=center)
def Gt(x, k, sf=3, center=False):
    '''Adjoint of G: zero-interleaved upsampling then circular blur.

    x: image, NxcxHxW
    k: kernel, cx1xhxw
    sf: scale factor
    center: sample the first pixel (False) or the middle one (True)
    Matlab function:
        tmp = upsample2(x,K);
        y = imfilter(tmp,h,'circular');
    '''
    upsampled = upsample(x, sf=sf, center=center)
    return imfilter(upsampled, k)
def interpolation_down(x, sf, center=False):
    """Strided downsampling that also returns the zero-filled image and its sampling mask.

    Returns (LR, y, mask): the low-res samples, x masked at the sampled
    positions, and the binary sampling mask itself.
    """
    mask = torch.zeros_like(x)
    one = torch.tensor(1).type_as(x)
    if center:
        start = torch.tensor((sf - 1) // 2)
        mask[..., start::sf, start::sf] = one
        LR = x[..., start::sf, start::sf]
    else:
        mask[..., ::sf, ::sf] = one
        LR = x[..., ::sf, ::sf]
    y = x.mul(mask)
    return LR, y, mask
'''
# =================
Numpy
# =================
'''
def blockproc(im, blocksize, fun):
    """Apply `fun` to each blocksize[0] x blocksize[1] tile of `im` and reassemble.

    Numpy port of MATLAB's blockproc.
    """
    row_bands = np.split(im, range(blocksize[0], im.shape[0], blocksize[0]), axis=0)
    processed_rows = []
    for band in row_bands:
        tiles = np.split(band, range(blocksize[1], im.shape[1], blocksize[1]), axis=1)
        processed_rows.append(np.concatenate([fun(tile) for tile in tiles], axis=1))
    return np.concatenate(processed_rows, axis=0)
def fun_reshape(a):
    """Column-major (Fortran-order) reshape to (-1, 1, C); blockproc callback."""
    return np.reshape(a, (-1, 1, a.shape[-1]), order='F')
def fun_mul(a, b):
    """Element-wise product (used as a blockproc callback)."""
    return a * b
def BlockMM(nr, nc, Nb, m, x1):
    '''Sum the Nb (nr x nc) blocks of x1 into a single nr x nc array (per channel).

    Port of the MATLAB routine:
        myfun = @(block_struct) reshape(block_struct.data,m,1);
        x1 = blockproc(x1,[nr nc],myfun);
        x1 = reshape(x1,m,Nb);
        x1 = sum(x1,2);
        x = reshape(x1,nr,nc);
    Note: order='F' keeps the column-major layout MATLAB uses.
    '''
    fun = fun_reshape
    x1 = blockproc(x1, blocksize=(nr, nc), fun=fun)
    x1 = np.reshape(x1, (m, Nb, x1.shape[-1]), order='F')
    x1 = np.sum(x1, 1)
    x = np.reshape(x1, (nr, nc, x1.shape[-1]), order='F')
    return x
def INVLS(FB, FBC, F2B, FR, tau, Nb, nr, nc, m):
    '''Numpy closed-form least-squares solution in the Fourier domain.

    Port of the MATLAB routine:
        x1 = FB.*FR;
        FBR = BlockMM(nr,nc,Nb,m,x1);
        invW = BlockMM(nr,nc,Nb,m,F2B);
        invWBR = FBR./(invW + tau*Nb);
        fun = @(block_struct) block_struct.data.*invWBR;
        FCBinvWBR = blockproc(FBC,[nr,nc],fun);
        FX = (FR-FCBinvWBR)/tau;
        Xest = real(ifft2(FX));
    '''
    x1 = FB*FR
    FBR = BlockMM(nr, nc, Nb, m, x1)
    invW = BlockMM(nr, nc, Nb, m, F2B)
    invWBR = FBR/(invW + tau*Nb)
    # multiply every (nr x nc) block of FBC by the shared invWBR factor
    FCBinvWBR = blockproc(FBC, [nr, nc], lambda im: fun_mul(im, invWBR))
    FX = (FR-FCBinvWBR)/tau
    Xest = np.real(np.fft.ifft2(FX, axes=(0, 1)))
    return Xest
def psf2otf(psf, shape=None):
    """
    Convert point-spread function to optical transfer function.
    Compute the Fast Fourier Transform (FFT) of the point-spread
    function (PSF) array and creates the optical transfer function (OTF)
    array that is not influenced by the PSF off-centering.
    By default, the OTF array is the same size as the PSF array.
    To ensure that the OTF is not altered due to PSF off-centering, PSF2OTF
    post-pads the PSF array (down or to the right) with zeros to match
    dimensions specified in OUTSIZE, then circularly shifts the values of
    the PSF array up (or to the left) until the central pixel reaches (1,1)
    position.
    Parameters
    ----------
    psf : `numpy.ndarray`
        PSF array
    shape : int
        Output shape of the OTF array
    Returns
    -------
    otf : `numpy.ndarray`
        OTF array
    Notes
    -----
    Adapted from MATLAB psf2otf function
    """
    if shape is None:  # idiomatic None test (was: type(shape) == type(None))
        shape = psf.shape
    shape = np.array(shape)
    if np.all(psf == 0):
        # an all-zero PSF has an all-zero OTF of the requested shape
        # return np.zeros_like(psf)
        return np.zeros(shape)
    if len(psf.shape) == 1:
        psf = psf.reshape((1, psf.shape[0]))
    inshape = psf.shape
    psf = zero_pad(psf, shape, position='corner')
    # circularly shift the PSF center to the (0, 0) corner
    for axis, axis_size in enumerate(inshape):
        psf = np.roll(psf, -int(axis_size / 2), axis=axis)
    # Compute the OTF
    otf = np.fft.fft2(psf, axes=(0, 1))
    # Estimate the rough number of operations involved in the FFT
    # and discard the PSF imaginary part if within roundoff error
    # roundoff error = machine epsilon = sys.float_info.epsilon
    # or np.finfo().eps
    n_ops = np.sum(psf.size * np.log2(psf.shape))
    otf = np.real_if_close(otf, tol=n_ops)
    return otf
def zero_pad(image, shape, position='corner'):
    """
    Extends image to a certain size with zeros
    Parameters
    ----------
    image: real 2d `numpy.ndarray`
        Input image
    shape: tuple of int
        Desired output shape of the image
    position : str, optional
        The position of the input image in the output one:
            * 'corner'
                top-left corner (default)
            * 'center'
                centered
    Returns
    -------
    padded_img: real `numpy.ndarray`
        The zero-padded image
    Raises
    ------
    ValueError
        If the target shape is non-positive, smaller than the source, or
        (for 'center') differs from the source shape by an odd amount.
    """
    shape = np.asarray(shape, dtype=int)
    imshape = np.asarray(image.shape, dtype=int)
    # np.alltrue was removed in NumPy 2.0; np.all is the supported spelling
    if np.all(imshape == shape):
        return image
    if np.any(shape <= 0):
        raise ValueError("ZERO_PAD: null or negative shape given")
    dshape = shape - imshape
    if np.any(dshape < 0):
        raise ValueError("ZERO_PAD: target size smaller than source one")
    pad_img = np.zeros(shape, dtype=image.dtype)
    idx, idy = np.indices(imshape)
    if position == 'center':
        if np.any(dshape % 2 != 0):
            raise ValueError("ZERO_PAD: source and target shapes "
                             "have different parity.")
        offx, offy = dshape // 2
    else:
        offx, offy = (0, 0)
    pad_img[idx + offx, idy + offy] = image
    return pad_img
def upsample_np(x, sf=3, center=False):
    """Zero-interleaved sf-fold upsampling of an HxWxC numpy array."""
    start = (sf - 1) // 2 if center else 0
    out = np.zeros((x.shape[0] * sf, x.shape[1] * sf, x.shape[2]))
    out[start::sf, start::sf, ...] = x
    return out
def downsample_np(x, sf=3, center=False):
    """Strided sf-fold downsampling of an HxWxC numpy array (inverse of upsample_np)."""
    start = (sf - 1) // 2 if center else 0
    return x[start::sf, start::sf, ...]
def imfilter_np(x, k):
    '''Circular ("wrap") 2-D filtering applied per channel.

    x: image, HxWxC numpy array
    k: kernel, hxw numpy array
    '''
    # scipy.ndimage.filters was deprecated/removed; ndimage.convolve is the
    # supported equivalent
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    return x
def G_np(x, k, sf=3, center=False):
    '''Numpy degradation operator: circular blur then strided downsampling.

    x: image, HxWxC
    k: kernel, hxw
    Matlab function:
        tmp = imfilter(x,h,'circular');
        y = downsample2(tmp,K);
    '''
    blurred = imfilter_np(x, k)
    return downsample_np(blurred, sf=sf, center=center)
def Gt_np(x, k, sf=3, center=False):
    '''Numpy adjoint operator: zero-interleaved upsampling then circular blur.

    x: image, HxWxC
    k: kernel, hxw
    Matlab function:
        tmp = upsample2(x,K);
        y = imfilter(tmp,h,'circular');
    '''
    upsampled = upsample_np(x, sf=sf, center=center)
    return imfilter_np(upsampled, k)
# Smoke test / demo: exercises each degradation model on a local test image.
# Requires 'test.bmp' on disk plus the kernel helpers defined earlier in this
# file (anisotropic_Gaussian, shifted_anisotropic_Gaussian) and the util module.
if __name__ == '__main__':
    img = util.imread_uint('test.bmp', 3)
    img = util.uint2single(img)
    k = anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6)
    util.imshow(k*10)
    for sf in [2, 3, 4]:
        # modcrop so the image size is divisible by sf
        img = modcrop_np(img, sf=sf)
        # 1) bicubic degradation
        img_b = bicubic_degradation(img, sf=sf)
        print(img_b.shape)
        # 2) srmd degradation (blur then bicubic)
        img_s = srmd_degradation(img, k, sf=sf)
        print(img_s.shape)
        # 3) dpsr degradation (bicubic then blur)
        img_d = dpsr_degradation(img, k, sf=sf)
        print(img_d.shape)
        # 4) classical degradation (blur then strided subsampling)
        img_d = classical_degradation(img, k, sf=sf)
        print(img_d.shape)
    k = anisotropic_Gaussian(ksize=7, theta=0.25*np.pi, l1=0.01, l2=0.01)
    #print(k)
    # util.imshow(k*10)
    k = shifted_anisotropic_Gaussian(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.8, max_var=10.8, noise_level=0.0)
    # util.imshow(k*10)
    # PCA
    # pca_matrix = cal_pca_matrix(ksize=15, l_max=10.0, dim_pca=15, num_samples=12500)
    # print(pca_matrix.shape)
    # show_pca(pca_matrix)
    # run utils/utils_sisr.py
    # run utils_sisr.py
| 23,082 | 26.188457 | 138 | py |
MaskedDenoising | MaskedDenoising-main/utils/utils_image.py | import os
import math
import random
import numpy as np
import torch
import cv2
from torchvision.utils import make_grid
from datetime import datetime
# import torchvision.transforms as transforms
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
'''
# --------------------------------------------
# Kai Zhang (github: https://github.com/cszn)
# 03/Mar/2019
# --------------------------------------------
# https://github.com/twhui/SRGAN-pyTorch
# https://github.com/xinntao/BasicSR
# --------------------------------------------
'''
# Recognised image filename extensions (case variants listed explicitly)
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif']
def is_image_file(filename):
    """Return True if `filename` ends with a recognised image extension."""
    # str.endswith accepts a tuple of suffixes — no any() generator needed
    return filename.endswith(tuple(IMG_EXTENSIONS))
def get_timestamp():
    """Current local time formatted as 'YYMMDD-HHMMSS'."""
    now = datetime.now()
    return now.strftime('%y%m%d-%H%M%S')
def imshow(x, title=None, cbar=False, figsize=None):
    """Display `x` (squeezed) as a grayscale matplotlib figure.

    Args:
        x: array-like image; singleton dims are squeezed away.
        title: optional figure title.
        cbar: if True, attach a colorbar.
        figsize: forwarded to plt.figure.
    """
    plt.figure(figsize=figsize)
    plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray')
    if title:
        plt.title(title)
    if cbar:
        plt.colorbar()
    plt.show()
def surf(Z, cmap='rainbow', figsize=None):
    """Render a 2-D array Z as a 3-D matplotlib surface plot.

    Args:
        Z: 2-D array of heights.
        cmap: matplotlib colormap name.
        figsize: forwarded to plt.figure.
    """
    plt.figure(figsize=figsize)
    ax3 = plt.axes(projection='3d')
    w, h = Z.shape[:2]
    xx = np.arange(0,w,1)
    yy = np.arange(0,h,1)
    X, Y = np.meshgrid(xx, yy)
    ax3.plot_surface(X,Y,Z,cmap=cmap)
    #ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap=cmap)
    plt.show()
'''
# --------------------------------------------
# get image pathes
# --------------------------------------------
'''
def get_image_paths(dataroot):
    """Collect sorted image paths from a directory or a list of directories.

    Returns None when dataroot is None (optional-dataset case) or when it is
    neither a string nor a list.
    """
    if isinstance(dataroot, str):
        return sorted(_get_paths_from_images(dataroot))
    if isinstance(dataroot, list):
        collected = []
        for root in dataroot:
            collected += sorted(_get_paths_from_images(root))
        return collected
    return None
def _get_paths_from_images(path):
    """Recursively list every image file under `path` (sorted walk order)."""
    assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
    found = []
    for dirpath, _, fnames in sorted(os.walk(path)):
        for fname in sorted(fnames):
            if is_image_file(fname):
                found.append(os.path.join(dirpath, fname))
    assert found, '{:s} has no valid image file'.format(path)
    return found
'''
# --------------------------------------------
# split large images into small images
# --------------------------------------------
'''
def patches_from_image(img, p_size=512, p_overlap=64, p_max=800):
    """Split an image into overlapping p_size x p_size patches.

    Images whose height or width is <= p_max are returned unchanged as a
    single-element list.

    Args:
        img: HxWxC numpy image.
        p_size: patch side length.
        p_overlap: overlap between neighbouring patches.
        p_max: only images larger than p_max in both dims are split.
    Returns:
        list of HxWxC numpy patches.
    """
    w, h = img.shape[:2]
    patches = []
    if w > p_max and h > p_max:
        # np.int was removed in NumPy 1.24; the builtin int is equivalent here
        w1 = list(np.arange(0, w - p_size, p_size - p_overlap, dtype=int))
        h1 = list(np.arange(0, h - p_size, p_size - p_overlap, dtype=int))
        # always include the patch flush with the bottom/right edge
        w1.append(w - p_size)
        h1.append(h - p_size)
        for i in w1:
            for j in h1:
                patches.append(img[i:i + p_size, j:j + p_size, :])
    else:
        patches.append(img)
    return patches
def imssave(imgs, img_path):
    """Save a list of images next to `img_path`, numbered `<stem>_0000.png`, ...

    imgs: list, N images of size WxHxC (RGB; 3-channel inputs are swapped
    to BGR for cv2.imwrite)
    """
    img_name, ext = os.path.splitext(os.path.basename(img_path))
    for i, img in enumerate(imgs):
        if img.ndim == 3:
            # RGB -> BGR channel order expected by cv2.imwrite
            img = img[:, :, [2, 1, 0]]
        new_path = os.path.join(os.path.dirname(img_path), img_name+str('_{:04d}'.format(i))+'.png')
        cv2.imwrite(new_path, img)
def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=512, p_overlap=96, p_max=800):
    """
    split the large images from original_dataroot into small overlapped images with size (p_size)x(p_size),
    and save them into taget_dataroot; only the images with larger size than (p_max)x(p_max)
    will be splitted.
    Args:
        original_dataroot: source directory (walked recursively)
        taget_dataroot: destination directory for the patch PNGs
        n_channels: channels to read per image (1 or 3)
        p_size: size of small images
        p_overlap: patch size in training is a good choice
        p_max: images with smaller size than (p_max)x(p_max) keep unchanged.
    """
    paths = get_image_paths(original_dataroot)
    for img_path in paths:
        # img_name, ext = os.path.splitext(os.path.basename(img_path))
        img = imread_uint(img_path, n_channels=n_channels)
        patches = patches_from_image(img, p_size, p_overlap, p_max)
        # NOTE(review): only the basename is kept, so same-named files from
        # different source subdirectories overwrite each other — verify layout.
        imssave(patches, os.path.join(taget_dataroot, os.path.basename(img_path)))
        #if original_dataroot == taget_dataroot:
        #del img_path
'''
# --------------------------------------------
# makedir
# --------------------------------------------
'''
def mkdir(path):
    """Create directory `path` (with parents) unless it already exists."""
    if os.path.exists(path):
        return
    os.makedirs(path)
def mkdirs(paths):
    """mkdir() every entry of `paths` (accepts one string or an iterable of them)."""
    if isinstance(paths, str):
        mkdir(paths)
        return
    for p in paths:
        mkdir(p)
def mkdir_and_rename(path):
    """Create `path`; if it already exists, archive the old directory by renaming it first."""
    if os.path.exists(path):
        archived = path + '_archived_' + get_timestamp()
        print('Path already exists. Rename it to [{:s}]'.format(archived))
        os.rename(path, archived)
    os.makedirs(path)
'''
# --------------------------------------------
# read image from path
# opencv is fast, but read BGR numpy image
# --------------------------------------------
'''
# --------------------------------------------
# get uint8 image of size HxWxn_channles (RGB)
# --------------------------------------------
def imread_uint(path, n_channels=3):
    """Read an image file as a uint8 array with an explicit channel dim.

    Args:
        path: image file path (read with cv2).
        n_channels: 1 -> HxWx1 grayscale; 3 -> HxWx3 RGB (gray inputs replicated).
    Returns:
        HxWx3 (RGB or GGG) or HxWx1 (G) uint8 numpy array.
    """
    # input: path
    # output: HxWx3(RGB or GGG), or HxWx1 (G)
    if n_channels == 1:
        img = cv2.imread(path, 0)  # cv2.IMREAD_GRAYSCALE
        img = np.expand_dims(img, axis=2)  # HxWx1
    elif n_channels == 3:
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # BGR or G
        if img.ndim == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)  # GGG
        else:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # RGB
    return img
# --------------------------------------------
# matlab's imwrite
# --------------------------------------------
def imsave(img, img_path):
    """Write `img` to `img_path` (matlab-style); 3-channel input is RGB->BGR swapped for cv2."""
    img = np.squeeze(img)
    if img.ndim == 3:
        img = img[:, :, [2, 1, 0]]  # RGB -> BGR for cv2.imwrite
    cv2.imwrite(img_path, img)
def imwrite(img, img_path):
    """Write `img` to `img_path`; identical duplicate of imsave() kept for API compatibility."""
    img = np.squeeze(img)
    if img.ndim == 3:
        img = img[:, :, [2, 1, 0]]  # RGB -> BGR for cv2.imwrite
    cv2.imwrite(img_path, img)
# --------------------------------------------
# get single image of size HxWxn_channles (BGR)
# --------------------------------------------
def read_img(path):
    """Read an image with cv2 as float32 HWC BGR in [0, 1].

    Grayscale inputs gain a trailing channel dim; alpha channels beyond
    the first three are dropped.
    """
    # read image by cv2
    # return: Numpy float32, HWC, BGR, [0,1]
    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)  # cv2.IMREAD_GRAYSCALE
    img = img.astype(np.float32) / 255.
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    # some images have 4 channels
    if img.shape[2] > 3:
        img = img[:, :, :3]
    return img
'''
# --------------------------------------------
# image format conversion
# --------------------------------------------
# numpy(single) <---> numpy(uint)
# numpy(single) <---> tensor
# numpy(uint) <---> tensor
# --------------------------------------------
'''
# --------------------------------------------
# numpy(single) [0, 1] <---> numpy(uint)
# --------------------------------------------
def uint2single(img):
    """uint8 [0, 255] -> float32 [0, 1]."""
    scaled = img / 255.
    return np.float32(scaled)
def single2uint(img):
    """float [0, 1] -> uint8 [0, 255] (clip, scale, round)."""
    clipped = img.clip(0, 1)
    return np.uint8((clipped * 255.).round())
def uint162single(img):
    """uint16 [0, 65535] -> float32 [0, 1]."""
    scaled = img / 65535.
    return np.float32(scaled)
def single2uint16(img):
    """float [0, 1] -> uint16 [0, 65535] (clip, scale, round)."""
    clipped = img.clip(0, 1)
    return np.uint16((clipped * 65535.).round())
# --------------------------------------------
# numpy(uint) (HxWxC or HxW) <---> tensor
# --------------------------------------------
# convert uint to 4-dimensional torch tensor
def uint2tensor4(img):
    """uint8 HxWxC (or HxW) numpy image -> float tensor 1xCxHxW in [0, 1]."""
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    t = torch.from_numpy(np.ascontiguousarray(img))
    return t.permute(2, 0, 1).float().div(255.).unsqueeze(0)
# convert uint to 3-dimensional torch tensor
def uint2tensor3(img):
    """uint8 HxWxC (or HxW) numpy image -> float tensor CxHxW in [0, 1]."""
    if img.ndim == 2:
        img = np.expand_dims(img, axis=2)
    t = torch.from_numpy(np.ascontiguousarray(img))
    return t.permute(2, 0, 1).float().div(255.)
# convert 2/3/4-dimensional torch tensor to uint
def tensor2uint(img):
    """float tensor in [0, 1] -> uint8 numpy image (HxW or HxWxC)."""
    arr = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
    if arr.ndim == 3:
        arr = np.transpose(arr, (1, 2, 0))
    return np.uint8((arr * 255.0).round())
# --------------------------------------------
# numpy(single) (HxWxC) <---> tensor
# --------------------------------------------
# convert single (HxWxC) to 3-dimensional torch tensor
def single2tensor3(img):
    """HxWxC float numpy -> CxHxW float tensor."""
    arr = np.ascontiguousarray(img)
    return torch.from_numpy(arr).permute(2, 0, 1).float()
# convert single (HxWxC) to 4-dimensional torch tensor
def single2tensor4(img):
    """HxWxC float numpy -> 1xCxHxW float tensor."""
    arr = np.ascontiguousarray(img)
    return torch.from_numpy(arr).permute(2, 0, 1).float().unsqueeze(0)
# convert torch tensor to single
def tensor2single(img):
    """Tensor -> squeezed float numpy array, channels moved last if 3-D."""
    arr = img.data.squeeze().float().cpu().numpy()
    if arr.ndim == 3:
        arr = np.transpose(arr, (1, 2, 0))
    return arr
# convert torch tensor to single
def tensor2single3(img):
    """Tensor -> float numpy array with an explicit channel-last dim (HxWxC)."""
    arr = img.data.squeeze().float().cpu().numpy()
    if arr.ndim == 3:
        arr = np.transpose(arr, (1, 2, 0))
    elif arr.ndim == 2:
        arr = np.expand_dims(arr, axis=2)
    return arr
def single2tensor5(img):
    """HxWxCxT float numpy -> 1xCxHxWxT float tensor."""
    arr = np.ascontiguousarray(img)
    return torch.from_numpy(arr).permute(2, 0, 1, 3).float().unsqueeze(0)
def single32tensor5(img):
    """HxWxC float numpy -> 1x1xHxWxC float tensor (two leading singleton dims)."""
    arr = np.ascontiguousarray(img)
    return torch.from_numpy(arr).float().unsqueeze(0).unsqueeze(0)
def single42tensor4(img):
    """HxWxCxT float numpy -> CxHxWxT float tensor."""
    arr = np.ascontiguousarray(img)
    return torch.from_numpy(arr).permute(2, 0, 1, 3).float()
# from skimage.io import imread, imsave
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
    '''
    Converts a torch Tensor into an image Numpy array of BGR channel order
    Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
    Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
    '''
    lo, hi = min_max
    tensor = tensor.squeeze().float().cpu().clamp_(lo, hi)  # squeeze first, then clamp
    tensor = (tensor - lo) / (hi - lo)  # normalize to [0, 1]
    n_dim = tensor.dim()
    if n_dim == 4:
        n_img = len(tensor)
        grid = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
        img_np = np.transpose(grid[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 3:
        img_np = np.transpose(tensor.numpy()[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 2:
        img_np = tensor.numpy()
    else:
        raise TypeError(
            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
    if out_type == np.uint8:
        # numpy.uint8 truncates rather than rounds, so round explicitly first
        img_np = (img_np * 255.0).round()
    return img_np.astype(out_type)
'''
# --------------------------------------------
# Augmentation, flipe and/or rotate
# --------------------------------------------
# The following two are enough.
# (1) augmet_img: numpy image of WxHxC or WxH
# (2) augment_img_tensor4: tensor image 1xCxWxH
# --------------------------------------------
'''
def augment_img(img, mode=0):
    '''Kai Zhang (github: https://github.com/cszn)
    Flip/rotate augmentation: the 8 modes cover the dihedral group of the square.
    '''
    ops = {
        0: lambda a: a,
        1: lambda a: np.flipud(np.rot90(a)),
        2: lambda a: np.flipud(a),
        3: lambda a: np.rot90(a, k=3),
        4: lambda a: np.flipud(np.rot90(a, k=2)),
        5: lambda a: np.rot90(a),
        6: lambda a: np.rot90(a, k=2),
        7: lambda a: np.flipud(np.rot90(a, k=3)),
    }
    op = ops.get(mode)
    # an unknown mode returns None, matching the original fall-through
    return op(img) if op is not None else None
def augment_img_tensor4(img, mode=0):
    '''Kai Zhang (github: https://github.com/cszn)
    Flip/rotate augmentation on an NxCxHxW tensor over the last two dims.
    '''
    ops = {
        0: lambda t: t,
        1: lambda t: t.rot90(1, [2, 3]).flip([2]),
        2: lambda t: t.flip([2]),
        3: lambda t: t.rot90(3, [2, 3]),
        4: lambda t: t.rot90(2, [2, 3]).flip([2]),
        5: lambda t: t.rot90(1, [2, 3]),
        6: lambda t: t.rot90(2, [2, 3]),
        7: lambda t: t.rot90(3, [2, 3]).flip([2]),
    }
    op = ops.get(mode)
    # an unknown mode returns None, matching the original fall-through
    return op(img) if op is not None else None
def augment_img_tensor(img, mode=0):
    '''Kai Zhang (github: https://github.com/cszn)
    Apply augment_img() to a 3-D (CxHxW) or 4-D (NxCxHxW) tensor by
    round-tripping through numpy in channel-last (HWC / HWCN) layout.
    '''
    img_size = img.size()
    img_np = img.data.cpu().numpy()
    # move channels (and batch) last so augment_img sees an HxW-leading array
    if len(img_size) == 3:
        img_np = np.transpose(img_np, (1, 2, 0))
    elif len(img_size) == 4:
        img_np = np.transpose(img_np, (2, 3, 1, 0))
    img_np = augment_img(img_np, mode=mode)
    img_tensor = torch.from_numpy(np.ascontiguousarray(img_np))
    # restore the original channel-first layout
    if len(img_size) == 3:
        img_tensor = img_tensor.permute(2, 0, 1)
    elif len(img_size) == 4:
        img_tensor = img_tensor.permute(3, 2, 0, 1)
    return img_tensor.type_as(img)
def augment_img_np3(img, mode=0):
    """Flip/rotate augmentation on an HxWxC numpy image using views and transposes."""
    if mode == 0:
        return img
    if mode == 1:
        return img.transpose(1, 0, 2)
    if mode == 2:
        return img[::-1, :, :]
    if mode == 3:
        return img[::-1, :, :].transpose(1, 0, 2)
    if mode == 4:
        return img[:, ::-1, :]
    if mode == 5:
        return img[:, ::-1, :].transpose(1, 0, 2)
    if mode == 6:
        return img[:, ::-1, :][::-1, :, :]
    if mode == 7:
        return img[:, ::-1, :][::-1, :, :].transpose(1, 0, 2)
def augment_imgs(img_list, hflip=True, rot=True):
    """Apply one shared random horizontal flip / vertical flip / 90-degree rotation to all images."""
    # draw the augmentation once so every image in the list gets the same one
    do_hflip = hflip and random.random() < 0.5
    do_vflip = rot and random.random() < 0.5
    do_rot90 = rot and random.random() < 0.5
    def _augment(img):
        if do_hflip:
            img = img[:, ::-1, :]
        if do_vflip:
            img = img[::-1, :, :]
        if do_rot90:
            img = img.transpose(1, 0, 2)
        return img
    return [_augment(img) for img in img_list]
'''
# --------------------------------------------
# modcrop and shave
# --------------------------------------------
'''
def modcrop(img_in, scale):
    """Crop an HW or HWC numpy image so both spatial dims are divisible by `scale`.

    Raises ValueError for any other ndim.
    """
    img = np.copy(img_in)
    if img.ndim == 2:
        h, w = img.shape
        img = img[:h - h % scale, :w - w % scale]
    elif img.ndim == 3:
        h, w = img.shape[:2]
        img = img[:h - h % scale, :w - w % scale, :]
    else:
        raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
    return img
def shave(img_in, border=0):
    """Remove `border` pixels from each side of an HW/HWC numpy image (on a copy)."""
    img = np.copy(img_in)
    h, w = img.shape[:2]
    return img[border:h - border, border:w - border]
'''
# --------------------------------------------
# image processing process on numpy image
# channel_convert(in_c, tar_type, img_list):
# rgb2ycbcr(img, only_y=True):
# bgr2ycbcr(img, only_y=True):
# ycbcr2rgb(img):
# --------------------------------------------
'''
def rgb2ycbcr(img, only_y=True):
    '''same as matlab rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    Output dtype matches the input dtype.
    '''
    in_img_type = img.dtype
    # bug fix: astype() returns a new array and its result was discarded;
    # assigning it also stops `img *= 255.` from mutating the caller's
    # float input in place.
    img = img.astype(np.float32)
    if in_img_type != np.uint8:
        img *= 255.
    # convert
    if only_y:
        rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
                              [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
def ycbcr2rgb(img):
    '''same as matlab ycbcr2rgb
    Input:
        uint8, [0, 255]
        float, [0, 1]
    Output dtype matches the input dtype; values are clipped to [0, 255].
    '''
    in_img_type = img.dtype
    # bug fix: astype() returns a new array and its result was discarded;
    # assigning it also stops `img *= 255.` from mutating the caller's
    # float input in place.
    img = img.astype(np.float32)
    if in_img_type != np.uint8:
        img *= 255.
    # convert
    rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
                          [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
    rlt = np.clip(rlt, 0, 255)
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
def bgr2ycbcr(img, only_y=True):
    '''bgr version of rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    Output dtype matches the input dtype.
    '''
    in_img_type = img.dtype
    # bug fix: astype() returns a new array and its result was discarded;
    # assigning it also stops `img *= 255.` from mutating the caller's
    # float input in place.
    img = img.astype(np.float32)
    if in_img_type != np.uint8:
        img *= 255.
    # convert
    if only_y:
        rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
                              [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
def channel_convert(in_c, tar_type, img_list):
    """Convert a list of images among BGR, single-channel gray, and Y.

    Unsupported (in_c, tar_type) combinations return the list unchanged.
    """
    if in_c == 3 and tar_type == 'gray':  # BGR to gray
        grays = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
        return [np.expand_dims(g, axis=2) for g in grays]
    if in_c == 3 and tar_type == 'y':  # BGR to y
        ys = [bgr2ycbcr(img, only_y=True) for img in img_list]
        return [np.expand_dims(y, axis=2) for y in ys]
    if in_c == 1 and tar_type == 'RGB':  # gray/y to BGR
        return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
    return img_list
'''
# --------------------------------------------
# metric, PSNR, SSIM and PSNRB
# --------------------------------------------
'''
# --------------------------------------------
# PSNR
# --------------------------------------------
def calculate_psnr(img1, img2, border=0):
    """PSNR between two [0, 255] images, optionally ignoring a border.

    Returns inf for identical inputs; raises ValueError on shape mismatch.
    """
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    a = img1[border:h - border, border:w - border].astype(np.float64)
    b = img2[border:h - border, border:w - border].astype(np.float64)
    mse = np.mean((a - b) ** 2)
    if mse == 0:
        return float('inf')
    return 20 * math.log10(255.0 / math.sqrt(mse))
# --------------------------------------------
# SSIM
# --------------------------------------------
def calculate_ssim(img1, img2, border=0):
    '''calculate SSIM
    the same outputs as MATLAB's
    img1, img2: [0, 255]; multi-channel SSIM is averaged over channels
    '''
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    a = img1[border:h - border, border:w - border]
    b = img2[border:h - border, border:w - border]
    if a.ndim == 2:
        return ssim(a, b)
    elif a.ndim == 3:
        if a.shape[2] == 3:
            # mean SSIM over the three channels
            return np.array([ssim(a[:, :, c], b[:, :, c]) for c in range(3)]).mean()
        elif a.shape[2] == 1:
            return ssim(np.squeeze(a), np.squeeze(b))
    else:
        raise ValueError('Wrong input image dimensions.')
def ssim(img1, img2):
    """Mean SSIM of two single-channel [0, 255] images (MATLAB-compatible)."""
    # standard stabilisation constants for dynamic range L = 255
    C1 = (0.01 * 255)**2
    C2 = (0.03 * 255)**2
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    # 11x11 Gaussian window, sigma = 1.5, as in the reference implementation
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())
    # crop 5 pixels per side to keep only the 'valid' filtered region
    mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]  # valid
    mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
                                                            (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean()
def _blocking_effect_factor(im):
    """Blocking-effect factor (BEF) per batch item, used by calculate_psnrb.

    im: float tensor NxCxHxW; assumes an 8x8 (JPEG) block grid.
    Returns a tensor of shape (N,). BEF is zero when block boundaries are no
    more different than non-boundary pixel pairs.
    """
    block_size = 8
    # column/row indices lying exactly on 8x8 block boundaries
    block_horizontal_positions = torch.arange(7, im.shape[3] - 1, 8)
    block_vertical_positions = torch.arange(7, im.shape[2] - 1, 8)
    # squared differences across block boundaries
    horizontal_block_difference = (
                (im[:, :, :, block_horizontal_positions] - im[:, :, :, block_horizontal_positions + 1]) ** 2).sum(
        3).sum(2).sum(1)
    vertical_block_difference = (
                (im[:, :, block_vertical_positions, :] - im[:, :, block_vertical_positions + 1, :]) ** 2).sum(3).sum(
        2).sum(1)
    # remaining (non-boundary) neighbouring pixel pairs
    nonblock_horizontal_positions = np.setdiff1d(torch.arange(0, im.shape[3] - 1), block_horizontal_positions)
    nonblock_vertical_positions = np.setdiff1d(torch.arange(0, im.shape[2] - 1), block_vertical_positions)
    horizontal_nonblock_difference = (
                (im[:, :, :, nonblock_horizontal_positions] - im[:, :, :, nonblock_horizontal_positions + 1]) ** 2).sum(
        3).sum(2).sum(1)
    vertical_nonblock_difference = (
                (im[:, :, nonblock_vertical_positions, :] - im[:, :, nonblock_vertical_positions + 1, :]) ** 2).sum(
        3).sum(2).sum(1)
    # counts of boundary / non-boundary pairs for normalisation
    n_boundary_horiz = im.shape[2] * (im.shape[3] // block_size - 1)
    n_boundary_vert = im.shape[3] * (im.shape[2] // block_size - 1)
    boundary_difference = (horizontal_block_difference + vertical_block_difference) / (
                n_boundary_horiz + n_boundary_vert)
    n_nonboundary_horiz = im.shape[2] * (im.shape[3] - 1) - n_boundary_horiz
    n_nonboundary_vert = im.shape[3] * (im.shape[2] - 1) - n_boundary_vert
    nonboundary_difference = (horizontal_nonblock_difference + vertical_nonblock_difference) / (
                n_nonboundary_horiz + n_nonboundary_vert)
    # weight by log2(block)/log2(min dim); clamp negative BEF to zero
    scaler = np.log2(block_size) / np.log2(min([im.shape[2], im.shape[3]]))
    bef = scaler * (boundary_difference - nonboundary_difference)
    bef[boundary_difference <= nonboundary_difference] = 0
    return bef
def calculate_psnrb(img1, img2, border=0):
    """Calculate PSNR-B (PSNR with Blocking-effect factor).

    Ref: "Quality assessment of deblocked images", for JPEG image
    deblocking evaluation.
    # https://gitlab.com/Queuecumber/quantization-guided-ac/-/blob/master/metrics/psnrb.py

    Args:
        img1 (ndarray): Image with range [0, 255], HxW or HxWxC.
        img2 (ndarray): Image with range [0, 255], same shape as img1.
        border (int): Cropped pixels in each edge of an image. These
            pixels are not involved in the calculation.

    Returns:
        float: PSNR-B result, averaged over channels.
    """
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')

    # Promote grayscale inputs to single-channel HWC.
    if img1.ndim == 2:
        img1, img2 = np.expand_dims(img1, 2), np.expand_dims(img2, 2)

    # Strip the requested border, then move to float64.
    h, w = img1.shape[:2]
    img1 = img1[border:h - border, border:w - border].astype(np.float64)
    img2 = img2[border:h - border, border:w - border].astype(np.float64)

    # Work on normalized NCHW tensors, following the reference implementation.
    img1 = torch.from_numpy(img1).permute(2, 0, 1).unsqueeze(0) / 255.
    img2 = torch.from_numpy(img2).permute(2, 0, 1).unsqueeze(0) / 255.

    total = 0
    for c in range(img1.shape[1]):
        channel1 = img1[:, c:c + 1, :, :]
        channel2 = img2[:, c:c + 1, :, :]
        mse = torch.nn.functional.mse_loss(channel1, channel2, reduction='none')
        mse = mse.view(mse.shape[0], -1).mean(1)
        # BEF is computed on img1 only, as in the reference implementation.
        bef = _blocking_effect_factor(channel1)
        total += 10 * torch.log10(1 / (mse + bef))
    return float(total) / img1.shape[1]
'''
# --------------------------------------------
# matlab's bicubic imresize (numpy and torch) [0, 1]
# --------------------------------------------
'''
# matlab 'imresize' function, now only support 'bicubic'
def cubic(x):
    """Bicubic interpolation kernel (a = -0.5), matching MATLAB's cubic."""
    absx = torch.abs(x)
    absx2 = absx * absx
    absx3 = absx2 * absx
    # Piece for |x| <= 1 and piece for 1 < |x| <= 2; zero elsewhere.
    inner = (1.5 * absx3 - 2.5 * absx2 + 1) * ((absx <= 1).type_as(absx))
    outer = (-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2) * (((absx > 1) * (absx <= 2)).type_as(absx))
    return inner + outer
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
    """Compute per-output-pixel input indices and interpolation weights for
    the MATLAB-style imresize below (bicubic only; `kernel` only documents
    intent and is not consulted).

    Returns:
        (weights, indices, sym_len_s, sym_len_e): weight and index matrices
        of shape (out_length, P), plus how many symmetrically mirrored
        pixels are needed before the start / after the end of the input axis.
    """
    if (scale < 1) and (antialiasing):
        # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
        kernel_width = kernel_width / scale
    # Output-space coordinates
    x = torch.linspace(1, out_length, out_length)
    # Input-space coordinates. Calculate the inverse mapping such that 0.5
    # in output space maps to 0.5 in input space, and 0.5+scale in output
    # space maps to 1.5 in input space.
    u = x / scale + 0.5 * (1 - 1 / scale)
    # What is the left-most pixel that can be involved in the computation?
    left = torch.floor(u - kernel_width / 2)
    # What is the maximum number of pixels that can be involved in the
    # computation? Note: it's OK to use an extra pixel here; if the
    # corresponding weights are all zero, it will be eliminated at the end
    # of this function.
    P = math.ceil(kernel_width) + 2
    # The indices of the input pixels involved in computing the k-th output
    # pixel are in row k of the indices matrix.
    indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
        1, P).expand(out_length, P)
    # The weights used to compute the k-th output pixel are in row k of the
    # weights matrix.
    distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
    # apply cubic kernel
    if (scale < 1) and (antialiasing):
        weights = scale * cubic(distance_to_center * scale)
    else:
        weights = cubic(distance_to_center)
    # Normalize the weights matrix so that each row sums to 1.
    weights_sum = torch.sum(weights, 1).view(out_length, 1)
    weights = weights / weights_sum.expand(out_length, P)
    # If a column in weights is all zero, get rid of it. only consider the first and last column.
    weights_zero_tmp = torch.sum((weights == 0), 0)
    if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 1, P - 2)
        weights = weights.narrow(1, 1, P - 2)
    if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 0, P - 2)
        weights = weights.narrow(1, 0, P - 2)
    weights = weights.contiguous()
    indices = indices.contiguous()
    # sym_len_s/e: how far the (1-based) indices reach past [1, in_length];
    # the caller mirror-pads the input by these amounts.
    sym_len_s = -indices.min() + 1
    sym_len_e = indices.max() - in_length
    # Shift indices to address the mirror-padded buffer (0-based).
    indices = indices + sym_len_s - 1
    return weights, indices, int(sym_len_s), int(sym_len_e)
# --------------------------------------------
# imresize for tensor image [0, 1]
# --------------------------------------------
def imresize(img, scale, antialiasing=True):
    """MATLAB-compatible bicubic resize for a torch tensor image.

    Args:
        img (Tensor): CHW or HW image in [0, 1].
        scale (float): scale factor, applied to both H and W.
        antialiasing (bool): antialias when downscaling (MATLAB default).

    Returns:
        Tensor: resized CHW (or HW) image in [0, 1], without rounding.

    Fix: the input is no longer modified in place -- the previous
    `img.unsqueeze_(0)` permanently turned the caller's 2-D tensor 3-D.
    """
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        img = img.unsqueeze(0)  # promote HW -> 1HW without mutating the caller's tensor
    in_C, in_H, in_W = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'
    # Get interpolation weights and source indices for both axes.
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # Process H dimension: mirror-pad, then per-output-row weighted sums.
    img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
    img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
    sym_patch = img[:, :sym_len_Hs, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
    sym_patch = img[:, -sym_len_He:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
    out_1 = torch.FloatTensor(in_C, out_H, in_W)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
    # Process W dimension: mirror-pad, then per-output-column weighted sums.
    out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
    out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
    sym_patch = out_1[:, :, :sym_len_Ws]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
    sym_patch = out_1[:, :, -sym_len_We:]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(2, inv_idx)
    out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
    out_2 = torch.FloatTensor(in_C, out_H, out_W)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()  # out_2 is local, in-place squeeze is safe here
    return out_2
# --------------------------------------------
# imresize for numpy image [0, 1]
# --------------------------------------------
def imresize_np(img, scale, antialiasing=True):
    """MATLAB-compatible bicubic resize for a numpy image.

    Args:
        img (ndarray): HWC or HW image in [0, 1].
        scale (float): scale factor, applied to both H and W.
        antialiasing (bool): antialias when downscaling (MATLAB default).

    Returns:
        ndarray: resized HWC (or HW) image in [0, 1], without rounding.
    """
    # Now the scale should be the same for H and W
    # input: img: Numpy, HWC or HW [0,1]
    # output: HWC or HW [0,1] w/o round
    img = torch.from_numpy(img)
    need_squeeze = True if img.dim() == 2 else False
    if need_squeeze:
        # In-place unsqueeze only changes this local tensor's view metadata;
        # the caller's numpy array keeps its original shape.
        img.unsqueeze_(2)
    in_H, in_W, in_C = img.size()
    out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
    kernel_width = 4
    kernel = 'cubic'
    # Return the desired dimension order for performing the resize. The
    # strategy is to perform the resize first along the dimension with the
    # smallest scale factor.
    # Now we do not support this.
    # get weights and indices
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)
    # process H dimension
    # symmetric copying
    img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
    img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
    sym_patch = img[:sym_len_Hs, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
    sym_patch = img[-sym_len_He:, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(0, inv_idx)
    img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
    out_1 = torch.FloatTensor(out_H, in_W, in_C)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        for j in range(out_C):
            out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])
    # process W dimension
    # symmetric copying
    out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
    out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
    sym_patch = out_1[:, :sym_len_Ws, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
    sym_patch = out_1[:, -sym_len_We:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    sym_patch_inv = sym_patch.index_select(1, inv_idx)
    out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
    out_2 = torch.FloatTensor(out_H, out_W, in_C)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for j in range(out_C):
            out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])
    if need_squeeze:
        out_2.squeeze_()
    return out_2.numpy()
if __name__ == '__main__':
    # Manual smoke test: expects 'test.bmp' in the working directory and
    # relies on I/O helpers (imread_uint & friends) defined earlier in this
    # module. The remaining experiments are intentionally commented out.
    img = imread_uint('test.bmp', 3)
    # img = uint2single(img)
    # img_bicubic = imresize_np(img, 1/4)
    # imshow(single2uint(img_bicubic))
    #
    # img_tensor = single2tensor4(img)
    # for i in range(8):
    #     imshow(np.concatenate((augment_img(img, i), tensor2single(augment_img_tensor4(img_tensor, i))), 1))
    # patches = patches_from_image(img, p_size=128, p_overlap=0, p_max=200)
    # imssave(patches,'a.png')
| 33,132 | 31.579154 | 120 | py |
MaskedDenoising | MaskedDenoising-main/utils/utils_dist.py | # Modified from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/dist_utils.py # noqa: E501
import functools
import os
import subprocess
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
# ----------------------------------
# init
# ----------------------------------
def init_dist(launcher, backend='nccl', **kwargs):
    """Set up torch.distributed according to the launcher type.

    Args:
        launcher (str): 'pytorch' or 'slurm'.
        backend (str): torch.distributed backend, e.g. 'nccl'.
        **kwargs: forwarded to the backend-specific initializer.

    Raises:
        ValueError: if `launcher` is not recognized.
    """
    # Distributed workers require the 'spawn' start method.
    if mp.get_start_method(allow_none=True) is None:
        mp.set_start_method('spawn')
    if launcher == 'pytorch':
        _init_dist_pytorch(backend, **kwargs)
        return
    if launcher == 'slurm':
        _init_dist_slurm(backend, **kwargs)
        return
    raise ValueError(f'Invalid launcher type: {launcher}')
def _init_dist_pytorch(backend, **kwargs):
    """Initialize distributed training launched via torch.distributed.

    Reads the RANK environment variable (set by the launcher), pins this
    process to GPU (rank % num_gpus), then joins the process group.
    """
    rank = int(os.environ['RANK'])
    num_gpus = torch.cuda.device_count()
    torch.cuda.set_device(rank % num_gpus)
    dist.init_process_group(backend=backend, **kwargs)
def _init_dist_slurm(backend, port=None):
    """Initialize slurm distributed training environment.
    If argument ``port`` is not specified, then the master port will be system
    environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system
    environment variable, then a default port ``29500`` will be used.
    Args:
        backend (str): Backend of torch.distributed.
        port (int, optional): Master port. Defaults to None.
    """
    proc_id = int(os.environ['SLURM_PROCID'])
    ntasks = int(os.environ['SLURM_NTASKS'])
    node_list = os.environ['SLURM_NODELIST']
    num_gpus = torch.cuda.device_count()
    # Pin this task to a GPU before any CUDA work happens.
    torch.cuda.set_device(proc_id % num_gpus)
    # Resolve the first hostname of the allocation as the rendezvous master.
    addr = subprocess.getoutput(
        f'scontrol show hostname {node_list} | head -n1')
    # specify master port
    if port is not None:
        os.environ['MASTER_PORT'] = str(port)
    elif 'MASTER_PORT' in os.environ:
        pass  # use MASTER_PORT in the environment variable
    else:
        # 29500 is torch.distributed default port
        os.environ['MASTER_PORT'] = '29500'
    os.environ['MASTER_ADDR'] = addr
    os.environ['WORLD_SIZE'] = str(ntasks)
    os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
    os.environ['RANK'] = str(proc_id)
    dist.init_process_group(backend=backend)
# ----------------------------------
# get rank and world_size
# ----------------------------------
def get_dist_info():
    """Return (rank, world_size); (0, 1) when not running distributed."""
    initialized = dist.is_available() and dist.is_initialized()
    if not initialized:
        return 0, 1
    return dist.get_rank(), dist.get_world_size()
def get_rank():
    """Rank of the current process; 0 outside a distributed run."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
def get_world_size():
    """Number of distributed processes; 1 outside a distributed run."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def master_only(func):
    """Decorator: execute `func` only on rank 0; other ranks get None."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        rank, _ = get_dist_info()
        if rank != 0:
            return None
        return func(*args, **kwargs)
    return wrapper
# ----------------------------------
# operation across ranks
# ----------------------------------
def reduce_sum(tensor):
    """SUM all-reduce a tensor across ranks; identity when not distributed."""
    if not (dist.is_available() and dist.is_initialized()):
        return tensor
    # Clone so the caller's tensor is not overwritten by the reduction.
    reduced = tensor.clone()
    dist.all_reduce(reduced, op=dist.ReduceOp.SUM)
    return reduced
def gather_grad(params):
    """Average gradients of `params` across ranks (no-op when world_size == 1)."""
    world_size = get_world_size()
    if world_size == 1:
        return
    for p in params:
        if p.grad is None:
            continue
        dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
        p.grad.data.div_(world_size)
def all_gather(data):
    """Gather arbitrary picklable `data` from all ranks into a list.

    Returns:
        list: one entry per rank (just [data] when not distributed).
    """
    import pickle  # fix: `pickle` was used below but never imported in this module

    world_size = get_world_size()
    if world_size == 1:
        return [data]

    # Serialize the payload to a CUDA byte tensor.
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to('cuda')

    # Exchange payload sizes so every rank can allocate receive buffers.
    local_size = torch.IntTensor([tensor.numel()]).to('cuda')
    size_list = [torch.IntTensor([0]).to('cuda') for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    # all_gather requires equal shapes: pad every payload to max_size.
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).to('cuda'))
    if local_size != max_size:
        padding = torch.ByteTensor(size=(max_size - local_size,)).to('cuda')
        tensor = torch.cat((tensor, padding), 0)
    dist.all_gather(tensor_list, tensor)

    # Trim the padding and unpickle each rank's payload.
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
def reduce_loss_dict(loss_dict):
    """Reduce a dict of loss tensors across ranks (rank 0 holds the mean).

    Returns the input unchanged when world_size < 2.
    """
    world_size = get_world_size()
    if world_size < 2:
        return loss_dict
    with torch.no_grad():
        # Stack in a deterministic (sorted-key) order so all ranks agree.
        keys = sorted(loss_dict.keys())
        losses = torch.stack([loss_dict[k] for k in keys], 0)
        dist.reduce(losses, dst=0)
        if dist.get_rank() == 0:
            # Only the destination rank holds the true sum; make it a mean.
            losses /= world_size
        reduced_losses = dict(zip(keys, losses))
    return reduced_losses
| 5,275 | 25.118812 | 102 | py |
MaskedDenoising | MaskedDenoising-main/utils/utils_params.py | import torch
import torchvision
from models import basicblock as B
def show_kv(net):
    """Print every parameter key of a state-dict-like mapping."""
    for key in net.keys():
        print(key)
# should run train debug mode first to get an initial model
#crt_net = torch.load('../../experiments/debug_SRResNet_bicx4_in3nf64nb16/models/8_G.pth')
#
#for k, v in crt_net.items():
# print(k)
#for k, v in crt_net.items():
# if k in pretrained_net:
# crt_net[k] = pretrained_net[k]
# print('replace ... ', k)
# x2 -> x4
#crt_net['model.5.weight'] = pretrained_net['model.2.weight']
#crt_net['model.5.bias'] = pretrained_net['model.2.bias']
#crt_net['model.8.weight'] = pretrained_net['model.5.weight']
#crt_net['model.8.bias'] = pretrained_net['model.5.bias']
#crt_net['model.10.weight'] = pretrained_net['model.7.weight']
#crt_net['model.10.bias'] = pretrained_net['model.7.bias']
#torch.save(crt_net, '../pretrained_tmp.pth')
# x2 -> x3
'''
in_filter = pretrained_net['model.2.weight'] # 256, 64, 3, 3
new_filter = torch.Tensor(576, 64, 3, 3)
new_filter[0:256, :, :, :] = in_filter
new_filter[256:512, :, :, :] = in_filter
new_filter[512:, :, :, :] = in_filter[0:576-512, :, :, :]
crt_net['model.2.weight'] = new_filter
in_bias = pretrained_net['model.2.bias'] # 256, 64, 3, 3
new_bias = torch.Tensor(576)
new_bias[0:256] = in_bias
new_bias[256:512] = in_bias
new_bias[512:] = in_bias[0:576 - 512]
crt_net['model.2.bias'] = new_bias
torch.save(crt_net, '../pretrained_tmp.pth')
'''
# x2 -> x8
'''
crt_net['model.5.weight'] = pretrained_net['model.2.weight']
crt_net['model.5.bias'] = pretrained_net['model.2.bias']
crt_net['model.8.weight'] = pretrained_net['model.2.weight']
crt_net['model.8.bias'] = pretrained_net['model.2.bias']
crt_net['model.11.weight'] = pretrained_net['model.5.weight']
crt_net['model.11.bias'] = pretrained_net['model.5.bias']
crt_net['model.13.weight'] = pretrained_net['model.7.weight']
crt_net['model.13.bias'] = pretrained_net['model.7.bias']
torch.save(crt_net, '../pretrained_tmp.pth')
'''
# x3/4/8 RGB -> Y
def rgb2gray_net(net, only_input=True):
    """Convert the first conv layer of an RGB network state dict to accept
    single-channel (grayscale/Y) input, using BT.601 luma coefficients.

    Modifies `net` in place and returns it. Only the input layer is
    converted; the output-layer conversion remains unimplemented.
    """
    if only_input:
        w = net['0.weight']
        # Collapse the three input channels with luminance weights.
        gray_w = w[:, 0, :, :] * 0.2989 + w[:, 1, :, :] * 0.587 + w[:, 2, :, :] * 0.114
        gray_w.unsqueeze_(1)
        net['0.weight'] = gray_w
    return net
if __name__ == '__main__':
    # Scratch experiment: convert a pretrained VGG19's first conv layer from
    # 3-channel (RGB) to 1-channel (gray) input and verify the swap.
    net = torchvision.models.vgg19(pretrained=True)
    for k,v in net.features.named_parameters():
        if k=='0.weight':
            # BT.601 luma combination of the RGB input channels.
            in_new_filter = v[:,0,:,:]*0.2989 + v[:,1,:,:]*0.587 + v[:,2,:,:]*0.114
            in_new_filter.unsqueeze_(1)
            v = in_new_filter
            print(v.shape)
            print(v[0,0,0,0])
        if k=='0.bias':
            in_new_bias = v
            print(v[0])
    print(net.features[0])
    # Replace the input conv with a fresh 1->64 conv and copy the weights in.
    net.features[0] = B.conv(1, 64, mode='C')
    print(net.features[0])
    net.features[0].weight.data=in_new_filter
    net.features[0].bias.data=in_new_bias
    for k,v in net.features.named_parameters():
        if k=='0.weight':
            print(v[0,0,0,0])
        if k=='0.bias':
            print(v[0])
    # transfer parameters of old model to new one
    # NOTE(review): `model_path` and `model` are undefined in this module --
    # this section cannot run as-is and appears to be scratch code.
    model_old = torch.load(model_path)
    state_dict = model.state_dict()
    for ((key, param),(key2, param2)) in zip(model_old.items(), state_dict.items()):
        state_dict[key2] = param
        print([key, key2])
        # print([param.size(), param2.size()])
    torch.save(state_dict, 'model_new.pth')
    # rgb2gray_net(net)
| 4,039 | 28.705882 | 103 | py |
MaskedDenoising | MaskedDenoising-main/utils/utils_blindsr.py | # -*- coding: utf-8 -*-
import numpy as np
import cv2
import torch
from utils import utils_image as util
import random
from scipy import ndimage
import scipy
import scipy.stats as ss
from scipy.interpolate import interp2d
from scipy.linalg import orth
"""
# --------------------------------------------
# Super-Resolution
# --------------------------------------------
#
# Kai Zhang (cskaizhang@gmail.com)
# https://github.com/cszn
# From 2019/03--2021/08
# --------------------------------------------
"""
def modcrop_np(img, sf):
    """Crop a numpy image so its leading two dimensions are multiples of sf.

    Args:
        img: numpy image, WxH or WxHxC.
        sf (int): scale factor.

    Returns:
        Cropped copy of the image.
    """
    w, h = img.shape[:2]
    return np.copy(img)[:w - w % sf, :h - h % sf, ...]
"""
# --------------------------------------------
# anisotropic Gaussian kernels
# --------------------------------------------
"""
def analytic_kernel(k):
    """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)."""
    k_size = k.shape[0]
    big_size = 3 * k_size - 2
    big_k = np.zeros((big_size, big_size))
    # Superpose a copy of k, scaled by k[r, c], at every (2r, 2c) offset.
    for r in range(k_size):
        for c in range(k_size):
            big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
    # Trim the near-zero borders to keep SR runtime down, then renormalize.
    crop = k_size // 2
    cropped = big_k[crop:-crop, crop:-crop]
    return cropped / cropped.sum()
def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
    """Generate an anisotropic Gaussian kernel.

    Args:
        ksize (int): kernel size, e.g. 15.
        theta (float): rotation angle in [0, pi].
        l1 (float): eigenvalue scaling in [0.1, 50].
        l2 (float): eigenvalue scaling in [0.1, l1]; l1 == l2 yields an
            isotropic Gaussian kernel.

    Returns:
        ndarray: ksize x ksize kernel.
    """
    c, s = np.cos(theta), np.sin(theta)
    # Principal axis after rotating (1, 0) by theta.
    v = np.array([[c, -s], [s, c]]).dot(np.array([1., 0.]))
    V = np.array([[v[0], v[1]], [v[1], -v[0]]])
    D = np.array([[l1, 0], [0, l2]])
    Sigma = V.dot(D).dot(np.linalg.inv(V))
    return gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
def gm_blur_kernel(mean, cov, size=15):
    """Sample a normalized blur kernel from a 2-D Gaussian density."""
    center = size / 2.0 + 0.5
    kernel = np.zeros([size, size])
    for row in range(size):
        for col in range(size):
            # Coordinates relative to the kernel center.
            cy, cx = row - center + 1, col - center + 1
            kernel[row, col] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
    return kernel / np.sum(kernel)
def shift_pixel(x, sf, upper_left=True):
    """Shift pixels by (sf-1)/2 for super-resolution with different scale factors.

    Args:
        x: numpy image, WxHxC or WxH.
        sf (int): scale factor.
        upper_left (bool): shift towards upper-left (True) or lower-right.

    Returns:
        Bilinearly re-sampled image of the same shape.
    """
    # fix: scipy.interpolate.interp2d was removed in SciPy 1.14; on a regular
    # grid, RegularGridInterpolator with method='linear' is the bilinear
    # equivalent of interp2d(kind='linear').
    from scipy.interpolate import RegularGridInterpolator

    h, w = x.shape[:2]
    shift = (sf - 1) * 0.5
    xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
    if upper_left:
        x1 = xv + shift
        y1 = yv + shift
    else:
        x1 = xv - shift
        y1 = yv - shift

    # Clamp sample coordinates to the image border.
    x1 = np.clip(x1, 0, w - 1)
    y1 = np.clip(y1, 0, h - 1)

    # Build the (y, x) query grid once and reuse it for every channel.
    yy, xx = np.meshgrid(y1, x1, indexing='ij')
    pts = np.stack([yy.ravel(), xx.ravel()], axis=-1)

    if x.ndim == 2:
        x = RegularGridInterpolator((yv, xv), x, method='linear')(pts).reshape(h, w)
    if x.ndim == 3:
        for i in range(x.shape[-1]):
            x[:, :, i] = RegularGridInterpolator((yv, xv), x[:, :, i], method='linear')(pts).reshape(h, w)
    return x
def blur(x, k):
    """Blur a batch of images with per-sample kernels via grouped convolution.

    Args:
        x (Tensor): images, N x C x H x W.
        k (Tensor): kernels, N x 1 x h x w.

    Returns:
        Tensor: blurred images, N x C x H x W (replicate-padded borders).
    """
    n, c = x.shape[:2]
    ph, pw = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
    x = torch.nn.functional.pad(x, pad=(ph, pw, ph, pw), mode='replicate')
    # One kernel per (sample, channel) pair: fold N*C into conv groups.
    k = k.repeat(1, c, 1, 1).view(-1, 1, k.shape[2], k.shape[3])
    x = x.view(1, -1, x.shape[2], x.shape[3])
    x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
    return x.view(n, c, x.shape[2], x.shape[3])
def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
    """Sample a random (optionally noisy) anisotropic Gaussian SR kernel.

    Modified version of https://github.com/assafshocher/BlindSR_dataset_generator.
    The Gaussian variances are drawn from [min_var, max_var]; typical values
    scale with sf, e.g. min_var = 0.175 * sf and max_var = 2.5 * sf.
    """
    # Random eigenvalues (lambdas) and rotation angle (theta) for the covariance.
    lambda_1 = min_var + np.random.rand() * (max_var - min_var)
    lambda_2 = min_var + np.random.rand() * (max_var - min_var)
    theta = np.random.rand() * np.pi  # random theta
    noise = -noise_level + np.random.rand(*k_size) * noise_level * 2

    # Covariance from the sampled lambdas and theta.
    LAMBDA = np.diag([lambda_1, lambda_2])
    Q = np.array([[np.cos(theta), -np.sin(theta)],
                  [np.sin(theta), np.cos(theta)]])
    SIGMA = Q @ LAMBDA @ Q.T
    INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]

    # Expectation position: shifts the kernel so the SR image stays aligned.
    MU = k_size // 2 - 0.5 * (scale_factor - 1)  # - 0.5 * (scale_factor - k_size % 2)
    MU = MU[None, None, :, None]

    # Evaluate the Gaussian density over every kernel coordinate.
    [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
    ZZ = np.stack([X, Y], 2)[:, :, :, None] - MU
    ZZ_t = ZZ.transpose(0, 1, 3, 2)
    raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)

    # Normalize to unit sum.
    return raw_kernel / np.sum(raw_kernel)
def fspecial_gaussian(hsize, sigma):
    """MATLAB fspecial('gaussian', hsize, sigma): normalized 2-D Gaussian.

    Args:
        hsize (int): kernel side length.
        sigma (float): Gaussian standard deviation.

    Returns:
        ndarray: hsize x hsize kernel summing to 1 (when non-degenerate).
    """
    hsize = [hsize, hsize]
    siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
    std = sigma
    [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
    arg = -(x * x + y * y) / (2 * std * std)
    h = np.exp(arg)
    # Zero out numerically negligible tails. fix: `scipy.finfo` does not
    # exist in modern SciPy -- machine epsilon comes from numpy.
    h[h < np.finfo(float).eps * h.max()] = 0
    sumh = h.sum()
    if sumh != 0:
        h = h / sumh
    return h
def fspecial_laplacian(alpha):
    """MATLAB fspecial('laplacian', alpha): 3x3 Laplacian approximation."""
    alpha = max([0, min([alpha, 1])])  # clamp alpha to [0, 1]
    corner = alpha / (alpha + 1)
    edge = (1 - alpha) / (alpha + 1)
    return np.array([[corner, edge, corner],
                     [edge, -4 / (alpha + 1), edge],
                     [corner, edge, corner]])
def fspecial(filter_type, *args, **kwargs):
    """MATLAB-style fspecial dispatcher ('gaussian' or 'laplacian').

    python code from:
    https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py

    Unknown filter types fall through and return None.
    """
    if filter_type == 'gaussian':
        return fspecial_gaussian(*args, **kwargs)
    elif filter_type == 'laplacian':
        return fspecial_laplacian(*args, **kwargs)
    return None
"""
# --------------------------------------------
# degradation models
# --------------------------------------------
"""
def bicubic_degradation(x, sf=3):
    """Bicubically downsample an image.

    Args:
        x: HxWxC image in [0, 1].
        sf (int): down-scale factor.

    Returns:
        Bicubicly downsampled LR image.
    """
    return util.imresize_np(x, scale=1 / sf)
def srmd_degradation(x, k, sf=3):
    """Blur with kernel `k`, then bicubically downsample (SRMD degradation).

    Args:
        x: HxWxC image in [0, 1].
        k: hxw blur kernel (double).
        sf (int): down-scale factor.

    Returns:
        Downsampled LR image.

    Reference:
        @inproceedings{zhang2018learning,
          title={Learning a single convolutional super-resolution network for multiple degradations},
          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
          pages={3262--3271},
          year={2018}
        }
    """
    # fix: the scipy.ndimage.filters namespace was removed in SciPy 1.15;
    # the public entry point is scipy.ndimage.convolve.
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')  # 'nearest' | 'mirror'
    x = bicubic_degradation(x, sf=sf)
    return x
def dpsr_degradation(x, k, sf=3):
    """Bicubically downsample, then blur with kernel `k` (DPSR degradation).

    Args:
        x: HxWxC image in [0, 1].
        k: hxw blur kernel (double).
        sf (int): down-scale factor.

    Returns:
        Downsampled LR image.

    Reference:
        @inproceedings{zhang2019deep,
          title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
          pages={1671--1681},
          year={2019}
        }
    """
    x = bicubic_degradation(x, sf=sf)
    # fix: the scipy.ndimage.filters namespace was removed in SciPy 1.15.
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    return x
def classical_degradation(x, k, sf=3):
    """Blur with kernel `k`, then take every sf-th pixel (classical SISR model).

    Args:
        x: HxWxC image, [0, 1]/[0, 255].
        k: hxw blur kernel (double).
        sf (int): down-scale factor.

    Returns:
        Downsampled LR image.
    """
    # fix: the scipy.ndimage.filters namespace was removed in SciPy 1.15.
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    #x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
    st = 0  # sampling offset
    return x[st::sf, st::sf, ...]
def add_sharpening(img, weight=0.5, radius=50, threshold=10):
    """USM (unsharp-mask) sharpening, borrowed from Real-ESRGAN.

    Input image: I; Blurry image: B.
    1. K = I + weight * (I - B)
    2. Mask = 1 if abs(I - B) > threshold, else: 0
    3. Blur mask:
    4. Out = Mask * K + (1 - Mask) * I

    Args:
        img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
        weight (float): Sharp weight. Default: 1.
        radius (float): Kernel size of Gaussian blur. Default: 50.
        threshold (int): residual threshold on the 0-255 scale.
    """
    if radius % 2 == 0:
        radius += 1  # GaussianBlur needs an odd kernel size
    blurry = cv2.GaussianBlur(img, (radius, radius), 0)
    residual = img - blurry
    # Sharpen only where the residual is significant, with a soft transition.
    hard_mask = (np.abs(residual) * 255 > threshold).astype('float32')
    soft_mask = cv2.GaussianBlur(hard_mask, (radius, radius), 0)
    sharpened = np.clip(img + weight * residual, 0, 1)
    return soft_mask * sharpened + (1 - soft_mask) * img
def add_blur(img, sf=4):
    """Blur `img` with a random isotropic or anisotropic Gaussian kernel.

    The kernel width bounds scale with the SR factor `sf`.
    """
    wd2 = 4.0 + sf       # width bound for the anisotropic branch
    wd = 2.0 + 0.2 * sf  # width bound for the isotropic branch
    if random.random() < 0.5:
        l1 = wd2 * random.random()
        l2 = wd2 * random.random()
        k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
    else:
        k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random())
    # fix: the scipy.ndimage.filters namespace was removed in SciPy 1.15.
    img = ndimage.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
    return img
def add_resize(img, sf=4):
    """Randomly rescale `img` (up, down, or keep size), then clip to [0, 1]."""
    rnum = np.random.rand()
    if rnum > 0.8:    # upscale
        sf1 = random.uniform(1, 2)
    elif rnum < 0.7:  # downscale
        sf1 = random.uniform(0.5 / sf, 1)
    else:             # keep size
        sf1 = 1.0
    new_w = int(sf1 * img.shape[1])
    new_h = int(sf1 * img.shape[0])
    img = cv2.resize(img, (new_w, new_h), interpolation=random.choice([1, 2, 3]))
    return np.clip(img, 0.0, 1.0)
def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
    """Add random Gaussian noise: color, grayscale, or channel-correlated."""
    noise_level = random.randint(noise_level1, noise_level2)
    rnum = np.random.rand()
    if rnum > 0.6:
        # Per-channel (color) Gaussian noise.
        img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
    elif rnum < 0.4:
        # Grayscale noise shared across channels.
        img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
    else:
        # Channel-correlated noise with a random covariance matrix.
        L = noise_level2 / 255.
        D = np.diag(np.random.rand(3))
        U = orth(np.random.rand(3, 3))
        conv = np.dot(np.dot(np.transpose(U), D), U)
        img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
    return np.clip(img, 0.0, 1.0)
def add_speckle_noise(img, noise_level1=2, noise_level2=25):
    """Add random signal-dependent (speckle) noise to an image in [0, 1]."""
    noise_level = random.randint(noise_level1, noise_level2)
    img = np.clip(img, 0.0, 1.0)
    rnum = random.random()
    sigma = noise_level / 255.0
    if rnum > 0.6:
        # Per-channel multiplicative Gaussian noise.
        noise = np.random.normal(0, sigma, img.shape).astype(np.float32)
    elif rnum < 0.4:
        # Grayscale noise shared across channels.
        noise = np.random.normal(0, sigma, (*img.shape[:2], 1)).astype(np.float32)
    else:
        # Channel-correlated noise with a random covariance matrix.
        L = noise_level2 / 255.
        D = np.diag(np.random.rand(3))
        U = orth(np.random.rand(3, 3))
        conv = np.dot(np.dot(np.transpose(U), D), U)
        noise = np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
    img += img * noise
    return np.clip(img, 0.0, 1.0)
def add_Poisson_noise(img):
    """Add Poisson (shot) noise with a random photon scale in [1e2, 1e4]."""
    img = np.clip((img * 255.0).round(), 0, 255) / 255.
    vals = 10 ** (2 * random.random() + 2.0)  # scale in [1e2, 1e4]
    if random.random() < 0.5:
        # Per-channel Poisson noise.
        img = np.random.poisson(img * vals).astype(np.float32) / vals
    else:
        # Luminance-only Poisson noise, added back to all channels.
        img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
        img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
        noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
        img += noise_gray[:, :, np.newaxis]
    return np.clip(img, 0.0, 1.0)
def add_JPEG_noise(img):
    """Apply random JPEG compression (quality 30-95) to an RGB [0, 1] image."""
    quality_factor = random.randint(30, 95)
    # cv2's JPEG codec expects BGR uint8.
    img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
    result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
    img = cv2.imdecode(encimg, 1)
    # Back to RGB float32 in [0, 1].
    img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
    return img
def random_crop(lq, hq, sf=4, lq_patchsize=64):
    """Crop a random LR patch and the spatially matching HR patch.

    Args:
        lq: low-quality image, HxWxC.
        hq: high-quality image at `sf` times the LR resolution.
        sf (int): scale factor between hq and lq.
        lq_patchsize (int): LR patch side length.

    Returns:
        (lq_patch, hq_patch): patches of size p x p and (p*sf) x (p*sf).
    """
    h, w = lq.shape[:2]
    top = random.randint(0, h - lq_patchsize)
    left = random.randint(0, w - lq_patchsize)
    lq = lq[top:top + lq_patchsize, left:left + lq_patchsize, :]
    # Matching HR window starts at sf times the LR offset.
    top_hq, left_hq = int(top * sf), int(left * sf)
    hq = hq[top_hq:top_hq + lq_patchsize * sf, left_hq:left_hq + lq_patchsize * sf, :]
    return lq, hq
def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
    """
    This is the degradation model of BSRGAN from the paper
    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
    ----------
    img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf)
    sf: scale factor
    isp_model: camera ISP model
    Returns
    -------
    img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
    hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
    """
    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
    sf_ori = sf
    h1, w1 = img.shape[:2]
    # mod crop so both dimensions are divisible by sf
    # NOTE(review): rows are cropped with w1 and columns with h1 (swapped);
    # harmless for square inputs -- confirm intent for non-square images
    img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
    h, w = img.shape[:2]
    if h < lq_patchsize*sf or w < lq_patchsize*sf:
        raise ValueError(f'img size ({h1}X{w1}) is too small!')
    hq = img.copy()
    # for sf=4, optionally pre-downsample by 2 and run the pipeline at sf=2
    if sf == 4 and random.random() < scale2_prob:   # downsample1
        if np.random.rand() < 0.5:
            img = cv2.resize(img, (int(1/2*img.shape[1]), int(1/2*img.shape[0])), interpolation=random.choice([1,2,3]))
        else:
            img = util.imresize_np(img, 1/2, True)
        img = np.clip(img, 0.0, 1.0)
        sf = 2
    # apply the seven degradation ops in a random order, but force op 2
    # (downsample2) to come before op 3 (downsample3), since op 3 resizes
    # relative to the size recorded in op 2
    shuffle_order = random.sample(range(7), 7)
    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
    if idx1 > idx2:  # keep downsample3 last
        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
    for i in shuffle_order:
        if i == 0:
            img = add_blur(img, sf=sf)
        elif i == 1:
            img = add_blur(img, sf=sf)
        elif i == 2:
            # remember the pre-downsample size; op 3 targets 1/sf of it
            a, b = img.shape[1], img.shape[0]
            # downsample2
            if random.random() < 0.75:
                sf1 = random.uniform(1,2*sf)
                img = cv2.resize(img, (int(1/sf1*img.shape[1]), int(1/sf1*img.shape[0])), interpolation=random.choice([1,2,3]))
            else:
                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6*sf))
                k_shifted = shift_pixel(k, sf)
                k_shifted = k_shifted/k_shifted.sum()  # blur with shifted kernel
                img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
                img = img[0::sf, 0::sf, ...]  # nearest downsampling
            img = np.clip(img, 0.0, 1.0)
        elif i == 3:
            # downsample3: bring the image to exactly 1/sf of the op-2 size
            img = cv2.resize(img, (int(1/sf*a), int(1/sf*b)), interpolation=random.choice([1,2,3]))
            img = np.clip(img, 0.0, 1.0)
        elif i == 4:
            # add Gaussian noise
            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
        elif i == 5:
            # add JPEG noise
            if random.random() < jpeg_prob:
                img = add_JPEG_noise(img)
        elif i == 6:
            # add processed camera sensor noise (only when an ISP model is given)
            if random.random() < isp_prob and isp_model is not None:
                with torch.no_grad():
                    img, hq = isp_model.forward(img.copy(), hq)
    # add final JPEG compression noise
    img = add_JPEG_noise(img)
    # random crop
    img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
    return img, hq
# def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=False, lq_patchsize=64, isp_model=None):
# """
# This is an extended degradation model by combining
# the degradation models of BSRGAN and Real-ESRGAN
# ----------
# img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf)
# sf: scale factor
# use_shuffle: the degradation shuffle
# use_sharp: sharpening the img
# Returns
# -------
# img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
# hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
# """
# h1, w1 = img.shape[:2]
# img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
# h, w = img.shape[:2]
# if h < lq_patchsize*sf or w < lq_patchsize*sf:
# raise ValueError(f'img size ({h1}X{w1}) is too small!')
# if use_sharp:
# img = add_sharpening(img)
# hq = img.copy()
# if random.random() < shuffle_prob:
# shuffle_order = random.sample(range(13), 13)
# else:
# shuffle_order = list(range(13))
# # local shuffle for noise, JPEG is always the last one
# shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6)))
# shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13)))
# poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1
# for i in shuffle_order:
# if i == 0:
# img = add_blur(img, sf=sf)
# elif i == 1:
# img = add_resize(img, sf=sf)
# elif i == 2:
# img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
# elif i == 3:
# if random.random() < poisson_prob:
# img = add_Poisson_noise(img)
# elif i == 4:
# if random.random() < speckle_prob:
# img = add_speckle_noise(img)
# elif i == 5:
# if random.random() < isp_prob and isp_model is not None:
# with torch.no_grad():
# img, hq = isp_model.forward(img.copy(), hq)
# elif i == 6:
# img = add_JPEG_noise(img)
# elif i == 7:
# img = add_blur(img, sf=sf)
# elif i == 8:
# img = add_resize(img, sf=sf)
# elif i == 9:
# img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
# elif i == 10:
# if random.random() < poisson_prob:
# img = add_Poisson_noise(img)
# elif i == 11:
# if random.random() < speckle_prob:
# img = add_speckle_noise(img)
# elif i == 12:
# if random.random() < isp_prob and isp_model is not None:
# with torch.no_grad():
# img, hq = isp_model.forward(img.copy(), hq)
# else:
# print('check the shuffle!')
# # resize to desired size
# img = cv2.resize(img, (int(1/sf*hq.shape[1]), int(1/sf*hq.shape[0])), interpolation=random.choice([1, 2, 3]))
# # add final JPEG compression noise
# img = add_JPEG_noise(img)
# # random crop
# img, hq = random_crop(img, hq, sf, lq_patchsize)
# return img, hq
def add_Gaussian_noise_color(img, noise_level1=2, noise_level2=25, color_ratio=1):
    """Add additive white Gaussian noise at a random level.

    NOTE(review): color_ratio is currently unused; it is kept for interface
    compatibility. The input array is modified in place by the addition.

    Args:
        img: float image in [0, 1].
        noise_level1: lower bound of the random noise level (0-255 scale).
        noise_level2: upper bound of the random noise level (0-255 scale).
        color_ratio: unused.

    Returns:
        Noisy image clipped to [0, 1].
    """
    level = random.randint(noise_level1, noise_level2)
    img += np.random.normal(0, level/255.0, img.shape).astype(np.float32)
    return np.clip(img, 0.0, 1.0)
if __name__ == '__main__':
    # visual check: degrade a test image 20 times and save LQ|HQ side by side
    img = util.imread_uint('utils/test.png', 3)
    img = util.uint2single(img)
    sf = 4
    for i in range(20):
        img_lq, img_hq = degradation_bsrgan(img, sf=sf, lq_patchsize=72)
        print(i)
        # nearest-neighbor upsample of the LQ patch for a same-size comparison
        lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf*img_lq.shape[1]), int(sf*img_lq.shape[0])), interpolation=0)
        img_concat = np.concatenate([lq_nearest, util.single2uint(img_hq)], axis=1)
        util.imsave(img_concat, str(i)+'.png')
#    for i in range(10):
#        img_lq, img_hq = degradation_bsrgan_plus(img, sf=sf, shuffle_prob=0.1, use_sharp=True, lq_patchsize=64)
#        print(i)
#        lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf*img_lq.shape[1]), int(sf*img_lq.shape[0])), interpolation=0)
#        img_concat = np.concatenate([lq_nearest, util.single2uint(img_hq)], axis=1)
#        util.imsave(img_concat, str(i)+'.png')
#    run utils/utils_blindsr.py
| 21,023 | 31.85 | 147 | py |
MaskedDenoising | MaskedDenoising-main/utils/utils_deblur.py | # -*- coding: utf-8 -*-
import numpy as np
import scipy
from scipy import fftpack
import torch
from math import cos, sin
from numpy import zeros, ones, prod, array, pi, log, min, mod, arange, sum, mgrid, exp, pad, round
from numpy.random import randn, rand
from scipy.signal import convolve2d
import cv2
import random
# import utils_image as util
'''
modified by Kai Zhang (github: https://github.com/cszn)
03/03/2019
'''
def get_uperleft_denominator(img, kernel):
    """Numerator/denominator terms for Wiener-style deconvolution (numpy).

    Args:
        img: HxWxC image.
        kernel: hxw blur kernel (PSF).

    Returns:
        (upperleft, denominator): conj(OTF) * FFT(img), shape HxWxC complex,
        and |OTF|^2, shape HxWx1.
    """
    otf = psf2otf(kernel, img.shape[:2])
    denominator = np.expand_dims(np.abs(otf)**2, axis=2)
    img_freq = np.fft.fft2(img, axes=[0, 1])
    upperleft = np.expand_dims(np.conj(otf), axis=2) * img_freq
    return upperleft, denominator
def get_uperleft_denominator_pytorch(img, kernel):
    """Torch version of get_uperleft_denominator using real/imag pair tensors.

    Args:
        img: NxCxHxW tensor.
        kernel: Nx1xhxw tensor (PSF).

    Returns:
        (upperleft, denominator): NxCxHxWx2 product conj(OTF) * FFT(img),
        and Nx1xHxW squared magnitude of the OTF.
    """
    otf = p2o(kernel, img.shape[-2:])  # Nx1xHxWx2, real/imag in the last dim
    denominator = otf[..., 0]**2 + otf[..., 1]**2  # Nx1xHxW
    upperleft = cmul(cconj(otf), rfft(img))  # Nx1xHxWx2 * NxCxHxWx2
    return upperleft, denominator
def c2c(x):
    """Convert a complex numpy array to a float32 torch tensor with a trailing (real, imag) axis."""
    real = np.float32(x.real)
    imag = np.float32(x.imag)
    return torch.from_numpy(np.stack([real, imag], axis=-1))
def r2c(x):
    """Promote a real tensor to complex form by appending a zero imaginary channel."""
    return torch.stack((x, torch.zeros_like(x)), -1)
def cdiv(x, y):
    """Elementwise complex division x / y for tensors with a trailing (real, imag) axis."""
    xr, xi = x[..., 0], x[..., 1]
    yr, yi = y[..., 0], y[..., 1]
    denom = yr**2 + yi**2
    real = (xr*yr + xi*yi) / denom
    imag = (xi*yr - xr*yi) / denom
    return torch.stack([real, imag], -1)
def cabs(x):
    """Magnitude of complex values stored as trailing (real, imag) pairs."""
    squared = x[..., 0]**2 + x[..., 1]**2
    return torch.pow(squared, 0.5)
def cmul(t1, t2):
    """Elementwise complex multiplication.

    Both inputs carry (real, imag) pairs in the last dimension
    (e.g. NxCxHxWx2); the output has the same layout.
    """
    a, b = t1[..., 0], t1[..., 1]
    c, d = t2[..., 0], t2[..., 1]
    return torch.stack([a*c - b*d, a*d + b*c], dim=-1)
def cconj(t, inplace=False):
    """Complex conjugate for (real, imag)-pair tensors.

    Negates the imaginary channel. Works on a clone unless inplace=True,
    in which case the input tensor is modified and returned.
    """
    out = t if inplace else t.clone()
    out[..., 1] *= -1
    return out
def rfft(t):
    # Two-sided 2-D FFT of a real tensor, returned as (..., 2) real/imag pairs.
    # NOTE(review): torch.rfft was removed in PyTorch 1.8 -- this requires an
    # older torch or a migration to torch.fft; confirm the target version.
    return torch.rfft(t, 2, onesided=False)
def irfft(t):
    # Inverse of rfft above (two-sided). NOTE(review): torch.irfft was removed
    # in PyTorch 1.8 -- requires an older torch or torch.fft migration.
    return torch.irfft(t, 2, onesided=False)
def fft(t):
    # 2-D FFT of a complex (real/imag pair) tensor. NOTE(review): the function
    # form torch.fft was replaced by the torch.fft module in PyTorch 1.8.
    return torch.fft(t, 2)
def ifft(t):
    # 2-D inverse FFT of a complex (real/imag pair) tensor. NOTE(review):
    # torch.ifft was removed in PyTorch 1.8 in favor of the torch.fft module.
    return torch.ifft(t, 2)
def p2o(psf, shape):
    '''
    Convert a point-spread function (PSF) to its optical transfer function
    (OTF) of the given spatial size.
    # psf: NxCxhxw
    # shape: [H,W]
    # otf: NxCxHxWx2 (real/imag pairs in the last dimension)
    '''
    # zero-pad the PSF to the target spatial size
    otf = torch.zeros(psf.shape[:-2] + shape).type_as(psf)
    otf[...,:psf.shape[2],:psf.shape[3]].copy_(psf)
    # circularly shift so the kernel center sits at the origin (no phase ramp)
    for axis, axis_size in enumerate(psf.shape[2:]):
        otf = torch.roll(otf, -int(axis_size / 2), dims=axis+2)
    # NOTE(review): torch.rfft was removed in PyTorch 1.8 -- requires old torch
    otf = torch.rfft(otf, 2, onesided=False)
    # zero out imaginary parts that are within FFT roundoff error
    n_ops = torch.sum(torch.tensor(psf.shape).type_as(psf) * torch.log2(torch.tensor(psf.shape).type_as(psf)))
    otf[...,1][torch.abs(otf[...,1])<n_ops*2.22e-16] = torch.tensor(0).type_as(psf)
    return otf
# otf2psf: not sure where I got this one from. Maybe translated from Octave source code or whatever. It's just math.
def otf2psf(otf, outsize=None):
    """Convert an optical transfer function (OTF) back to a point-spread
    function (PSF), optionally cropping the result to `outsize`
    (port of the MATLAB/Octave otf2psf)."""
    insize = np.array(otf.shape)
    # inverse FFT, then shift the kernel center back from the origin
    psf = np.fft.ifftn(otf, axes=(0, 1))
    for axis, axis_size in enumerate(insize):
        psf = np.roll(psf, np.floor(axis_size / 2).astype(int), axis=axis)
    if type(outsize) != type(None):
        insize = np.array(otf.shape)
        outsize = np.array(outsize)
        n = max(np.size(outsize), np.size(insize))
        # outsize = postpad(outsize(:), n, 1);
        # insize = postpad(insize(:) , n, 1);
        # pad the size vectors to a common length (MATLAB postpad equivalent)
        colvec_out = outsize.flatten().reshape((np.size(outsize), 1))
        colvec_in = insize.flatten().reshape((np.size(insize), 1))
        outsize = np.pad(colvec_out, ((0, max(0, n - np.size(colvec_out))), (0, 0)), mode="constant")
        insize = np.pad(colvec_in, ((0, max(0, n - np.size(colvec_in))), (0, 0)), mode="constant")
        pad = (insize - outsize) / 2
        if np.any(pad < 0):
            print("otf2psf error: OUTSIZE must be smaller than or equal than OTF size")
        prepad = np.floor(pad)
        postpad = np.ceil(pad)
        dims_start = prepad.astype(int)
        dims_end = (insize - postpad).astype(int)
        # crop the centered outsize window from the PSF
        for i in range(len(dims_start.shape)):
            psf = np.take(psf, range(dims_start[i][0], dims_end[i][0]), axis=i)
    # drop the imaginary part if it is within FFT roundoff error
    n_ops = np.sum(otf.size * np.log2(otf.shape))
    psf = np.real_if_close(psf, tol=n_ops)
    return psf
# psf2otf copied/modified from https://github.com/aboucaud/pypher/blob/master/pypher/pypher.py
def psf2otf(psf, shape=None):
    """
    Convert point-spread function to optical transfer function.
    Compute the Fast Fourier Transform (FFT) of the point-spread
    function (PSF) array and creates the optical transfer function (OTF)
    array that is not influenced by the PSF off-centering.
    By default, the OTF array is the same size as the PSF array.
    To ensure that the OTF is not altered due to PSF off-centering, PSF2OTF
    post-pads the PSF array (down or to the right) with zeros to match
    dimensions specified in OUTSIZE, then circularly shifts the values of
    the PSF array up (or to the left) until the central pixel reaches (1,1)
    position.
    Parameters
    ----------
    psf : `numpy.ndarray`
        PSF array
    shape : int
        Output shape of the OTF array
    Returns
    -------
    otf : `numpy.ndarray`
        OTF array
    Notes
    -----
    Adapted from MATLAB psf2otf function
    """
    if type(shape) == type(None):
        shape = psf.shape
    shape = np.array(shape)
    if np.all(psf == 0):
        # an all-zero PSF maps to an all-zero OTF of the requested shape
        # return np.zeros_like(psf)
        return np.zeros(shape)
    if len(psf.shape) == 1:
        # promote a 1-D kernel to a 1xN row so the 2-D FFT below applies
        psf = psf.reshape((1, psf.shape[0]))
    inshape = psf.shape
    psf = zero_pad(psf, shape, position='corner')
    # circularly shift the kernel center to index (0, 0) to avoid a phase ramp
    for axis, axis_size in enumerate(inshape):
        psf = np.roll(psf, -int(axis_size / 2), axis=axis)
    # Compute the OTF
    otf = np.fft.fft2(psf, axes=(0, 1))
    # Estimate the rough number of operations involved in the FFT
    # and discard the PSF imaginary part if within roundoff error
    # roundoff error  = machine epsilon = sys.float_info.epsilon
    # or np.finfo().eps
    n_ops = np.sum(psf.size * np.log2(psf.shape))
    otf = np.real_if_close(otf, tol=n_ops)
    return otf
def zero_pad(image, shape, position='corner'):
    """
    Extends image to a certain size with zeros
    Parameters
    ----------
    image: real 2d `numpy.ndarray`
        Input image
    shape: tuple of int
        Desired output shape of the image
    position : str, optional
        The position of the input image in the output one:
            * 'corner'
                top-left corner (default)
            * 'center'
                centered
    Returns
    -------
    padded_img: real `numpy.ndarray`
        The zero-padded image (the input itself when shapes already match)
    Raises
    ------
    ValueError
        If the target shape is non-positive, smaller than the input, or
        (for 'center') differs from the input shape by an odd amount.
    """
    shape = np.asarray(shape, dtype=int)
    imshape = np.asarray(image.shape, dtype=int)
    # np.all replaces np.alltrue, which was deprecated and removed in NumPy 2.0
    if np.all(imshape == shape):
        return image
    if np.any(shape <= 0):
        raise ValueError("ZERO_PAD: null or negative shape given")
    dshape = shape - imshape
    if np.any(dshape < 0):
        raise ValueError("ZERO_PAD: target size smaller than source one")
    pad_img = np.zeros(shape, dtype=image.dtype)
    idx, idy = np.indices(imshape)
    if position == 'center':
        # a centered pad needs the same parity on both sides
        if np.any(dshape % 2 != 0):
            raise ValueError("ZERO_PAD: source and target shapes "
                             "have different parity.")
        offx, offy = dshape // 2
    else:
        offx, offy = (0, 0)
    pad_img[idx + offx, idy + offy] = image
    return pad_img
'''
Reducing boundary artifacts
'''
def opt_fft_size(n):
    '''
    Return, for each length in n, the smallest FFT-friendly length >= it.

    FFT-friendly lengths are of the form 2^a * 3^b * 5^c * 7^d, optionally
    times one extra factor of 11 or 13, up to 2048. Lengths above 2048 map
    to -1. (After Sunghyun Cho's opt_fft_size.m; Kai Zhang, 03/03/2019.)
    '''
    LUT_size = 2048
    lut = np.zeros(LUT_size)
    # mark every 7-smooth number (and its 11x / 13x multiples) up to LUT_size
    p2 = 1
    while p2 <= LUT_size:
        p3 = p2
        while p3 <= LUT_size:
            p5 = p3
            while p5 <= LUT_size:
                p7 = p5
                while p7 <= LUT_size:
                    lut[p7 - 1] = p7
                    if p7 * 11 <= LUT_size:
                        lut[p7 * 11 - 1] = p7 * 11
                    if p7 * 13 <= LUT_size:
                        lut[p7 * 13 - 1] = p7 * 13
                    p7 *= 7
                p5 *= 5
            p3 *= 3
        p2 *= 2
    # back-fill: every empty slot points at the next marked length above it
    nxt = 0
    for i in range(LUT_size, 0, -1):
        if lut[i - 1] != 0:
            nxt = i - 1
        else:
            lut[i - 1] = nxt + 1
    out = np.zeros(len(n))
    for idx, length in enumerate(n):
        out[idx] = lut[length - 1] if length <= LUT_size else -1
    return out
def wrap_boundary_liu(img, img_size):
    """
    Pad an image to img_size with smooth wrap-around boundaries.
    Reducing boundary artifacts in image deconvolution
    Renting Liu, Jiaya Jia
    ICIP 2008

    Handles grayscale (2-D) input directly and 3-channel (3-D) input
    channel by channel.
    """
    if img.ndim == 2:
        ret = wrap_boundary(img, img_size)
    elif img.ndim == 3:
        channels = [wrap_boundary(img[:, :, c], img_size) for c in range(3)]
        ret = np.stack(channels, 2)
    return ret
def wrap_boundary(img, img_size):
    """
    python code from:
    https://github.com/ys-koshelev/nla_deblur/blob/90fe0ab98c26c791dcbdf231fe6f938fca80e2a0/boundaries.py
    Reducing boundary artifacts in image deconvolution
    Renting Liu, Jiaya Jia
    ICIP 2008

    Pads a 2-D image to img_size; the padded strips interpolate smoothly
    between opposite image borders (filled by solving a minimum-Laplacian
    problem), making the result periodic and FFT-friendly.
    """
    (H, W) = np.shape(img)
    H_w = int(img_size[0]) - H
    W_w = int(img_size[1]) - W
    # ret = np.zeros((img_size[0], img_size[1]));
    alpha = 1
    HG = img[:, :]
    # r_A: vertical strip bridging the bottom border back to the top border
    r_A = np.zeros((alpha*2+H_w, W))
    r_A[:alpha, :] = HG[-alpha:, :]
    r_A[-alpha:, :] = HG[:alpha, :]
    a = np.arange(H_w)/(H_w-1)
    # r_A(alpha+1:end-alpha, 1) = (1-a)*r_A(alpha,1) + a*r_A(end-alpha+1,1)
    r_A[alpha:-alpha, 0] = (1-a)*r_A[alpha-1, 0] + a*r_A[-alpha, 0]
    # r_A(alpha+1:end-alpha, end) = (1-a)*r_A(alpha,end) + a*r_A(end-alpha+1,end)
    r_A[alpha:-alpha, -1] = (1-a)*r_A[alpha-1, -1] + a*r_A[-alpha, -1]
    # r_B: horizontal strip bridging the right border back to the left border
    r_B = np.zeros((H, alpha*2+W_w))
    r_B[:, :alpha] = HG[:, -alpha:]
    r_B[:, -alpha:] = HG[:, :alpha]
    a = np.arange(W_w)/(W_w-1)
    r_B[0, alpha:-alpha] = (1-a)*r_B[0, alpha-1] + a*r_B[0, -alpha]
    r_B[-1, alpha:-alpha] = (1-a)*r_B[-1, alpha-1] + a*r_B[-1, -alpha]
    # fill the strip interiors by solving the minimum-Laplacian problem
    if alpha == 1:
        A2 = solve_min_laplacian(r_A[alpha-1:, :])
        B2 = solve_min_laplacian(r_B[:, alpha-1:])
        r_A[alpha-1:, :] = A2
        r_B[:, alpha-1:] = B2
    else:
        A2 = solve_min_laplacian(r_A[alpha-1:-alpha+1, :])
        r_A[alpha-1:-alpha+1, :] = A2
        B2 = solve_min_laplacian(r_B[:, alpha-1:-alpha+1])
        r_B[:, alpha-1:-alpha+1] = B2
    A = r_A
    B = r_B
    # r_C: corner patch bordered by the A and B strips
    r_C = np.zeros((alpha*2+H_w, alpha*2+W_w))
    r_C[:alpha, :] = B[-alpha:, :]
    r_C[-alpha:, :] = B[:alpha, :]
    r_C[:, :alpha] = A[:, -alpha:]
    r_C[:, -alpha:] = A[:, :alpha]
    if alpha == 1:
        C2 = C2 = solve_min_laplacian(r_C[alpha-1:, alpha-1:])  # NOTE(review): duplicated assignment, harmless
        r_C[alpha-1:, alpha-1:] = C2
    else:
        C2 = solve_min_laplacian(r_C[alpha-1:-alpha+1, alpha-1:-alpha+1])
        r_C[alpha-1:-alpha+1, alpha-1:-alpha+1] = C2
    C = r_C
    # return C
    # strip the seed borders and assemble the padded image as [img B; A C]
    A = A[alpha-1:-alpha-1, :]
    B = B[:, alpha:-alpha]
    C = C[alpha:-alpha, alpha:-alpha]
    ret = np.vstack((np.hstack((img, B)), np.hstack((A, C))))
    return ret
def solve_min_laplacian(boundary_image):
    # Solve Laplace's equation on the interior given the boundary values,
    # via the discrete sine transform (DST).
    # NOTE: mutates boundary_image in place (the interior is zeroed and then
    # overwritten with the solution); the returned array is the same object.
    (H, W) = np.shape(boundary_image)
    # Laplacian
    f = np.zeros((H, W))
    # boundary image contains image intensities at boundaries
    boundary_image[1:-1, 1:-1] = 0
    # 5-point Laplacian stencil applied to the boundary contribution
    j = np.arange(2, H)-1
    k = np.arange(2, W)-1
    f_bp = np.zeros((H, W))
    f_bp[np.ix_(j, k)] = -4*boundary_image[np.ix_(j, k)] + boundary_image[np.ix_(j, k+1)] + boundary_image[np.ix_(j, k-1)] + boundary_image[np.ix_(j-1, k)] + boundary_image[np.ix_(j+1, k)]
    del(j, k)
    f1 = f - f_bp  # subtract boundary points contribution
    del(f_bp, f)
    # DST Sine Transform algo starts here
    f2 = f1[1:-1,1:-1]
    del(f1)
    # compute sine transform (forward DST along both axes)
    if f2.shape[1] == 1:
        tt = fftpack.dst(f2, type=1, axis=0)/2
    else:
        tt = fftpack.dst(f2, type=1)/2
    if tt.shape[0] == 1:
        f2sin = np.transpose(fftpack.dst(np.transpose(tt), type=1, axis=0)/2)
    else:
        f2sin = np.transpose(fftpack.dst(np.transpose(tt), type=1)/2)
    del(f2)
    # compute Eigen Values of the discrete Laplacian in the DST basis
    [x, y] = np.meshgrid(np.arange(1, W-1), np.arange(1, H-1))
    denom = (2*np.cos(np.pi*x/(W-1))-2) + (2*np.cos(np.pi*y/(H-1)) - 2)
    # divide
    f3 = f2sin/denom
    del(f2sin, x, y)
    # compute Inverse Sine Transform
    if f3.shape[0] == 1:
        tt = fftpack.idst(f3*2, type=1, axis=1)/(2*(f3.shape[1]+1))
    else:
        tt = fftpack.idst(f3*2, type=1, axis=0)/(2*(f3.shape[0]+1))
    del(f3)
    if tt.shape[1] == 1:
        img_tt = np.transpose(fftpack.idst(np.transpose(tt)*2, type=1)/(2*(tt.shape[0]+1)))
    else:
        img_tt = np.transpose(fftpack.idst(np.transpose(tt)*2, type=1, axis=0)/(2*(tt.shape[1]+1)))
    del(tt)
    # put solution in inner points; outer points obtained from boundary image
    img_direct = boundary_image
    img_direct[1:-1, 1:-1] = 0
    img_direct[1:-1, 1:-1] = img_tt
    return img_direct
"""
Created on Thu Jan 18 15:36:32 2018
@author: italo
https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
"""
"""
Syntax
h = fspecial(type)
h = fspecial('average',hsize)
h = fspecial('disk',radius)
h = fspecial('gaussian',hsize,sigma)
h = fspecial('laplacian',alpha)
h = fspecial('log',hsize,sigma)
h = fspecial('motion',len,theta)
h = fspecial('prewitt')
h = fspecial('sobel')
"""
def fspecial_average(hsize=3):
    """Normalized hsize x hsize box (mean) filter; entries sum to 1."""
    kernel = np.ones((hsize, hsize))
    return kernel / hsize**2
def fspecial_disk(radius):
    """Disk (pillbox) averaging filter -- not implemented.

    Raises:
        NotImplementedError: always. The original code raised the
        ``NotImplemented`` constant, which is a TypeError in Python 3; the
        unreachable draft implementation that followed has been removed.
    """
    raise NotImplementedError("fspecial('disk') is not implemented")
def fspecial_gaussian(hsize, sigma):
    """Rotationally symmetric Gaussian lowpass filter (MATLAB fspecial('gaussian')).

    Args:
        hsize: side length of the square kernel.
        sigma: standard deviation of the Gaussian.

    Returns:
        hsize x hsize kernel with tiny tail values zeroed, normalized to sum to 1.
    """
    hsize = [hsize, hsize]
    siz = [(hsize[0]-1.0)/2.0, (hsize[1]-1.0)/2.0]
    std = sigma
    [x, y] = np.meshgrid(np.arange(-siz[1], siz[1]+1), np.arange(-siz[0], siz[0]+1))
    arg = -(x*x + y*y)/(2*std*std)
    h = np.exp(arg)
    # zero out negligible tails; np.finfo replaces scipy.finfo, which was a
    # deprecated numpy re-export removed from modern SciPy
    h[h < np.finfo(float).eps * h.max()] = 0
    sumh = h.sum()
    if sumh != 0:
        h = h/sumh
    return h
def fspecial_laplacian(alpha):
    """3x3 Laplacian filter with shape parameter alpha, clamped to [0, 1]."""
    alpha = max([0, min([alpha, 1])])
    corner = alpha / (alpha + 1)
    edge = (1 - alpha) / (alpha + 1)
    center = -4 / (alpha + 1)
    return np.array([[corner, edge, corner],
                     [edge, center, edge],
                     [corner, edge, corner]])
def fspecial_log(hsize, sigma):
    """Laplacian-of-Gaussian filter -- not implemented.

    Raises NotImplementedError (the original raised the ``NotImplemented``
    constant, which is a TypeError in Python 3).
    """
    raise NotImplementedError("fspecial('log') is not implemented")
def fspecial_motion(motion_len, theta):
    """Linear motion-blur filter -- not implemented.

    Raises NotImplementedError (the original raised the ``NotImplemented``
    constant, which is a TypeError in Python 3).
    """
    raise NotImplementedError("fspecial('motion') is not implemented")
def fspecial_prewitt():
    """3x3 Prewitt filter emphasizing horizontal edges."""
    return np.array([[1, 1, 1],
                     [0, 0, 0],
                     [-1, -1, -1]])
def fspecial_sobel():
    """3x3 Sobel filter emphasizing horizontal edges."""
    return np.array([[1, 2, 1],
                     [0, 0, 0],
                     [-1, -2, -1]])
def fspecial(filter_type, *args, **kwargs):
    '''
    MATLAB-style fspecial dispatcher.
    python code from:
    https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py

    Supported filter_type values: 'average', 'disk', 'gaussian', 'laplacian',
    'log', 'motion', 'prewitt', 'sobel'. Unknown types return None.
    '''
    builders = {
        'average': fspecial_average,
        'disk': fspecial_disk,
        'gaussian': fspecial_gaussian,
        'laplacian': fspecial_laplacian,
        'log': fspecial_log,
        'motion': fspecial_motion,
        'prewitt': fspecial_prewitt,
        'sobel': fspecial_sobel,
    }
    builder = builders.get(filter_type)
    if builder is not None:
        return builder(*args, **kwargs)
def fspecial_gauss(size, sigma):
    """size x size Gaussian kernel normalized to sum to 1."""
    yy, xx = np.mgrid[-size // 2 + 1 : size // 2 + 1, -size // 2 + 1 : size // 2 + 1]
    kernel = np.exp(-((xx ** 2 + yy ** 2) / (2.0 * sigma ** 2)))
    return kernel / kernel.sum()
def blurkernel_synthesis(h=37, w=None):
    # Synthesize a random motion-blur kernel of size h x w (square by default)
    # by rasterizing a random 3-D camera trajectory.
    # https://github.com/tkkcc/prior/blob/879a0b6c117c810776d8cc6b63720bf29f7d0cc4/util/gen_kernel.py
    w = h if w is None else w
    kdims = [h, w]
    x = randomTrajectory(250)
    k = None
    # retry until a non-empty kernel is produced (kernelFromTrajectory draws a
    # fresh random kernel size each attempt and returns None on an empty bin grid)
    while k is None:
        k = kernelFromTrajectory(x)
    # center pad to kdims
    pad_width = ((kdims[0] - k.shape[0]) // 2, (kdims[1] - k.shape[1]) // 2)
    pad_width = [(pad_width[0],), (pad_width[1],)]
    if pad_width[0][0]<0 or pad_width[1][0]<0:
        k = k[0:h, 0:h]
    else:
        k = pad(k, pad_width, "constant")
    x1,x2 = k.shape
    # with probability 1/4, enlarge the kernel and crop back to vary its support
    if np.random.randint(0, 4) == 1:
        k = cv2.resize(k, (random.randint(x1, 5*x1), random.randint(x2, 5*x2)), interpolation=cv2.INTER_LINEAR)
        y1, y2 = k.shape
        k = k[(y1-x1)//2: (y1-x1)//2+x1, (y2-x2)//2: (y2-x2)//2+x2]
    # fall back to a Gaussian kernel when almost all mass was cropped away
    if sum(k)<0.1:
        k = fspecial_gaussian(h, 0.1+6*np.random.rand(1))
    k = k / sum(k)  # normalize to unit mass (numpy sum over the whole array)
    # import matplotlib.pyplot as plt
    # plt.imshow(k, interpolation="nearest", cmap="gray")
    # plt.show()
    return k
def kernelFromTrajectory(x):
    # Rasterize trajectory x (3xT; rows 0 and 1 used as image-plane coords)
    # into a blur kernel via 2-D histogramming, then smooth with a 3x3 Gaussian.
    # Returns None when no trajectory point falls inside the histogram grid.
    # Random odd kernel size, capped at 27 (log/rand/round/min here are the
    # numpy versions imported at module level).
    h = 5 - log(rand()) / 0.15
    h = round(min([h, 27])).astype(int)
    h = h + 1 - h % 2
    w = h
    k = zeros((h, w))
    xmin = min(x[0])
    xmax = max(x[0])
    ymin = min(x[1])
    ymax = max(x[1])
    xthr = arange(xmin, xmax, (xmax - xmin) / w)
    ythr = arange(ymin, ymax, (ymax - ymin) / h)
    # count trajectory points falling into each (x, y) bin
    for i in range(1, xthr.size):
        for j in range(1, ythr.size):
            idx = (
                (x[0, :] >= xthr[i - 1])
                & (x[0, :] < xthr[i])
                & (x[1, :] >= ythr[j - 1])
                & (x[1, :] < ythr[j])
            )
            k[i - 1, j - 1] = sum(idx)
    if sum(k) == 0:
        return
    # normalize, blur lightly, and re-normalize to unit mass
    k = k / sum(k)
    k = convolve2d(k, fspecial_gauss(3, 1), "same")
    k = k / sum(k)
    return k
def randomTrajectory(T):
    # Simulate a random 3-D camera trajectory of T steps: velocity v and
    # orientation r evolve by random impulses damped as 1/(t+1); each velocity
    # step is rotated by the current orientation before being integrated.
    x = zeros((3, T))   # accumulated positions
    v = randn(3, T)     # velocities
    r = zeros((3, T))   # Euler-angle orientations
    trv = 1 / 1         # translation step gain
    trr = 2 * pi / T    # rotation step gain
    for t in range(1, T):
        F_rot = randn(3) / (t + 1) + r[:, t - 1]
        F_trans = randn(3) / (t + 1)
        r[:, t] = r[:, t - 1] + trr * F_rot
        v[:, t] = v[:, t - 1] + trv * F_trans
        st = v[:, t]
        st = rot3D(st, r[:, t])
        x[:, t] = x[:, t - 1] + st
    return x
def rot3D(x, r):
    """Rotate 3-vector x by Euler angles r = (rx, ry, rz), applied X then Y then Z."""
    rot_x = array([[1, 0, 0],
                   [0, cos(r[0]), -sin(r[0])],
                   [0, sin(r[0]), cos(r[0])]])
    rot_y = array([[cos(r[1]), 0, sin(r[1])],
                   [0, 1, 0],
                   [-sin(r[1]), 0, cos(r[1])]])
    rot_z = array([[cos(r[2]), -sin(r[2]), 0],
                   [sin(r[2]), cos(r[2]), 0],
                   [0, 0, 1]])
    return (rot_z @ rot_y @ rot_x) @ x
if __name__ == '__main__':
    # smoke tests for the helpers in this module
    a = opt_fft_size([111])
    print(a)
    print(fspecial('gaussian', 5, 1))
    print(p2o(torch.zeros(1,1,4,4).float(),(14,14)).shape)
    # sample and display a random blur kernel
    k = blurkernel_synthesis(11)
    import matplotlib.pyplot as plt
    plt.imshow(k, interpolation="nearest", cmap="gray")
    plt.show()
| 19,609 | 28.893293 | 188 | py |
MaskedDenoising | MaskedDenoising-main/utils/utils_model.py | # -*- coding: utf-8 -*-
import numpy as np
import torch
from utils import utils_image as util
import re
import glob
import os
'''
# --------------------------------------------
# Model
# --------------------------------------------
# Kai Zhang (github: https://github.com/cszn)
# 03/Mar/2019
# --------------------------------------------
'''
def find_last_checkpoint(save_dir, net_type='G', pretrained_path=None):
    """
    Locate the most recent checkpoint '<iter>_<net_type>.pth' in save_dir.
    # ---------------------------------------
    # Kai Zhang (github: https://github.com/cszn)
    # 03/Mar/2019
    # ---------------------------------------
    Args:
        save_dir: model folder
        net_type: 'G' or 'D' or 'optimizerG' or 'optimizerD'
        pretrained_path: pretrained model path. If save_dir does not have any model, load from pretrained_path

    Return:
        init_iter: iteration number (0 when no checkpoint exists)
        init_path: model path (pretrained_path when no checkpoint exists)
    """
    pattern = os.path.join(save_dir, '*_{}.pth'.format(net_type))
    checkpoints = glob.glob(pattern)
    if not checkpoints:
        return 0, pretrained_path
    # extract the iteration number from each checkpoint filename
    iterations = [int(re.findall(r"(\d+)_{}.pth".format(net_type), path)[0])
                  for path in checkpoints]
    best = max(iterations)
    return best, os.path.join(save_dir, '{}_{}.pth'.format(best, net_type))
def test_mode(model, L, mode=0, refield=32, min_size=256, sf=1, modulo=1):
'''
# ---------------------------------------
# Kai Zhang (github: https://github.com/cszn)
# 03/Mar/2019
# ---------------------------------------
Args:
model: trained model
L: input Low-quality image
mode:
(0) normal: test(model, L)
(1) pad: test_pad(model, L, modulo=16)
(2) split: test_split(model, L, refield=32, min_size=256, sf=1, modulo=1)
(3) x8: test_x8(model, L, modulo=1) ^_^
(4) split and x8: test_split_x8(model, L, refield=32, min_size=256, sf=1, modulo=1)
refield: effective receptive filed of the network, 32 is enough
useful when split, i.e., mode=2, 4
min_size: min_sizeXmin_size image, e.g., 256X256 image
useful when split, i.e., mode=2, 4
sf: scale factor for super-resolution, otherwise 1
modulo: 1 if split
useful when pad, i.e., mode=1
Returns:
E: estimated image
# ---------------------------------------
'''
if mode == 0:
E = test(model, L)
elif mode == 1:
E = test_pad(model, L, modulo, sf)
elif mode == 2:
E = test_split(model, L, refield, min_size, sf, modulo)
elif mode == 3:
E = test_x8(model, L, modulo, sf)
elif mode == 4:
E = test_split_x8(model, L, refield, min_size, sf, modulo)
return E
'''
# --------------------------------------------
# normal (0)
# --------------------------------------------
'''
def test(model, L):
E = model(L)
return E
'''
# --------------------------------------------
# pad (1)
# --------------------------------------------
'''
def test_pad(model, L, modulo=16, sf=1):
h, w = L.size()[-2:]
paddingBottom = int(np.ceil(h/modulo)*modulo-h)
paddingRight = int(np.ceil(w/modulo)*modulo-w)
L = torch.nn.ReplicationPad2d((0, paddingRight, 0, paddingBottom))(L)
E = model(L)
E = E[..., :h*sf, :w*sf]
return E
'''
# --------------------------------------------
# split (function)
# --------------------------------------------
'''
def test_split_fn(model, L, refield=32, min_size=256, sf=1, modulo=1):
    """
    Recursively split L into four overlapping quadrants until each piece is at
    most min_size x min_size in area, run the model on the pieces, and stitch
    the results back together.
    Args:
        model: trained model
        L: input Low-quality image
        refield: effective receptive filed of the network, 32 is enough
        min_size: min_sizeXmin_size image, e.g., 256X256 image
        sf: scale factor for super-resolution, otherwise 1
        modulo: 1 if split
    Returns:
        E: estimated result
    """
    h, w = L.size()[-2:]
    # small enough: pad to a multiple of modulo, run, crop back
    if h*w <= min_size**2:
        L = torch.nn.ReplicationPad2d((0, int(np.ceil(w/modulo)*modulo-w), 0, int(np.ceil(h/modulo)*modulo-h)))(L)
        E = model(L)
        E = E[..., :h*sf, :w*sf]
    else:
        # quadrant boundaries rounded up to a multiple of refield, so each
        # piece carries enough surrounding context for the receptive field
        top = slice(0, (h//2//refield+1)*refield)
        bottom = slice(h - (h//2//refield+1)*refield, h)
        left = slice(0, (w//2//refield+1)*refield)
        right = slice(w - (w//2//refield+1)*refield, w)
        Ls = [L[..., top, left], L[..., top, right], L[..., bottom, left], L[..., bottom, right]]
        if h * w <= 4*(min_size**2):
            Es = [model(Ls[i]) for i in range(4)]
        else:
            # still too large: recurse on each quadrant
            Es = [test_split_fn(model, Ls[i], refield=refield, min_size=min_size, sf=sf, modulo=modulo) for i in range(4)]
        b, c = Es[0].size()[:2]
        E = torch.zeros(b, c, sf * h, sf * w).type_as(L)
        # paste back only the non-overlapping half of each quadrant's output
        E[..., :h//2*sf, :w//2*sf] = Es[0][..., :h//2*sf, :w//2*sf]
        E[..., :h//2*sf, w//2*sf:w*sf] = Es[1][..., :h//2*sf, (-w + w//2)*sf:]
        E[..., h//2*sf:h*sf, :w//2*sf] = Es[2][..., (-h + h//2)*sf:, :w//2*sf]
        E[..., h//2*sf:h*sf, w//2*sf:w*sf] = Es[3][..., (-h + h//2)*sf:, (-w + w//2)*sf:]
    return E
'''
# --------------------------------------------
# split (2)
# --------------------------------------------
'''
def test_split(model, L, refield=32, min_size=256, sf=1, modulo=1):
    """Public wrapper around the recursive quadrant splitter test_split_fn."""
    return test_split_fn(model, L, refield=refield, min_size=min_size, sf=sf, modulo=modulo)
'''
# --------------------------------------------
# x8 (3)
# --------------------------------------------
'''
def test_x8(model, L, modulo=1, sf=1):
    """Geometric self-ensemble: average the output over 8 flip/rotate variants.

    Each augmented input is run through test_pad, the inverse augmentation is
    applied to the output (modes 3 and 5 are undone with mode 8 - i, per the
    util.augment_img_tensor4 convention), and the aligned results are averaged.
    """
    outputs = []
    for aug_mode in range(8):
        augmented = util.augment_img_tensor4(L, mode=aug_mode)
        result = test_pad(model, augmented, modulo=modulo, sf=sf)
        inverse_mode = 8 - aug_mode if aug_mode in (3, 5) else aug_mode
        outputs.append(util.augment_img_tensor4(result, mode=inverse_mode))
    stacked = torch.stack(outputs, dim=0)
    return stacked.mean(dim=0, keepdim=False)
'''
# --------------------------------------------
# split and x8 (4)
# --------------------------------------------
'''
def test_split_x8(model, L, refield=32, min_size=256, sf=1, modulo=1):
    """x8 geometric self-ensemble combined with quadrant splitting.

    Same scheme as test_x8, but each augmented input goes through the
    recursive splitter test_split_fn instead of test_pad.
    """
    outputs = []
    for aug_mode in range(8):
        augmented = util.augment_img_tensor4(L, mode=aug_mode)
        result = test_split_fn(model, augmented, refield=refield, min_size=min_size, sf=sf, modulo=modulo)
        inverse_mode = 8 - aug_mode if aug_mode in (3, 5) else aug_mode
        outputs.append(util.augment_img_tensor4(result, mode=inverse_mode))
    stacked = torch.stack(outputs, dim=0)
    return stacked.mean(dim=0, keepdim=False)
'''
# ^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-
# _^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^
# ^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-^_^-
'''
'''
# --------------------------------------------
# print
# --------------------------------------------
'''
# --------------------------------------------
# print model
# --------------------------------------------
def print_model(model):
    """Print the model summary produced by describe_model."""
    print(describe_model(model))
# --------------------------------------------
# print params
# --------------------------------------------
def print_params(model):
    """Print the per-parameter statistics table produced by describe_params."""
    print(describe_params(model))
'''
# --------------------------------------------
# information
# --------------------------------------------
'''
# --------------------------------------------
# model information
# --------------------------------------------
def info_model(model):
    """Return the model summary string produced by describe_model."""
    return describe_model(model)
# --------------------------------------------
# params information
# --------------------------------------------
def info_params(model):
    """Return the per-parameter statistics string produced by describe_params."""
    return describe_params(model)
'''
# --------------------------------------------
# description
# --------------------------------------------
'''
# --------------------------------------------
# model name and total number of parameters
# --------------------------------------------
def describe_model(model):
    """Build a text summary: class name, parameter count, and full structure.

    DataParallel wrappers are unwrapped first so the inner module is described.
    """
    if isinstance(model, torch.nn.DataParallel):
        model = model.module
    n_params = sum(p.numel() for p in model.parameters())
    lines = [
        '',
        'models name: {}'.format(model.__class__.__name__),
        'Params number: {}'.format(n_params),
        'Net structure:\n{}'.format(str(model)),
        '',
    ]
    return '\n'.join(lines)
# --------------------------------------------
# parameters description
# --------------------------------------------
def describe_params(model):
    """Build a per-parameter statistics table (mean/min/max/std/shape/name).

    DataParallel wrappers are unwrapped first. Entries whose name contains
    'num_batches_tracked' (BatchNorm counters) are skipped.
    """
    if isinstance(model, torch.nn.DataParallel):
        model = model.module
    lines = ['']
    # NOTE: the header format consumes five of its six arguments; the trailing
    # 'param_name' argument is silently ignored by str.format
    lines.append(' | {:^6s} | {:^6s} | {:^6s} | {:^6s} || {:<20s}'.format('mean', 'min', 'max', 'std', 'shape', 'param_name'))
    for name, param in model.state_dict().items():
        if 'num_batches_tracked' in name:
            continue
        v = param.data.clone().float()
        lines.append(' | {:>6.3f} | {:>6.3f} | {:>6.3f} | {:>6.3f} | {} || {:s}'.format(v.mean(), v.min(), v.max(), v.std(), v.shape, name))
    return '\n'.join(lines) + '\n'
if __name__ == '__main__':
    # smoke test: a minimal one-conv network run through every test_mode variant
    class Net(torch.nn.Module):
        def __init__(self, in_channels=3, out_channels=3):
            super(Net, self).__init__()
            # single 3x3 conv with padding so spatial size is preserved
            self.conv = torch.nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, padding=1)

        def forward(self, x):
            x = self.conv(x)
            return x

    # CUDA timing events (currently unused; NOTE(review): presumably requires
    # a CUDA-enabled torch build -- confirm on CPU-only installs)
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)

    model = Net()
    model = model.eval()
    print_model(model)
    print_params(model)
    x = torch.randn((2,3,401,401))
    torch.cuda.empty_cache()
    with torch.no_grad():
        # exercise modes 0..4 (normal / pad / split / x8 / split+x8)
        for mode in range(5):
            y = test_mode(model, x, mode, refield=32, min_size=256, sf=1, modulo=1)
            print(y.shape)
    # run utils/utils_model.py
| 9,982 | 29.160121 | 148 | py |
MaskedDenoising | MaskedDenoising-main/utils/utils_regularizers.py | import torch
import torch.nn as nn
'''
# --------------------------------------------
# Kai Zhang (github: https://github.com/cszn)
# 03/Mar/2019
# --------------------------------------------
'''
# --------------------------------------------
# SVD Orthogonal Regularization
# --------------------------------------------
def regularizer_orth(m):
    """
    # ----------------------------------------
    # SVD Orthogonal Regularization
    # ----------------------------------------
    # Applies regularization to the training by performing the
    # orthogonalization technique described in the paper
    # This function is to be called by the torch.nn.Module.apply() method,
    # which applies svd_orthogonalization() to every layer of the model.
    # usage: net.apply(regularizer_orth)
    # ----------------------------------------
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        w = m.weight.data.clone()
        c_out, c_in, f1, f2 = w.size()
        # dtype = m.weight.data.type()
        # Flatten the 4-D conv weight into a 2-D (f1*f2*c_in, c_out) matrix for SVD.
        w = w.permute(2, 3, 1, 0).contiguous().view(f1*f2*c_in, c_out)
        # self.netG.apply(svd_orthogonalization)
        # NOTE(review): torch.svd is deprecated in favor of torch.linalg.svd — confirm torch version.
        u, s, v = torch.svd(w)
        # Nudge singular values outside the fixed band [0.5, 1.5] slightly back toward it.
        s[s > 1.5] = s[s > 1.5] - 1e-4
        s[s < 0.5] = s[s < 0.5] + 1e-4
        # Rebuild the weight from the adjusted spectrum and restore the conv layout.
        w = torch.mm(torch.mm(u, torch.diag(s)), v.t())
        m.weight.data = w.view(f1, f2, c_in, c_out).permute(3, 2, 0, 1) # .type(dtype)
    else:
        pass
# --------------------------------------------
# SVD Orthogonal Regularization
# --------------------------------------------
def regularizer_orth2(m):
    """
    # ----------------------------------------
    # Applies regularization to the training by performing the
    # orthogonalization technique described in the paper
    # This function is to be called by the torch.nn.Module.apply() method,
    # which applies svd_orthogonalization() to every layer of the model.
    # usage: net.apply(regularizer_orth2)
    # ----------------------------------------
    # Variant of regularizer_orth: the clipping band is relative to the mean
    # singular value instead of the fixed [0.5, 1.5] interval.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        w = m.weight.data.clone()
        c_out, c_in, f1, f2 = w.size()
        # dtype = m.weight.data.type()
        # Flatten the 4-D conv weight into a 2-D (f1*f2*c_in, c_out) matrix for SVD.
        w = w.permute(2, 3, 1, 0).contiguous().view(f1*f2*c_in, c_out)
        u, s, v = torch.svd(w)
        s_mean = s.mean()
        # Nudge singular values outside [0.5*mean, 1.5*mean] slightly back toward the band.
        s[s > 1.5*s_mean] = s[s > 1.5*s_mean] - 1e-4
        s[s < 0.5*s_mean] = s[s < 0.5*s_mean] + 1e-4
        w = torch.mm(torch.mm(u, torch.diag(s)), v.t())
        m.weight.data = w.view(f1, f2, c_in, c_out).permute(3, 2, 0, 1) # .type(dtype)
    else:
        pass
def regularizer_clip(m):
    """
    # ----------------------------------------
    # usage: net.apply(regularizer_clip)
    # ----------------------------------------
    Nudge Conv/Linear weights and biases that fall outside [-1.5, 1.5]
    back toward the interval by a small epsilon.
    """
    eps = 1e-4
    c_min, c_max = -1.5, 1.5
    name = m.__class__.__name__
    if 'Conv' in name or 'Linear' in name:
        weight = m.weight.data.clone()
        weight[weight > c_max] -= eps
        weight[weight < c_min] += eps
        m.weight.data = weight
        if m.bias is not None:
            bias = m.bias.data.clone()
            bias[bias > c_max] -= eps
            bias[bias < c_min] += eps
            m.bias.data = bias
| 3,416 | 31.542857 | 87 | py |
MaskedDenoising | MaskedDenoising-main/utils/utils_mask.py | # -*- coding: utf-8 -*-
import numpy as np
import cv2
import torch
from utils import utils_image as util
import random
from scipy import ndimage
import scipy
import scipy.stats as ss
from scipy.interpolate import interp2d
from scipy.linalg import orth
"""
# --------------------------------------------
# Super-Resolution
# --------------------------------------------
#
# Kai Zhang (cskaizhang@gmail.com)
# https://github.com/cszn
# From 2019/03--2021/08
# --------------------------------------------
"""
def modcrop_np(img, sf):
    '''
    Crop an image so that its first two dimensions are divisible by sf.

    Args:
        img: numpy image, WxH or WxHxC
        sf: scale factor
    Return:
        cropped copy of the image
    '''
    d0, d1 = img.shape[:2]
    return np.copy(img)[:d0 - d0 % sf, :d1 - d1 % sf, ...]
"""
# --------------------------------------------
# anisotropic Gaussian kernels
# --------------------------------------------
"""
def analytic_kernel(k):
    """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
    size = k.shape[0]
    # Accumulate shifted copies of k, each weighted by one of its own entries.
    big = np.zeros((3 * size - 2, 3 * size - 2))
    for row in range(size):
        for col in range(size):
            big[2 * row:2 * row + size, 2 * col:2 * col + size] += k[row, col] * k
    # Trim the (very small) border values to keep SR runtime down.
    margin = size // 2
    trimmed = big[margin:-margin, margin:-margin]
    # Renormalize so the kernel sums to 1.
    return trimmed / trimmed.sum()
def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
    """ generate an anisotropic Gaussian kernel
    Args:
        ksize : e.g., 15, kernel size
        theta : [0,  pi], rotation angle range
        l1    : [0.1,50], scaling of eigenvalues
        l2    : [0.1,l1], scaling of eigenvalues
        If l1 = l2, will get an isotropic Gaussian kernel.
    Returns:
        k     : kernel
    """
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    # Rotate the unit x-axis by theta to get the principal direction.
    v = np.dot(np.array([[cos_t, -sin_t], [sin_t, cos_t]]), np.array([1., 0.]))
    V = np.array([[v[0], v[1]], [v[1], -v[0]]])
    D = np.array([[l1, 0], [0, l2]])
    # Covariance with eigenvalues l1, l2 along the rotated axes.
    Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
    return gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
def gm_blur_kernel(mean, cov, size=15):
    """Sample a size x size blur kernel from a 2-D Gaussian pdf, normalized to sum 1."""
    center = size / 2.0 + 0.5
    kernel = np.zeros([size, size])
    for row in range(size):
        for col in range(size):
            dy = row - center + 1
            dx = col - center + 1
            kernel[row, col] = ss.multivariate_normal.pdf([dx, dy], mean=mean, cov=cov)
    return kernel / np.sum(kernel)
def shift_pixel(x, sf, upper_left=True):
    """shift pixel for super-resolution with different scale factors
    Args:
        x: WxHxC or WxH
        sf: scale factor
        upper_left: shift direction
    """
    h, w = x.shape[:2]
    # Sub-pixel shift of (sf-1)/2 aligns the LR sampling grid with the HR grid.
    shift = (sf-1)*0.5
    xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
    if upper_left:
        x1 = xv + shift
        y1 = yv + shift
    else:
        x1 = xv - shift
        y1 = yv - shift
    # Clamp the query coordinates so interpolation stays inside the image.
    x1 = np.clip(x1, 0, w-1)
    y1 = np.clip(y1, 0, h-1)
    # NOTE(review): scipy.interpolate.interp2d was removed in SciPy >= 1.14;
    # this needs porting to RegularGridInterpolator for modern SciPy.
    if x.ndim == 2:
        x = interp2d(xv, yv, x)(x1, y1)
    if x.ndim == 3:
        # Interpolate each channel independently (written back in place).
        for i in range(x.shape[-1]):
            x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
    return x
def blur(x, k):
    '''
    Depthwise blur of a batch of images with one kernel per sample.
    x: image, NxcxHxW
    k: kernel, Nx1xhxw
    '''
    n, c = x.shape[:2]
    p1, p2 = (k.shape[-2]-1)//2, (k.shape[-1]-1)//2
    # Replicate-pad so the output keeps the input spatial size.
    x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
    # Repeat the per-sample kernel across channels and fold batch into groups.
    k = k.repeat(1, c, 1, 1).view(-1, 1, k.shape[2], k.shape[3])
    x = x.view(1, -1, x.shape[2], x.shape[3])
    x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n*c)
    return x.view(n, c, x.shape[2], x.shape[3])
def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
    """Sample a random anisotropic Gaussian downsampling kernel (optionally with multiplicative noise).

    modified version of https://github.com/assafshocher/BlindSR_dataset_generator
    Kai Zhang
    min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var
    max_var = 2.5 * sf
    """
    # Set random eigen-vals (lambdas) and angle (theta) for COV matrix
    lambda_1 = min_var + np.random.rand() * (max_var - min_var)
    lambda_2 = min_var + np.random.rand() * (max_var - min_var)
    theta = np.random.rand() * np.pi # random theta
    # Per-pixel multiplicative noise in [-noise_level, +noise_level].
    noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
    # Set COV matrix using Lambdas and Theta
    LAMBDA = np.diag([lambda_1, lambda_2])
    Q = np.array([[np.cos(theta), -np.sin(theta)],
                  [np.sin(theta), np.cos(theta)]])
    SIGMA = Q @ LAMBDA @ Q.T
    INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
    # Set expectation position (shifting kernel for aligned image)
    MU = k_size // 2 - 0.5*(scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2)
    MU = MU[None, None, :, None]
    # Create meshgrid for Gaussian
    [X,Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
    Z = np.stack([X, Y], 2)[:, :, :, None]
    # Calcualte Gaussian for every pixel of the kernel
    ZZ = Z-MU
    ZZ_t = ZZ.transpose(0,1,3,2)
    # Quadratic form (z-mu)^T Sigma^-1 (z-mu) evaluated on the whole grid at once.
    raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
    # shift the kernel so it will be centered
    #raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
    # Normalize the kernel and return
    #kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
    kernel = raw_kernel / np.sum(raw_kernel)
    return kernel
def fspecial_gaussian(hsize, sigma):
    """Return an hsize x hsize Gaussian filter with standard deviation sigma.

    Mirrors MATLAB's fspecial('gaussian'): values below machine epsilon
    (relative to the maximum) are zeroed, then the kernel is normalized to sum 1.
    """
    siz = [(hsize - 1.0) / 2.0, (hsize - 1.0) / 2.0]
    [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
    arg = -(x * x + y * y) / (2 * sigma * sigma)
    h = np.exp(arg)
    # Bug fix: `scipy.finfo` does not exist in modern SciPy (AttributeError);
    # machine epsilon comes from numpy.
    h[h < np.finfo(float).eps * h.max()] = 0
    sumh = h.sum()
    if sumh != 0:
        h = h / sumh
    return h
def fspecial_laplacian(alpha):
    """3x3 Laplacian filter parameterized by alpha in [0, 1] (MATLAB fspecial-style)."""
    alpha = min(max(alpha, 0), 1)
    corner = alpha / (alpha + 1)
    edge = (1 - alpha) / (alpha + 1)
    center = -4 / (alpha + 1)
    return np.array([[corner, edge, corner],
                     [edge, center, edge],
                     [corner, edge, corner]])
def fspecial(filter_type, *args, **kwargs):
    '''
    Dispatch to the matching fspecial_* kernel builder ('gaussian' or 'laplacian').
    python code from:
    https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
    '''
    builders = {
        'gaussian': fspecial_gaussian,
        'laplacian': fspecial_laplacian,
    }
    builder = builders.get(filter_type)
    if builder is not None:
        return builder(*args, **kwargs)
"""
# --------------------------------------------
# degradation models
# --------------------------------------------
"""
def bicubic_degradation(x, sf=3):
    '''
    Bicubic downscaling via the project's MATLAB-style resize (util.imresize_np).
    Args:
        x: HxWxC image, [0, 1]
        sf: down-scale factor
    Return:
        bicubicly downsampled LR image
    '''
    x = util.imresize_np(x, scale=1/sf)
    return x
def srmd_degradation(x, k, sf=3):
    ''' blur + bicubic downsampling
    Args:
        x: HxWxC image, [0, 1]
        k: hxw, double
        sf: down-scale factor
    Return:
        downsampled LR image
    Reference:
        @inproceedings{zhang2018learning,
          title={Learning a single convolutional super-resolution network for multiple degradations},
          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
          pages={3262--3271},
          year={2018}
        }
    '''
    # Fix: the scipy.ndimage.filters namespace was removed in modern SciPy;
    # ndimage.convolve is the supported equivalent.
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')  # 'nearest' | 'mirror'
    x = bicubic_degradation(x, sf=sf)
    return x
def dpsr_degradation(x, k, sf=3):
    ''' bicubic downsampling + blur
    Args:
        x: HxWxC image, [0, 1]
        k: hxw, double
        sf: down-scale factor
    Return:
        downsampled LR image
    Reference:
        @inproceedings{zhang2019deep,
          title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
          pages={1671--1681},
          year={2019}
        }
    '''
    x = bicubic_degradation(x, sf=sf)
    # Fix: the scipy.ndimage.filters namespace was removed in modern SciPy;
    # ndimage.convolve is the supported equivalent.
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    return x
def classical_degradation(x, k, sf=3):
    ''' blur + downsampling
    Args:
        x: HxWxC image, [0, 1]/[0, 255]
        k: hxw, double
        sf: down-scale factor
    Return:
        downsampled LR image
    '''
    # Fix: the scipy.ndimage.filters namespace was removed in modern SciPy;
    # ndimage.convolve is the supported equivalent.
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    #x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
    # Nearest-style downsampling starting at offset 0.
    st = 0
    return x[st::sf, st::sf, ...]
def add_sharpening(img, weight=0.5, radius=50, threshold=10):
    """USM sharpening. borrowed from real-ESRGAN
    Input image: I; Blurry image: B.
    1. K = I + weight * (I - B)
    2. Mask = 1 if abs(I - B) > threshold, else: 0
    3. Blur mask:
    4. Out = Mask * K + (1 - Mask) * I
    Args:
        img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
        weight (float): Sharp weight. Default: 1.
        radius (float): Kernel size of Gaussian blur. Default: 50.
        threshold (int):
    """
    if radius % 2 == 0:
        # cv2.GaussianBlur requires an odd kernel size.
        radius += 1
    blur = cv2.GaussianBlur(img, (radius, radius), 0)
    residual = img - blur
    # Sharpen only where the residual is significant (scaled back to 0-255 units).
    mask = np.abs(residual) * 255 > threshold
    mask = mask.astype('float32')
    # Soft (blurred) mask avoids hard halo edges at the sharpened transitions.
    soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
    K = img + weight * residual
    K = np.clip(K, 0, 1)
    return soft_mask * K + (1 - soft_mask) * img
def add_blur(img, sf=4):
    """Blur img with a random (an)isotropic Gaussian kernel whose width scales with sf."""
    wd2 = 4.0 + sf    # eigenvalue scale for the anisotropic branch
    wd = 2.0 + 0.2*sf # std scale for the isotropic branch
    if random.random() < 0.5:
        l1 = wd2*random.random()
        l2 = wd2*random.random()
        k = anisotropic_Gaussian(ksize=2*random.randint(2,11)+3, theta=random.random()*np.pi, l1=l1, l2=l2)
    else:
        k = fspecial('gaussian', 2*random.randint(2,11)+3, wd*random.random())
    # Fix: the scipy.ndimage.filters namespace was removed in modern SciPy;
    # ndimage.convolve is the supported equivalent.
    img = ndimage.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
    return img
def add_resize(img, sf=4):
    """Randomly rescale img: upscale (p=0.2), downscale (p=0.7), or keep size (p=0.1)."""
    rnum = np.random.rand()
    if rnum > 0.8:  # up
        sf1 = random.uniform(1, 2)
    elif rnum < 0.7:  # down
        sf1 = random.uniform(0.5/sf, 1)
    else:
        sf1 = 1.0
    # Interpolation is picked at random among cv2 codes 1/2/3 (linear/cubic/area).
    img = cv2.resize(img, (int(sf1*img.shape[1]), int(sf1*img.shape[0])), interpolation=random.choice([1, 2, 3]))
    img = np.clip(img, 0.0, 1.0)
    return img
def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
    """Add Gaussian noise in place: color (p=0.4), grayscale (p=0.4), or channel-correlated (p=0.2)."""
    noise_level = random.randint(noise_level1, noise_level2)
    rnum = np.random.rand()
    if rnum > 0.6:   # add color Gaussian noise
        img += np.random.normal(0, noise_level/255.0, img.shape).astype(np.float32)
    elif rnum < 0.4: # add grayscale Gaussian noise
        img += np.random.normal(0, noise_level/255.0, (*img.shape[:2], 1)).astype(np.float32)
    else:            # add noise
        # NOTE(review): this branch uses noise_level2 rather than the sampled
        # noise_level — confirm that is intentional.
        L = noise_level2/255.
        D = np.diag(np.random.rand(3))
        U = orth(np.random.rand(3,3))
        # Random channel covariance U^T D U scaled by L^2.
        conv = np.dot(np.dot(np.transpose(U), D), U)
        img += np.random.multivariate_normal([0,0,0], np.abs(L**2*conv), img.shape[:2]).astype(np.float32)
    img = np.clip(img, 0.0, 1.0)
    return img
def add_speckle_noise(img, noise_level1=2, noise_level2=25):
    """Add multiplicative (speckle) noise: img += img * gaussian_noise."""
    noise_level = random.randint(noise_level1, noise_level2)
    img = np.clip(img, 0.0, 1.0)
    rnum = random.random()
    if rnum > 0.6:
        # Per-channel speckle.
        img += img*np.random.normal(0, noise_level/255.0, img.shape).astype(np.float32)
    elif rnum < 0.4:
        # Grayscale speckle broadcast across channels.
        img += img*np.random.normal(0, noise_level/255.0, (*img.shape[:2], 1)).astype(np.float32)
    else:
        # NOTE(review): this branch uses noise_level2 rather than the sampled
        # noise_level — confirm that is intentional.
        L = noise_level2/255.
        D = np.diag(np.random.rand(3))
        U = orth(np.random.rand(3,3))
        conv = np.dot(np.dot(np.transpose(U), D), U)
        img += img*np.random.multivariate_normal([0,0,0], np.abs(L**2*conv), img.shape[:2]).astype(np.float32)
    img = np.clip(img, 0.0, 1.0)
    return img
def add_Poisson_noise(img):
    """Add shot (Poisson) noise, either per-channel or luminance-only."""
    # Quantize to 8-bit levels first so the Poisson rates are well defined.
    img = np.clip((img * 255.0).round(), 0, 255) / 255.
    vals = 10**(2*random.random()+2.0)  # [2, 4]
    if random.random() < 0.5:
        img = np.random.poisson(img * vals).astype(np.float32) / vals
    else:
        # Compute Poisson noise on the luminance only, then add it to all channels.
        img_gray = np.dot(img[...,:3], [0.299, 0.587, 0.114])
        img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
        noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
        img += noise_gray[:, :, np.newaxis]
    img = np.clip(img, 0.0, 1.0)
    return img
def add_JPEG_noise(img):
    """Round-trip img through in-memory JPEG compression at a random quality in [30, 95]."""
    quality_factor = random.randint(30, 95)
    # cv2 expects BGR uint8 for imencode; util handles float<->uint8 conversion.
    img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
    result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
    img = cv2.imdecode(encimg, 1)
    img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
    return img
def random_crop(lq, hq, sf=4, lq_patchsize=64):
    """Randomly crop an aligned LR/HR patch pair; the HR crop is sf times larger."""
    h, w = lq.shape[:2]
    top = random.randint(0, h - lq_patchsize)
    left = random.randint(0, w - lq_patchsize)
    lq = lq[top:top + lq_patchsize, left:left + lq_patchsize, :]
    # Matching HR window starts at the scaled offset.
    top_h, left_h = int(top * sf), int(left * sf)
    hq = hq[top_h:top_h + lq_patchsize*sf, left_h:left_h + lq_patchsize*sf, :]
    return lq, hq
def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
    """
    This is the degradation model of BSRGAN from the paper
    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
    ----------
    img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf)
    sf: scale factor
    isp_model: camera ISP model
    Returns
    -------
    img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
    hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
    """
    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
    sf_ori = sf
    h1, w1 = img.shape[:2]
    img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
    h, w = img.shape[:2]
    if h < lq_patchsize*sf or w < lq_patchsize*sf:
        raise ValueError(f'img size ({h1}X{w1}) is too small!')
    hq = img.copy()
    if sf == 4 and random.random() < scale2_prob:   # downsample1
        # Occasionally pre-downscale by 2 and run the rest of the pipeline at sf=2.
        if np.random.rand() < 0.5:
            img = cv2.resize(img, (int(1/2*img.shape[1]), int(1/2*img.shape[0])), interpolation=random.choice([1,2,3]))
        else:
            img = util.imresize_np(img, 1/2, True)
        img = np.clip(img, 0.0, 1.0)
        sf = 2
    # Random order of the 7 degradation stages, keeping downsample2 before downsample3.
    shuffle_order = random.sample(range(7), 7)
    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
    if idx1 > idx2:  # keep downsample3 last
        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
    for i in shuffle_order:
        if i == 0:
            img = add_blur(img, sf=sf)
        elif i == 1:
            img = add_blur(img, sf=sf)
        elif i == 2:
            a, b = img.shape[1], img.shape[0]
            # downsample2
            if random.random() < 0.75:
                sf1 = random.uniform(1,2*sf)
                img = cv2.resize(img, (int(1/sf1*img.shape[1]), int(1/sf1*img.shape[0])), interpolation=random.choice([1,2,3]))
            else:
                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6*sf))
                k_shifted = shift_pixel(k, sf)
                k_shifted = k_shifted/k_shifted.sum()  # blur with shifted kernel
                # Fix: scipy.ndimage.filters was removed in modern SciPy; use ndimage.convolve.
                img = ndimage.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
                img = img[0::sf, 0::sf, ...]  # nearest downsampling
            img = np.clip(img, 0.0, 1.0)
        elif i == 3:
            # downsample3
            img = cv2.resize(img, (int(1/sf*a), int(1/sf*b)), interpolation=random.choice([1,2,3]))
            img = np.clip(img, 0.0, 1.0)
        elif i == 4:
            # add Gaussian noise
            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
        elif i == 5:
            # add JPEG noise
            if random.random() < jpeg_prob:
                img = add_JPEG_noise(img)
        elif i == 6:
            # add processed camera sensor noise
            if random.random() < isp_prob and isp_model is not None:
                with torch.no_grad():
                    img, hq = isp_model.forward(img.copy(), hq)
    # add final JPEG compression noise
    img = add_JPEG_noise(img)
    # random crop
    img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
    return img, hq
def add_Gaussian_noise_color(img, noise_level1=2, noise_level2=25, color_ratio=1):
    """Add color Gaussian noise (in place) with a level drawn uniformly from
    [noise_level1, noise_level2]; result is clipped to [0, 1]."""
    level = random.randint(noise_level1, noise_level2)
    img += np.random.normal(0, level/255.0, img.shape).astype(np.float32)
    return np.clip(img, 0.0, 1.0)
def input_mask(image, prob_=0.75, value=0.1):
    """
    Multiplicative bernoulli
    Zero out each pixel (all channels together) with probability prob_,
    then shift masked pixels to -value.
    """
    h, w = image.shape[:2]
    keep = np.random.choice([0, 1], size=(h, w), p=[prob_, 1 - prob_])
    keep = np.repeat(keep[:, :, np.newaxis], 3, axis=2)
    masked = np.multiply(image, keep)
    return masked - value + value * keep
def input_mask_with_noise(img, sf=1, lq_patchsize=64, noise_level=15, if_mask=True, mask1=75, mask2=75):
    """Build an (input, target) pair: mod-crop, add Gaussian noise, random-crop,
    then apply a Bernoulli mask to the noisy input."""
    h1, w1 = img.shape[:2]
    img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
    h, w = img.shape[:2]
    if h < lq_patchsize*sf or w < lq_patchsize*sf:
        raise ValueError(f'img size ({h1}X{w1}) is too small!')
    hq = img.copy()
    if noise_level > 0:
        # Fixed noise level: both bounds are set to noise_level.
        img = add_Gaussian_noise_color(img, noise_level1=noise_level, noise_level2=noise_level)
    img, hq = random_crop(img, hq, sf, lq_patchsize)
    if if_mask:
        # Mask ratio drawn uniformly from [mask1, mask2] percent.
        prob = random.randint(mask1, mask2) / 100
        # prob = 0.75
        img = input_mask(img, prob_=prob)
    return img, hq
# def add_m_bernoulli_noise_with_mask(image, mask, prob_=0.75, value=0.1):
# noise_image = np.multiply(image, mask)
# noise_image = noise_image - value + value * mask
# return noise_image
# def input_mask_with_noise_mask(img, sf=4, shuffle_prob=0.5, use_sharp=False, lq_patchsize=64,
# isp_model=None, noise_level=15, if_mask=True, mask1=75, mask2=75):
# # print(img.shape)
# h1, w1 = img.shape[:2]
# img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
# h, w = img.shape[:2]
# if h < lq_patchsize*sf or w < lq_patchsize*sf:
# raise ValueError(f'img size ({h1}X{w1}) is too small!')
# if use_sharp:
# img = add_sharpening(img)
# hq = img.copy()
# if noise_level > 0:
# img = add_Gaussian_noise_color(img, noise_level1=noise_level, noise_level2=noise_level)
# img, hq = random_crop(img, hq, sf, lq_patchsize)
# if if_mask:
# prob = random.randint(mask1, mask2) / 100
# x = img.shape[0]
# y = img.shape[1]
# mask = np.random.choice([0, 1], size=(x, y), p=[prob, 1 - prob])
# mask = np.repeat(mask[:, :, np.newaxis], 3, axis=2)
# img = add_m_bernoulli_noise_with_mask(img, mask, prob_=prob)
# mask_inv = mask * -1 + 1
# hq = add_m_bernoulli_noise_with_mask(hq, mask_inv, prob_=prob, value=0)
# # print(type(mask))
# return img, hq, mask_inv
# if __name__ == '__main__':
# img = util.imread_uint('utils/test.png', 3)
# img = util.uint2single(img)
# sf = 4
# for i in range(20):
# img_lq, img_hq = degradation_bsrgan(img, sf=sf, lq_patchsize=72)
# print(i)
# lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf*img_lq.shape[1]), int(sf*img_lq.shape[0])), interpolation=0)
# img_concat = np.concatenate([lq_nearest, util.single2uint(img_hq)], axis=1)
# util.imsave(img_concat, str(i)+'.png')
| 19,881 | 31.119548 | 147 | py |
MaskedDenoising | MaskedDenoising-main/utils/utils_bnorm.py | import torch
import torch.nn as nn
"""
# --------------------------------------------
# Batch Normalization
# --------------------------------------------
# Kai Zhang (cskaizhang@gmail.com)
# https://github.com/cszn
# 01/Jan/2019
# --------------------------------------------
"""
# --------------------------------------------
# remove/delete specified layer
# --------------------------------------------
def deleteLayer(model, layer_type=nn.BatchNorm2d):
    ''' Kai Zhang, 11/Jan/2019.
    Recursively remove every child module of the given type from the model.
    '''
    for name, child in list(model.named_children()):
        if isinstance(child, layer_type):
            del model._modules[name]
        deleteLayer(child, layer_type)
# --------------------------------------------
# merge bn, "conv+bn" --> "conv"
# --------------------------------------------
def merge_bn(model):
    ''' Kai Zhang, 11/Jan/2019.
    merge all 'Conv+BN' (or 'TConv+BN') into 'Conv' (or 'TConv')
    based on https://github.com/pytorch/pytorch/pull/901
    '''
    prev_m = None
    for k, m in list(model.named_children()):
        # Fold a BN layer into the immediately preceding Conv/Linear/TConv sibling.
        if (isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d)) and (isinstance(prev_m, nn.Conv2d) or isinstance(prev_m, nn.Linear) or isinstance(prev_m, nn.ConvTranspose2d)):
            w = prev_m.weight.data
            if prev_m.bias is None:
                # No bias on the conv: create a zero bias so the BN shift can be folded in.
                zeros = torch.Tensor(prev_m.out_channels).zero_().type(w.type())
                prev_m.bias = nn.Parameter(zeros)
            b = prev_m.bias.data
            # 1/sqrt(running_var + eps): BN's normalization factor.
            invstd = m.running_var.clone().add_(m.eps).pow_(-0.5)
            if isinstance(prev_m, nn.ConvTranspose2d):
                # Transposed conv stores weight as (in, out, kH, kW) — scale along dim 1.
                w.mul_(invstd.view(1, w.size(1), 1, 1).expand_as(w))
            else:
                w.mul_(invstd.view(w.size(0), 1, 1, 1).expand_as(w))
            b.add_(-m.running_mean).mul_(invstd)
            if m.affine:
                # Also fold the learned BN scale/shift.
                if isinstance(prev_m, nn.ConvTranspose2d):
                    w.mul_(m.weight.data.view(1, w.size(1), 1, 1).expand_as(w))
                else:
                    w.mul_(m.weight.data.view(w.size(0), 1, 1, 1).expand_as(w))
                b.mul_(m.weight.data).add_(m.bias.data)
            # BN is now redundant: remove it from the parent.
            del model._modules[k]
        prev_m = m
        merge_bn(m)
# --------------------------------------------
# add bn, "conv" --> "conv+bn"
# --------------------------------------------
def add_bn(model):
    ''' Kai Zhang, 11/Jan/2019.
    Wrap every Conv/Linear/TConv child in a Sequential followed by a
    BatchNorm2d whose scale is initialized to 1; recurses into children.
    '''
    for key, child in list(model.named_children()):
        if isinstance(child, (nn.Conv2d, nn.Linear, nn.ConvTranspose2d)):
            bn = nn.BatchNorm2d(child.out_channels, momentum=0.1, affine=True)
            bn.weight.data.fill_(1)
            model._modules[key] = nn.Sequential(model._modules[key], bn)
        add_bn(child)
# --------------------------------------------
# tidy model after removing bn
# --------------------------------------------
def tidy_sequential(model):
    ''' Kai Zhang, 11/Jan/2019.
    Collapse every single-element nn.Sequential into its lone child, recursively.
    '''
    for key, child in list(model.named_children()):
        if isinstance(child, nn.Sequential) and len(child) == 1:
            model._modules[key] = child[0]
        tidy_sequential(child)
| 3,132 | 33.054348 | 187 | py |
MaskedDenoising | MaskedDenoising-main/utils/utils_modelsummary.py | import torch.nn as nn
import torch
import numpy as np
'''
---- 1) FLOPs: floating point operations
---- 2) #Activations: the number of elements of all ‘Conv2d’ outputs
---- 3) #Conv2d: the number of ‘Conv2d’ layers
# --------------------------------------------
# Kai Zhang (github: https://github.com/cszn)
# 21/July/2020
# --------------------------------------------
# Reference
https://github.com/sovrasov/flops-counter.pytorch.git
# If you use this code, please consider the following citation:
@inproceedings{zhang2020aim, %
title={AIM 2020 Challenge on Efficient Super-Resolution: Methods and Results},
author={Kai Zhang and Martin Danelljan and Yawei Li and Radu Timofte and others},
booktitle={European Conference on Computer Vision Workshops},
year={2020}
}
# --------------------------------------------
'''
def get_model_flops(model, input_res, print_per_layer_stat=True,
                    input_constructor=None):
    """Run one dummy forward pass at the given resolution and return the
    model's accumulated FLOPs (MACs) from the counting hooks."""
    assert type(input_res) is tuple, 'Please provide the size of the input image.'
    assert len(input_res) >= 3, 'Input image should have 3 dimensions.'
    flops_model = add_flops_counting_methods(model)
    flops_model.eval().start_flops_count()
    if input_constructor:
        # Caller-supplied constructor builds keyword inputs from the resolution.
        input = input_constructor(input_res)
        _ = flops_model(**input)
    else:
        # Place the dummy batch on the same device as the model's parameters.
        device = list(flops_model.parameters())[-1].device
        batch = torch.FloatTensor(1, *input_res).to(device)
        _ = flops_model(batch)
    if print_per_layer_stat:
        print_model_with_flops(flops_model)
    flops_count = flops_model.compute_average_flops_cost()
    flops_model.stop_flops_count()
    return flops_count
def get_model_activation(model, input_res, input_constructor=None):
    """Run one dummy forward pass and return
    (total activation elements, number of Conv2d layers)."""
    assert type(input_res) is tuple, 'Please provide the size of the input image.'
    assert len(input_res) >= 3, 'Input image should have 3 dimensions.'
    activation_model = add_activation_counting_methods(model)
    activation_model.eval().start_activation_count()
    if input_constructor:
        # Caller-supplied constructor builds keyword inputs from the resolution.
        input = input_constructor(input_res)
        _ = activation_model(**input)
    else:
        # Place the dummy batch on the same device as the model's parameters.
        device = list(activation_model.parameters())[-1].device
        batch = torch.FloatTensor(1, *input_res).to(device)
        _ = activation_model(batch)
    activation_count, num_conv = activation_model.compute_average_activation_cost()
    activation_model.stop_activation_count()
    return activation_count, num_conv
def get_model_complexity_info(model, input_res, print_per_layer_stat=True, as_strings=True,
                              input_constructor=None):
    """Return (flops, params) for one forward pass; optionally formatted as strings."""
    assert type(input_res) is tuple
    assert len(input_res) >= 3
    flops_model = add_flops_counting_methods(model)
    flops_model.eval().start_flops_count()
    if input_constructor:
        input = input_constructor(input_res)
        _ = flops_model(**input)
    else:
        # Note: unlike get_model_flops, the dummy batch stays on the CPU here.
        batch = torch.FloatTensor(1, *input_res)
        _ = flops_model(batch)
    if print_per_layer_stat:
        print_model_with_flops(flops_model)
    flops_count = flops_model.compute_average_flops_cost()
    params_count = get_model_parameters_number(flops_model)
    flops_model.stop_flops_count()
    if as_strings:
        return flops_to_string(flops_count), params_to_string(params_count)
    return flops_count, params_count
def flops_to_string(flops, units='GMac', precision=2):
    """Format a MAC count as a human-readable string.

    With units=None the largest fitting unit (GMac/MMac/KMac/Mac) is chosen
    automatically; otherwise the requested unit is used (unknown units fall
    back to plain ' Mac')."""
    if units is None:
        for unit_name, scale in (('GMac', 10**9), ('MMac', 10**6), ('KMac', 10**3)):
            if flops // scale > 0:
                return str(round(flops / scale, precision)) + ' ' + unit_name
        return str(flops) + ' Mac'
    scales = {'GMac': 10.**9, 'MMac': 10.**6, 'KMac': 10.**3}
    scale = scales.get(units)
    if scale is None:
        return str(flops) + ' Mac'
    return str(round(flops / scale, precision)) + ' ' + units
def params_to_string(params_num):
    """Format a parameter count as 'X M', 'X k', or a plain integer string."""
    if params_num // 10 ** 6 > 0:
        return '{} M'.format(round(params_num / 10 ** 6, 2))
    if params_num // 10 ** 3:
        return '{} k'.format(round(params_num / 10 ** 3, 2))
    return str(params_num)
def print_model_with_flops(model, units='GMac', precision=3):
    """Print the model tree with per-module FLOPs and each module's share of the total."""
    total_flops = model.compute_average_flops_cost()
    def accumulate_flops(self):
        # Leaves report their own count divided by the batch counter; containers sum children.
        if is_supported_instance(self):
            return self.__flops__ / model.__batch_counter__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_flops()
            return sum
    def flops_repr(self):
        # Extra-repr line: "<flops>, <percent> MACs, <original repr>".
        accumulated_flops_cost = self.accumulate_flops()
        return ', '.join([flops_to_string(accumulated_flops_cost, units=units, precision=precision),
                          '{:.3%} MACs'.format(accumulated_flops_cost / total_flops),
                          self.original_extra_repr()])
    def add_extra_repr(m):
        # Temporarily swap each module's extra_repr for the FLOPs-annotated one.
        m.accumulate_flops = accumulate_flops.__get__(m)
        flops_extra_repr = flops_repr.__get__(m)
        if m.extra_repr != flops_extra_repr:
            m.original_extra_repr = m.extra_repr
            m.extra_repr = flops_extra_repr
            assert m.extra_repr != m.original_extra_repr
    def del_extra_repr(m):
        # Restore the original extra_repr after printing.
        if hasattr(m, 'original_extra_repr'):
            m.extra_repr = m.original_extra_repr
            del m.original_extra_repr
        if hasattr(m, 'accumulate_flops'):
            del m.accumulate_flops
    model.apply(add_extra_repr)
    print(model)
    model.apply(del_extra_repr)
def get_model_parameters_number(model):
    """Count the model's trainable (requires_grad) parameters."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += p.numel()
    return total
def add_flops_counting_methods(net_main_module):
    """Attach start/stop/reset/compute FLOPs-counting methods to the module in place."""
    # adding additional methods to the existing module object,
    # this is done this way so that each function has access to self object
    # embed()
    net_main_module.start_flops_count = start_flops_count.__get__(net_main_module)
    net_main_module.stop_flops_count = stop_flops_count.__get__(net_main_module)
    net_main_module.reset_flops_count = reset_flops_count.__get__(net_main_module)
    net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__(net_main_module)
    # Start from zeroed per-module counters.
    net_main_module.reset_flops_count()
    return net_main_module
def compute_average_flops_cost(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.
    Returns current mean flops consumption per image.
    Note: despite the docstring above, this returns the raw accumulated total;
    the per-image division by the batch counter happens in
    print_model_with_flops.accumulate_flops.
    """
    flops_sum = 0
    for module in self.modules():
        if is_supported_instance(module):
            flops_sum += module.__flops__
    return flops_sum
def start_flops_count(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.
    Activates the computation of mean flops consumption per image.
    Call it before you run the network.
    """
    # Registers a type-appropriate counting forward hook on every supported module.
    self.apply(add_flops_counter_hook_function)
def stop_flops_count(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.
    Stops computing the mean flops consumption per image.
    Call whenever you want to pause the computation.
    """
    # Removes the counting forward hooks registered by start_flops_count.
    self.apply(remove_flops_counter_hook_function)
def reset_flops_count(self):
    """
    A method that will be available after add_flops_counting_methods() is called
    on a desired net object.
    Resets statistics computed so far.
    """
    # Zeroes the per-module __flops__ accumulators.
    self.apply(add_flops_counter_variable_or_reset)
def add_flops_counter_hook_function(module):
    """Register the type-appropriate FLOPs-counting forward hook on a supported module (idempotent)."""
    if is_supported_instance(module):
        if hasattr(module, '__flops_handle__'):
            # A hook is already registered; avoid double-counting.
            return
        if isinstance(module, (nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d)):
            handle = module.register_forward_hook(conv_flops_counter_hook)
        elif isinstance(module, (nn.ReLU, nn.PReLU, nn.ELU, nn.LeakyReLU, nn.ReLU6)):
            handle = module.register_forward_hook(relu_flops_counter_hook)
        elif isinstance(module, nn.Linear):
            handle = module.register_forward_hook(linear_flops_counter_hook)
        elif isinstance(module, (nn.BatchNorm2d)):
            handle = module.register_forward_hook(bn_flops_counter_hook)
        else:
            # Supported type without a dedicated hook: count nothing for it.
            handle = module.register_forward_hook(empty_flops_counter_hook)
        module.__flops_handle__ = handle
def remove_flops_counter_hook_function(module):
    """Detach and forget the FLOPs-counting forward hook, if one is attached."""
    if is_supported_instance(module):
        if hasattr(module, '__flops_handle__'):
            module.__flops_handle__.remove()
            del module.__flops_handle__
def add_flops_counter_variable_or_reset(module):
    """Create or zero the per-module FLOPs accumulator."""
    if is_supported_instance(module):
        module.__flops__ = 0
# ---- Internal functions
def is_supported_instance(module):
    """Whether the FLOPs/activation counters know how to handle this module type."""
    supported_types = (
        nn.Conv2d, nn.ConvTranspose2d,
        nn.BatchNorm2d,
        nn.Linear,
        nn.ReLU, nn.PReLU, nn.ELU, nn.LeakyReLU, nn.ReLU6,
    )
    return isinstance(module, supported_types)
def conv_flops_counter_hook(conv_module, input, output):
    """Forward hook: accumulate MACs for a (transposed) convolution into __flops__."""
    batch_size = output.shape[0]
    spatial_dims = list(output.shape[2:])
    # MACs per output position: kernel area * in_channels * filters per group.
    filters_per_channel = conv_module.out_channels // conv_module.groups
    per_position = np.prod(list(conv_module.kernel_size)) * conv_module.in_channels * filters_per_channel
    # Number of output positions across the batch.
    positions = batch_size * np.prod(spatial_dims)
    conv_module.__flops__ += int(int(per_position) * int(positions))
def relu_flops_counter_hook(module, input, output):
    """Forward hook: one op per output element for elementwise activations."""
    module.__flops__ += int(output.numel())
def linear_flops_counter_hook(module, input, output):
    """Forward hook: in_features * out_features MACs per sample
    (a 1-D input is treated as a single sample)."""
    x = input[0]
    if x.dim() == 1:
        module.__flops__ += int(x.shape[0] * output.shape[0])
    else:
        module.__flops__ += int(x.shape[0] * x.shape[1] * output.shape[1])
def bn_flops_counter_hook(module, input, output):
    """Forward hook: per-element normalisation cost, doubled when affine scale/shift is applied."""
    n_elements = output.shape[0] * module.num_features * np.prod(output.shape[2:])
    if module.affine:
        n_elements *= 2
    module.__flops__ += int(n_elements)
# ---- Count the number of convolutional layers and the activation
def add_activation_counting_methods(net_main_module):
    """Bind the activation-counting API onto *net_main_module* and reset its counters.

    Returns the same module object so the call can be chained.
    """
    # adding additional methods to the existing module object,
    # this is done this way so that each function has access to self object
    net_main_module.start_activation_count = start_activation_count.__get__(net_main_module)
    net_main_module.stop_activation_count = stop_activation_count.__get__(net_main_module)
    net_main_module.reset_activation_count = reset_activation_count.__get__(net_main_module)
    net_main_module.compute_average_activation_cost = compute_average_activation_cost.__get__(net_main_module)
    net_main_module.reset_activation_count()
    return net_main_module
def compute_average_activation_cost(self):
    """
    A method that will be available after add_activation_counting_methods() is called
    on a desired net object.
    Returns the accumulated activation count and the number of counted conv layers.
    """
    counted = [m for m in self.modules() if is_supported_instance_for_activation(m)]
    activation_sum = sum(m.__activation__ for m in counted)
    num_conv = sum(m.__num_conv__ for m in counted)
    return activation_sum, num_conv
def start_activation_count(self):
    """
    A method that will be available after add_activation_counting_methods() is called
    on a desired net object.
    Activates the computation of mean activation consumption per image.
    Call it before you run the network.
    """
    # registers the activation-counting forward hook on every supported submodule
    self.apply(add_activation_counter_hook_function)
def stop_activation_count(self):
    """
    A method that will be available after add_activation_counting_methods() is called
    on a desired net object.
    Stops computing the mean activation consumption per image.
    Call whenever you want to pause the computation.
    """
    # removes any previously registered activation-counting hooks
    self.apply(remove_activation_counter_hook_function)
def reset_activation_count(self):
    """
    A method that will be available after add_activation_counting_methods() is called
    on a desired net object.
    Resets statistics computed so far.
    """
    # zeroes __activation__ / __num_conv__ on every supported submodule
    self.apply(add_activation_counter_variable_or_reset)
def add_activation_counter_hook_function(module):
    """Attach the activation-counting forward hook to *module* (idempotent)."""
    if is_supported_instance_for_activation(module):
        if hasattr(module, '__activation_handle__'):
            # hook already installed; do not register it twice
            return
        if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
            handle = module.register_forward_hook(conv_activation_counter_hook)
            module.__activation_handle__ = handle
def remove_activation_counter_hook_function(module):
    """Detach the activation-counting hook from *module*, if one was attached."""
    if is_supported_instance_for_activation(module):
        if hasattr(module, '__activation_handle__'):
            module.__activation_handle__.remove()
            del module.__activation_handle__
def add_activation_counter_variable_or_reset(module):
    """Create (or zero out) the activation and conv-layer counters on *module*."""
    if is_supported_instance_for_activation(module):
        module.__activation__ = 0
        module.__num_conv__ = 0
def is_supported_instance_for_activation(module):
    """Return True for layer types that contribute to the activation count (convs only)."""
    return isinstance(module, (nn.Conv2d, nn.ConvTranspose2d))
def conv_activation_counter_hook(module, input, output):
    """Forward hook: count the activations produced by a convolution.

    Reference: Radosavovic et al., "Designing Network Design Spaces".
    :param module: the conv module the hook is registered on
    :param input: forward inputs (unused)
    :param output: forward output tensor
    """
    module.__activation__ = module.__activation__ + output.numel()
    module.__num_conv__ = module.__num_conv__ + 1
def empty_flops_counter_hook(module, input, output):
    """No-op hook for layers whose cost is not modelled; leaves ``__flops__`` unchanged."""
    module.__flops__ = module.__flops__ + 0
def upsample_flops_counter_hook(module, input, output):
    """Forward hook: one operation per element of ``output[0]``.

    NOTE(review): indexes ``output[0]`` — for a plain tensor output this counts
    the elements of the first sample only; confirm the intended output type.
    """
    head = output[0]
    n_elements = head.shape[0]
    for dim in head.shape[1:]:
        n_elements *= dim
    module.__flops__ += int(n_elements)
def pool_flops_counter_hook(module, input, output):
    """Forward hook: one operation per input element for pooling layers."""
    in_tensor = input[0]
    module.__flops__ += int(np.prod(in_tensor.shape))
def dconv_flops_counter_hook(dconv_module, input, output):
    """Forward hook for a decomposed conv: a k1xk1 conv into m channels
    followed by a k2xk2 projection, counted per output position.

    Expects the module to expose ``weight`` of shape (m, in, k1, k1) and
    ``projection`` of shape (out, _, k2, k2).
    """
    x = input[0]
    n_batch = x.shape[0]
    spatial_dims = list(output.shape[2:])
    m_channels, in_channels, kernel_dim1, _, = dconv_module.weight.shape
    out_channels, _, kernel_dim2, _, = dconv_module.projection.shape
    stage1_flops = kernel_dim1 ** 2 * in_channels * m_channels
    stage2_flops = kernel_dim2 ** 2 * out_channels * m_channels
    n_positions = n_batch * np.prod(spatial_dims)
    dconv_module.__flops__ += int((stage1_flops + stage2_flops) * n_positions)
| 16,097 | 32.123457 | 129 | py |
MaskedDenoising | MaskedDenoising-main/data/dataset_video_test.py | import glob
import torch
from os import path as osp
import torch.utils.data as data
import utils.utils_video as utils_video
class VideoRecurrentTestDataset(data.Dataset):
    """Video test dataset for recurrent architectures, which takes LR video
    frames as input and output corresponding HR video frames. Modified from
    https://github.com/xinntao/BasicSR/blob/master/basicsr/data/reds_dataset.py
    Supported datasets: Vid4, REDS4, REDSofficial.
    More generally, it supports testing dataset with following structures:
    dataroot
    ├── subfolder1
        ├── frame000
        ├── frame001
        ├── ...
    ├── subfolder1
        ├── frame000
        ├── frame001
        ├── ...
    ├── ...
    For testing datasets, there is no need to prepare LMDB files.
    Args:
        opt (dict): Config for train dataset. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            dataroot_lq (str): Data root path for lq.
            io_backend (dict): IO backend type and other kwarg.
            cache_data (bool): Whether to cache testing datasets.
            name (str): Dataset name.
            meta_info_file (str): The path to the file storing the list of test
                folders. If not provided, all the folders in the dataroot will
                be used.
            num_frame (int): Window size for input frames.
            padding (str): Padding mode.
    """
    def __init__(self, opt):
        super(VideoRecurrentTestDataset, self).__init__()
        self.opt = opt
        self.cache_data = opt['cache_data']
        self.gt_root, self.lq_root = opt['dataroot_gt'], opt['dataroot_lq']
        self.data_info = {'lq_path': [], 'gt_path': [], 'folder': [], 'idx': [], 'border': []}
        self.imgs_lq, self.imgs_gt = {}, {}
        # Restrict to the clips listed in meta_info_file when given,
        # otherwise take every subfolder found under the two data roots.
        if 'meta_info_file' in opt:
            with open(opt['meta_info_file'], 'r') as fin:
                subfolders = [line.split(' ')[0] for line in fin]
                subfolders_lq = [osp.join(self.lq_root, key) for key in subfolders]
                subfolders_gt = [osp.join(self.gt_root, key) for key in subfolders]
        else:
            subfolders_lq = sorted(glob.glob(osp.join(self.lq_root, '*')))
            subfolders_gt = sorted(glob.glob(osp.join(self.gt_root, '*')))
        for subfolder_lq, subfolder_gt in zip(subfolders_lq, subfolders_gt):
            # get frame list for lq and gt
            subfolder_name = osp.basename(subfolder_lq)
            img_paths_lq = sorted(list(utils_video.scandir(subfolder_lq, full_path=True)))
            img_paths_gt = sorted(list(utils_video.scandir(subfolder_gt, full_path=True)))
            max_idx = len(img_paths_lq)
            assert max_idx == len(img_paths_gt), (f'Different number of images in lq ({max_idx})'
                                                  f' and gt folders ({len(img_paths_gt)})')
            self.data_info['lq_path'].extend(img_paths_lq)
            self.data_info['gt_path'].extend(img_paths_gt)
            self.data_info['folder'].extend([subfolder_name] * max_idx)
            for i in range(max_idx):
                self.data_info['idx'].append(f'{i}/{max_idx}')
            # mark the first/last num_frame//2 frames of each clip as border frames
            border_l = [0] * max_idx
            for i in range(self.opt['num_frame'] // 2):
                border_l[i] = 1
                border_l[max_idx - i - 1] = 1
            self.data_info['border'].extend(border_l)
            # cache data or save the frame list
            if self.cache_data:
                print(f'Cache {subfolder_name} for VideoTestDataset...')
                self.imgs_lq[subfolder_name] = utils_video.read_img_seq(img_paths_lq)
                self.imgs_gt[subfolder_name] = utils_video.read_img_seq(img_paths_gt)
            else:
                self.imgs_lq[subfolder_name] = img_paths_lq
                self.imgs_gt[subfolder_name] = img_paths_gt
        # Find unique folder strings
        self.folders = sorted(list(set(self.data_info['folder'])))
        self.sigma = opt['sigma'] / 255. if 'sigma' in opt else 0  # for non-blind video denoising
    def __getitem__(self, index):
        # One item is a whole clip: all frames belonging to one subfolder.
        folder = self.folders[index]
        if self.sigma:
            # for non-blind video denoising
            if self.cache_data:
                imgs_gt = self.imgs_gt[folder]
            else:
                imgs_gt = utils_video.read_img_seq(self.imgs_gt[folder])
            torch.manual_seed(0)  # fixed seed -> identical noise on every evaluation
            noise_level = torch.ones((1, 1, 1, 1)) * self.sigma
            noise = torch.normal(mean=0, std=noise_level.expand_as(imgs_gt))
            imgs_lq = imgs_gt + noise
            t, _, h, w = imgs_lq.shape
            # append the noise-level map as an extra input channel
            imgs_lq = torch.cat([imgs_lq, noise_level.expand(t, 1, h, w)], 1)
        else:
            # for video sr and deblurring
            if self.cache_data:
                imgs_lq = self.imgs_lq[folder]
                imgs_gt = self.imgs_gt[folder]
            else:
                imgs_lq = utils_video.read_img_seq(self.imgs_lq[folder])
                imgs_gt = utils_video.read_img_seq(self.imgs_gt[folder])
        # NOTE(review): when cache_data is True, 'lq_path' holds the cached tensor
        # rather than a list of paths — confirm downstream consumers handle this.
        return {
            'L': imgs_lq,
            'H': imgs_gt,
            'folder': folder,
            'lq_path': self.imgs_lq[folder],
        }
    def __len__(self):
        return len(self.folders)
class SingleVideoRecurrentTestDataset(data.Dataset):
    """Single video test dataset for recurrent architectures, which takes LR video
    frames as input and output corresponding HR video frames (only input LQ path).
    More generally, it supports testing dataset with following structures:
    dataroot
    ├── subfolder1
        ├── frame000
        ├── frame001
        ├── ...
    ├── subfolder1
        ├── frame000
        ├── frame001
        ├── ...
    ├── ...
    For testing datasets, there is no need to prepare LMDB files.
    Args:
        opt (dict): Config for train dataset. It contains the following keys:
            dataroot_lq (str): Data root path for lq.
            io_backend (dict): IO backend type and other kwarg.
            cache_data (bool): Whether to cache testing datasets.
            name (str): Dataset name.
            meta_info_file (str): The path to the file storing the list of test
                folders. If not provided, all the folders in the dataroot will
                be used.
            num_frame (int): Window size for input frames.
            padding (str): Padding mode.
    """
    def __init__(self, opt):
        super(SingleVideoRecurrentTestDataset, self).__init__()
        self.opt = opt
        self.cache_data = opt['cache_data']
        self.lq_root = opt['dataroot_lq']
        self.data_info = {'lq_path': [], 'folder': [], 'idx': [], 'border': []}
        self.imgs_lq = {}
        # Restrict to the clips listed in meta_info_file when given,
        # otherwise take every subfolder found under the data root.
        if 'meta_info_file' in opt:
            with open(opt['meta_info_file'], 'r') as fin:
                subfolders = [line.split(' ')[0] for line in fin]
                subfolders_lq = [osp.join(self.lq_root, key) for key in subfolders]
        else:
            subfolders_lq = sorted(glob.glob(osp.join(self.lq_root, '*')))
        for subfolder_lq in subfolders_lq:
            # get frame list for lq and gt
            subfolder_name = osp.basename(subfolder_lq)
            img_paths_lq = sorted(list(utils_video.scandir(subfolder_lq, full_path=True)))
            max_idx = len(img_paths_lq)
            self.data_info['lq_path'].extend(img_paths_lq)
            self.data_info['folder'].extend([subfolder_name] * max_idx)
            for i in range(max_idx):
                self.data_info['idx'].append(f'{i}/{max_idx}')
            # mark the first/last num_frame//2 frames of each clip as border frames
            border_l = [0] * max_idx
            for i in range(self.opt['num_frame'] // 2):
                border_l[i] = 1
                border_l[max_idx - i - 1] = 1
            self.data_info['border'].extend(border_l)
            # cache data or save the frame list
            if self.cache_data:
                print(f'Cache {subfolder_name} for VideoTestDataset...')
                self.imgs_lq[subfolder_name] = utils_video.read_img_seq(img_paths_lq)
            else:
                self.imgs_lq[subfolder_name] = img_paths_lq
        # Find unique folder strings
        self.folders = sorted(list(set(self.data_info['folder'])))
    def __getitem__(self, index):
        # One item is a whole clip: all frames belonging to one subfolder.
        folder = self.folders[index]
        if self.cache_data:
            imgs_lq = self.imgs_lq[folder]
        else:
            imgs_lq = utils_video.read_img_seq(self.imgs_lq[folder])
        # NOTE(review): when cache_data is True, 'lq_path' holds the cached tensor
        # rather than a list of paths — confirm downstream consumers handle this.
        return {
            'L': imgs_lq,
            'folder': folder,
            'lq_path': self.imgs_lq[folder],
        }
    def __len__(self):
        return len(self.folders)
class VideoTestVimeo90KDataset(data.Dataset):
    """Video test dataset for Vimeo90k-Test dataset.
    It only keeps the center frame for testing.
    For testing datasets, there is no need to prepare LMDB files.
    Args:
        opt (dict): Config for train dataset. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            dataroot_lq (str): Data root path for lq.
            io_backend (dict): IO backend type and other kwarg.
            cache_data (bool): Whether to cache testing datasets.
            name (str): Dataset name.
            meta_info_file (str): The path to the file storing the list of test
                folders. If not provided, all the folders in the dataroot will
                be used.
            num_frame (int): Window size for input frames.
            padding (str): Padding mode.
    """
    def __init__(self, opt):
        super(VideoTestVimeo90KDataset, self).__init__()
        self.opt = opt
        self.cache_data = opt['cache_data']
        if self.cache_data:
            raise NotImplementedError('cache_data in Vimeo90K-Test dataset is not implemented.')
        self.gt_root, self.lq_root = opt['dataroot_gt'], opt['dataroot_lq']
        self.data_info = {'lq_path': [], 'gt_path': [], 'folder': [], 'idx': [], 'border': []}
        # indices of the num_frame input frames: a window centred within im1..im7
        neighbor_list = [i + (9 - opt['num_frame']) // 2 for i in range(opt['num_frame'])]
        with open(opt['meta_info_file'], 'r') as fin:
            subfolders = [line.split(' ')[0] for line in fin]
        for idx, subfolder in enumerate(subfolders):
            gt_path = osp.join(self.gt_root, subfolder, 'im4.png')  # centre frame is the target
            self.data_info['gt_path'].append(gt_path)
            lq_paths = [osp.join(self.lq_root, subfolder, f'im{i}.png') for i in neighbor_list]
            self.data_info['lq_path'].append(lq_paths)
            self.data_info['folder'].append('vimeo90k')
            self.data_info['idx'].append(f'{idx}/{len(subfolders)}')
            self.data_info['border'].append(0)
        self.pad_sequence = opt.get('pad_sequence', False)
    def __getitem__(self, index):
        lq_path = self.data_info['lq_path'][index]
        gt_path = self.data_info['gt_path'][index]
        imgs_lq = utils_video.read_img_seq(lq_path)
        img_gt = utils_video.read_img_seq([gt_path])
        img_gt.squeeze_(0)
        if self.pad_sequence:  # pad the sequence: 7 frames to 8 frames
            imgs_lq = torch.cat([imgs_lq, imgs_lq[-1:,...]], dim=0)
        return {
            'L': imgs_lq,  # (t, c, h, w)
            'H': img_gt,  # (c, h, w)
            'folder': self.data_info['folder'][index],  # folder name
            'idx': self.data_info['idx'][index],  # e.g., 0/843
            'border': self.data_info['border'][index],  # 0 for non-border
            'lq_path': lq_path[self.opt['num_frame'] // 2]  # center frame
        }
    def __len__(self):
        return len(self.data_info['gt_path'])
class SingleVideoRecurrentTestDataset(data.Dataset):
    """Single Video test dataset (only input LQ path).
    Supported datasets: Vid4, REDS4, REDSofficial.
    More generally, it supports testing dataset with following structures:
    dataroot
    ├── subfolder1
        ├── frame000
        ├── frame001
        ├── ...
    ├── subfolder1
        ├── frame000
        ├── frame001
        ├── ...
    ├── ...
    For testing datasets, there is no need to prepare LMDB files.
    Args:
        opt (dict): Config for train dataset. It contains the following keys:
            dataroot_lq (str): Data root path for lq.
            io_backend (dict): IO backend type and other kwarg.
            cache_data (bool): Whether to cache testing datasets.
            name (str): Dataset name.
            meta_info_file (str): The path to the file storing the list of test
                folders. If not provided, all the folders in the dataroot will
                be used.
            num_frame (int): Window size for input frames.
            padding (str): Padding mode.
    """
    def __init__(self, opt):
        super(SingleVideoRecurrentTestDataset, self).__init__()
        self.opt = opt
        self.cache_data = opt['cache_data']
        self.lq_root = opt['dataroot_lq']
        self.data_info = {'lq_path': [], 'folder': [], 'idx': [], 'border': []}
        # file client (io backend)
        self.file_client = None
        self.imgs_lq = {}
        # Restrict to the clips listed in meta_info_file when given,
        # otherwise take every subfolder found under the data root.
        if 'meta_info_file' in opt:
            with open(opt['meta_info_file'], 'r') as fin:
                subfolders = [line.split(' ')[0] for line in fin]
                subfolders_lq = [osp.join(self.lq_root, key) for key in subfolders]
        else:
            subfolders_lq = sorted(glob.glob(osp.join(self.lq_root, '*')))
        for subfolder_lq in subfolders_lq:
            # get frame list for lq and gt
            subfolder_name = osp.basename(subfolder_lq)
            img_paths_lq = sorted(list(utils_video.scandir(subfolder_lq, full_path=True)))
            max_idx = len(img_paths_lq)
            self.data_info['lq_path'].extend(img_paths_lq)
            self.data_info['folder'].extend([subfolder_name] * max_idx)
            for i in range(max_idx):
                self.data_info['idx'].append(f'{i}/{max_idx}')
            # mark the first/last num_frame//2 frames of each clip as border frames
            border_l = [0] * max_idx
            for i in range(self.opt['num_frame'] // 2):
                border_l[i] = 1
                border_l[max_idx - i - 1] = 1
            self.data_info['border'].extend(border_l)
            # cache data or save the frame list
            if self.cache_data:
                # bugfix: this module defines no `logger`, so the previous
                # `logger.info(...)` raised NameError whenever cache_data was
                # enabled; use print like the sibling dataset classes.
                print(f'Cache {subfolder_name} for VideoTestDataset...')
                self.imgs_lq[subfolder_name] = utils_video.read_img_seq(img_paths_lq)
            else:
                self.imgs_lq[subfolder_name] = img_paths_lq
        # Find unique folder strings
        self.folders = sorted(list(set(self.data_info['folder'])))
    def __getitem__(self, index):
        # One item is a whole clip: all frames belonging to one subfolder.
        folder = self.folders[index]
        if self.cache_data:
            imgs_lq = self.imgs_lq[folder]
        else:
            imgs_lq = utils_video.read_img_seq(self.imgs_lq[folder])
        return {
            'L': imgs_lq,
            'folder': folder,
            'lq_path': self.imgs_lq[folder],
        }
    def __len__(self):
        return len(self.folders)
| 15,059 | 38.321149 | 97 | py |
MaskedDenoising | MaskedDenoising-main/data/dataset_sr.py | import random
import numpy as np
import torch.utils.data as data
import utils.utils_image as util
class DatasetSR(data.Dataset):
    '''
    # -----------------------------------------
    # Get L/H for SISR.
    # If only "paths_H" is provided, synthesize bicubically downsampled L on-the-fly.
    # -----------------------------------------
    # e.g., SRResNet
    # -----------------------------------------
    '''
    def __init__(self, opt):
        super(DatasetSR, self).__init__()
        self.opt = opt
        self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
        self.sf = opt['scale'] if opt['scale'] else 4
        self.patch_size = self.opt['H_size'] if self.opt['H_size'] else 96
        self.L_size = self.patch_size // self.sf  # LR patch edge = HR patch edge / scale
        # ------------------------------------
        # get paths of L/H
        # ------------------------------------
        self.paths_H = util.get_image_paths(opt['dataroot_H'])
        self.paths_L = util.get_image_paths(opt['dataroot_L'])
        assert self.paths_H, 'Error: H path is empty.'
        if self.paths_L and self.paths_H:
            assert len(self.paths_L) == len(self.paths_H), 'L/H mismatch - {}, {}.'.format(len(self.paths_L), len(self.paths_H))
    def __getitem__(self, index):
        L_path = None
        # ------------------------------------
        # get H image
        # ------------------------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)
        img_H = util.uint2single(img_H)
        # ------------------------------------
        # modcrop: make H divisible by the scale factor
        # ------------------------------------
        img_H = util.modcrop(img_H, self.sf)
        # ------------------------------------
        # get L image
        # ------------------------------------
        if self.paths_L:
            # --------------------------------
            # directly load L image
            # --------------------------------
            L_path = self.paths_L[index]
            img_L = util.imread_uint(L_path, self.n_channels)
            img_L = util.uint2single(img_L)
        else:
            # --------------------------------
            # sythesize L image via matlab's bicubic
            # --------------------------------
            H, W = img_H.shape[:2]
            img_L = util.imresize_np(img_H, 1 / self.sf, True)
        # ------------------------------------
        # if train, get L/H patch pair
        # ------------------------------------
        if self.opt['phase'] == 'train':
            H, W, C = img_L.shape
            # --------------------------------
            # randomly crop the L patch
            # --------------------------------
            rnd_h = random.randint(0, max(0, H - self.L_size))
            rnd_w = random.randint(0, max(0, W - self.L_size))
            img_L = img_L[rnd_h:rnd_h + self.L_size, rnd_w:rnd_w + self.L_size, :]
            # --------------------------------
            # crop corresponding H patch (aligned with the L crop, scaled by sf)
            # --------------------------------
            rnd_h_H, rnd_w_H = int(rnd_h * self.sf), int(rnd_w * self.sf)
            img_H = img_H[rnd_h_H:rnd_h_H + self.patch_size, rnd_w_H:rnd_w_H + self.patch_size, :]
            # --------------------------------
            # augmentation - flip and/or rotate (same mode so the pair stays aligned)
            # --------------------------------
            mode = random.randint(0, 7)
            img_L, img_H = util.augment_img(img_L, mode=mode), util.augment_img(img_H, mode=mode)
        # ------------------------------------
        # L/H pairs, HWC to CHW, numpy to tensor
        # ------------------------------------
        img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(img_L)
        if L_path is None:
            L_path = H_path
        return {'L': img_L, 'H': img_H, 'L_path': L_path, 'H_path': H_path}
    def __len__(self):
        return len(self.paths_H)
MaskedDenoising | MaskedDenoising-main/data/dataset_jpeg.py | import random
import torch.utils.data as data
import utils.utils_image as util
import cv2
class DatasetJPEG(data.Dataset):
    """JPEG compression artifact reduction (deblocking) dataset.

    Only ``dataroot_H`` is needed: the degraded L image is synthesised on the
    fly by JPEG-encoding/decoding the clean H image at a given quality factor.
    """
    def __init__(self, opt):
        super(DatasetJPEG, self).__init__()
        print('Dataset: JPEG compression artifact reduction (deblocking) with quality factor. Only dataroot_H is needed.')
        self.opt = opt
        self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
        self.patch_size = self.opt['H_size'] if opt['H_size'] else 128
        self.quality_factor = opt['quality_factor'] if opt['quality_factor'] else 40
        self.quality_factor_test = opt['quality_factor_test'] if opt['quality_factor_test'] else 40
        self.is_color = opt['is_color'] if opt['is_color'] else False
        # -------------------------------------
        # get the path of H, return None if input is None
        # -------------------------------------
        self.paths_H = util.get_image_paths(opt['dataroot_H'])
    def __getitem__(self, index):
        if self.opt['phase'] == 'train':
            # -------------------------------------
            # get H image
            # -------------------------------------
            H_path = self.paths_H[index]
            img_H = util.imread_uint(H_path, 3)
            L_path = H_path
            H, W = img_H.shape[:2]
            # crop a slightly larger patch first; the final crop below can then
            # start off-grid (presumably to vary the 8-px JPEG block phase — TODO confirm)
            self.patch_size_plus = self.patch_size + 8
            # ---------------------------------
            # randomly crop a large patch
            # ---------------------------------
            rnd_h = random.randint(0, max(0, H - self.patch_size_plus))
            rnd_w = random.randint(0, max(0, W - self.patch_size_plus))
            patch_H = img_H[rnd_h:rnd_h + self.patch_size_plus, rnd_w:rnd_w + self.patch_size_plus, ...]
            # ---------------------------------
            # augmentation - flip, rotate
            # ---------------------------------
            mode = random.randint(0, 7)
            patch_H = util.augment_img(patch_H, mode=mode)
            img_L = patch_H.copy()
            # ---------------------------------
            # set quality factor
            # ---------------------------------
            quality_factor = self.quality_factor
            if self.is_color:  # color image
                img_H = img_L.copy()
                # cv2 JPEG codec works on BGR, so round-trip through BGR
                img_L = cv2.cvtColor(img_L, cv2.COLOR_RGB2BGR)
                result, encimg = cv2.imencode('.jpg', img_L, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
                img_L = cv2.imdecode(encimg, 1)
                img_L = cv2.cvtColor(img_L, cv2.COLOR_BGR2RGB)
            else:
                # grayscale training: randomly pick luma (Y) or plain grayscale conversion
                if random.random() > 0.5:
                    img_L = util.rgb2ycbcr(img_L)
                else:
                    img_L = cv2.cvtColor(img_L, cv2.COLOR_RGB2GRAY)
                img_H = img_L.copy()
                result, encimg = cv2.imencode('.jpg', img_L, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
                img_L = cv2.imdecode(encimg, 0)
            # ---------------------------------
            # randomly crop a patch (half the time anchored at the top-left corner)
            # ---------------------------------
            H, W = img_H.shape[:2]
            if random.random() > 0.5:
                rnd_h = random.randint(0, max(0, H - self.patch_size))
                rnd_w = random.randint(0, max(0, W - self.patch_size))
            else:
                rnd_h = 0
                rnd_w = 0
            img_H = img_H[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size]
            img_L = img_L[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size]
        else:
            H_path = self.paths_H[index]
            L_path = H_path
            # ---------------------------------
            # set quality factor
            # ---------------------------------
            quality_factor = self.quality_factor_test
            if self.is_color:  # color JPEG image deblocking
                img_H = util.imread_uint(H_path, 3)
                img_L = img_H.copy()
                img_L = cv2.cvtColor(img_L, cv2.COLOR_RGB2BGR)
                result, encimg = cv2.imencode('.jpg', img_L, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
                img_L = cv2.imdecode(encimg, 1)
                img_L = cv2.cvtColor(img_L, cv2.COLOR_BGR2RGB)
            else:
                img_H = cv2.imread(H_path, cv2.IMREAD_UNCHANGED)
                # bugfix: this test was previously done on img_L, which is not
                # defined yet at this point (NameError); inspect img_H instead.
                is_to_ycbcr = True if img_H.ndim == 3 else False
                if is_to_ycbcr:
                    img_H = cv2.cvtColor(img_H, cv2.COLOR_BGR2RGB)
                    img_H = util.rgb2ycbcr(img_H)
                result, encimg = cv2.imencode('.jpg', img_H, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
                img_L = cv2.imdecode(encimg, 0)
        img_L, img_H = util.uint2tensor3(img_L), util.uint2tensor3(img_H)
        return {'L': img_L, 'H': img_H, 'L_path': L_path, 'H_path': H_path}
    def __len__(self):
        return len(self.paths_H)
MaskedDenoising | MaskedDenoising-main/data/dataset_blindsr.py | import random
import numpy as np
import torch.utils.data as data
import utils.utils_image as util
import os
from utils import utils_blindsr as blindsr
class DatasetBlindSR(data.Dataset):
    '''
    # -----------------------------------------
    # dataset for BSRGAN
    # H is loaded from disk; the (L, H) pair is produced by the BSRGAN
    # blind-degradation pipeline on the fly.
    # -----------------------------------------
    '''
    def __init__(self, opt):
        super(DatasetBlindSR, self).__init__()
        self.opt = opt
        self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
        self.sf = opt['scale'] if opt['scale'] else 4
        self.shuffle_prob = opt['shuffle_prob'] if opt['shuffle_prob'] else 0.1
        self.use_sharp = opt['use_sharp'] if opt['use_sharp'] else False
        self.degradation_type = opt['degradation_type'] if opt['degradation_type'] else 'bsrgan'
        self.lq_patchsize = self.opt['lq_patchsize'] if self.opt['lq_patchsize'] else 64
        self.patch_size = self.opt['H_size'] if self.opt['H_size'] else self.lq_patchsize*self.sf
        self.paths_H = util.get_image_paths(opt['dataroot_H'])
        print(len(self.paths_H))
        assert self.paths_H, 'Error: H path is empty.'
        # robustness fix: 'if_mask' may be absent from opt; the previous
        # `opt['if_mask']` raised KeyError in that case.
        self.if_mask = bool(opt.get('if_mask'))
    def __getitem__(self, index):
        L_path = None
        # ------------------------------------
        # get H image
        # ------------------------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)
        img_name, ext = os.path.splitext(os.path.basename(H_path))
        H, W, C = img_H.shape
        if H < self.patch_size or W < self.patch_size:
            # image too small for a crop: replace it with a constant random-color patch
            img_H = np.tile(np.random.randint(0, 256, size=[1, 1, self.n_channels], dtype=np.uint8), (self.patch_size, self.patch_size, 1))
        # ------------------------------------
        # if train, randomly crop and augment the H patch
        # ------------------------------------
        if self.opt['phase'] == 'train':
            H, W, C = img_H.shape
            rnd_h_H = random.randint(0, max(0, H - self.patch_size))
            rnd_w_H = random.randint(0, max(0, W - self.patch_size))
            img_H = img_H[rnd_h_H:rnd_h_H + self.patch_size, rnd_w_H:rnd_w_H + self.patch_size, :]
            if 'face' in img_name:
                # flips only (modes 0/4): keep faces upright
                mode = random.choice([0, 4])
                img_H = util.augment_img(img_H, mode=mode)
            else:
                mode = random.randint(0, 7)
                img_H = util.augment_img(img_H, mode=mode)
        # ------------------------------------
        # degrade H to get the L/H pair
        # (this tail was previously duplicated in both phase branches)
        # ------------------------------------
        img_H = util.uint2single(img_H)
        if self.degradation_type == 'bsrgan':
            img_L, img_H = blindsr.degradation_bsrgan(img_H, self.sf, lq_patchsize=self.lq_patchsize, isp_model=None)
        else:
            # bugfix: previously an unsupported type fell through silently and
            # img_L was never assigned, crashing below with NameError.
            raise ValueError('unsupported degradation_type: {}'.format(self.degradation_type))
        # ------------------------------------
        # L/H pairs, HWC to CHW, numpy to tensor
        # ------------------------------------
        img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(img_L)
        if L_path is None:
            L_path = H_path
        return {'L': img_L, 'H': img_H, 'L_path': L_path, 'H_path': H_path}
    def __len__(self):
        return len(self.paths_H)
MaskedDenoising | MaskedDenoising-main/data/dataset_fdncnn.py | import random
import numpy as np
import torch
import torch.utils.data as data
import utils.utils_image as util
class DatasetFDnCNN(data.Dataset):
    """
    # -----------------------------------------
    # Get L/H/M for denosing on AWGN with a range of sigma.
    # Only dataroot_H is needed.
    # -----------------------------------------
    # e.g., FDnCNN, H = f(cat(L, M)), M is noise level map
    # -----------------------------------------
    """
    def __init__(self, opt):
        super(DatasetFDnCNN, self).__init__()
        self.opt = opt
        self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
        self.patch_size = self.opt['H_size'] if opt['H_size'] else 64
        self.sigma = opt['sigma'] if opt['sigma'] else [0, 75]  # training noise-level range
        self.sigma_min, self.sigma_max = self.sigma[0], self.sigma[1]
        self.sigma_test = opt['sigma_test'] if opt['sigma_test'] else 25  # fixed test noise level
        # -------------------------------------
        # get the path of H, return None if input is None
        # -------------------------------------
        self.paths_H = util.get_image_paths(opt['dataroot_H'])
    def __getitem__(self, index):
        # -------------------------------------
        # get H image
        # -------------------------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)
        L_path = H_path
        if self.opt['phase'] == 'train':
            """
            # --------------------------------
            # get L/H/M patch pairs
            # --------------------------------
            """
            H, W = img_H.shape[:2]
            # ---------------------------------
            # randomly crop the patch
            # ---------------------------------
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            patch_H = img_H[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size, :]
            # ---------------------------------
            # augmentation - flip, rotate
            # ---------------------------------
            mode = random.randint(0, 7)
            patch_H = util.augment_img(patch_H, mode=mode)
            # ---------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # ---------------------------------
            img_H = util.uint2tensor3(patch_H)
            img_L = img_H.clone()
            # ---------------------------------
            # get noise level (uniformly sampled in [sigma_min, sigma_max], scaled to [0, 1])
            # ---------------------------------
            noise_level = torch.FloatTensor([np.random.uniform(self.sigma_min, self.sigma_max)])/255.0
            noise_level_map = torch.ones((1, img_L.size(1), img_L.size(2))).mul_(noise_level).float()  # torch.full((1, img_L.size(1), img_L.size(2)), noise_level)
            # ---------------------------------
            # add noise
            # ---------------------------------
            noise = torch.randn(img_L.size()).mul_(noise_level).float()
            img_L.add_(noise)
        else:
            """
            # --------------------------------
            # get L/H/M image pairs
            # --------------------------------
            """
            img_H = util.uint2single(img_H)
            img_L = np.copy(img_H)
            np.random.seed(seed=0)  # fixed seed -> deterministic test noise
            img_L += np.random.normal(0, self.sigma_test/255.0, img_L.shape)
            noise_level_map = torch.ones((1, img_L.shape[0], img_L.shape[1])).mul_(self.sigma_test/255.0).float()  # torch.full((1, img_L.size(1), img_L.size(2)), noise_level)
            # ---------------------------------
            # L/H image pairs
            # ---------------------------------
            img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(img_L)
        """
        # -------------------------------------
        # concat L and noise level map M
        # -------------------------------------
        """
        img_L = torch.cat((img_L, noise_level_map), 0)
        return {'L': img_L, 'H': img_H, 'L_path': L_path, 'H_path': H_path}
    def __len__(self):
        return len(self.paths_H)
MaskedDenoising | MaskedDenoising-main/data/dataset_plain.py | import random
import numpy as np
import torch.utils.data as data
import utils.utils_image as util
class DatasetPlain(data.Dataset):
    '''
    # -----------------------------------------
    # Get L/H for image-to-image mapping.
    # Both "paths_L" and "paths_H" are needed.
    # -----------------------------------------
    # e.g., train denoiser with L and H
    # -----------------------------------------
    '''
    def __init__(self, opt):
        super(DatasetPlain, self).__init__()
        print('Get L/H for image-to-image mapping. Both "paths_L" and "paths_H" are needed.')
        self.opt = opt
        self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
        self.patch_size = self.opt['H_size'] if self.opt['H_size'] else 64
        # ------------------------------------
        # get the path of L/H
        # ------------------------------------
        self.paths_H = util.get_image_paths(opt['dataroot_H'])
        self.paths_L = util.get_image_paths(opt['dataroot_L'])
        assert self.paths_H, 'Error: H path is empty.'
        assert self.paths_L, 'Error: L path is empty. Plain dataset assumes both L and H are given!'
        if self.paths_L and self.paths_H:
            assert len(self.paths_L) == len(self.paths_H), 'L/H mismatch - {}, {}.'.format(len(self.paths_L), len(self.paths_H))
    def __getitem__(self, index):
        # ------------------------------------
        # get H image
        # ------------------------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)
        # ------------------------------------
        # get L image
        # ------------------------------------
        L_path = self.paths_L[index]
        img_L = util.imread_uint(L_path, self.n_channels)
        # ------------------------------------
        # if train, get L/H patch pair
        # ------------------------------------
        if self.opt['phase'] == 'train':
            H, W, _ = img_H.shape
            # --------------------------------
            # randomly crop the patch (same window for L and H so the pair stays aligned)
            # --------------------------------
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            patch_L = img_L[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size, :]
            patch_H = img_H[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size, :]
            # --------------------------------
            # augmentation - flip and/or rotate (same mode for both images)
            # --------------------------------
            mode = random.randint(0, 7)
            patch_L, patch_H = util.augment_img(patch_L, mode=mode), util.augment_img(patch_H, mode=mode)
            # --------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # --------------------------------
            img_L, img_H = util.uint2tensor3(patch_L), util.uint2tensor3(patch_H)
        else:
            # --------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # --------------------------------
            img_L, img_H = util.uint2tensor3(img_L), util.uint2tensor3(img_H)
        return {'L': img_L, 'H': img_H, 'L_path': L_path, 'H_path': H_path}
    def __len__(self):
        return len(self.paths_H)
MaskedDenoising | MaskedDenoising-main/data/dataset_usrnet.py | import random
import numpy as np
import torch
import torch.utils.data as data
import utils.utils_image as util
from utils import utils_deblur
from utils import utils_sisr
import os
from scipy import ndimage
from scipy.io import loadmat
# import hdf5storage
class DatasetUSRNet(data.Dataset):
    '''
    # -----------------------------------------
    # Get L/k/sf/sigma for USRNet.
    # Only "paths_H" and kernel is needed, synthesize L on-the-fly.
    # -----------------------------------------
    '''
    def __init__(self, opt):
        """Read options and preload the validation kernels from kernels_12.mat.

        opt keys used: n_channels, H_size, sigma_max, scales, sf_validation,
        dataroot_H, and (train phase only) dataloader_batch_size.
        """
        super(DatasetUSRNet, self).__init__()
        self.opt = opt
        self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
        self.patch_size = self.opt['H_size'] if self.opt['H_size'] else 96
        self.sigma_max = self.opt['sigma_max'] if self.opt['sigma_max'] is not None else 25
        self.scales = opt['scales'] if opt['scales'] is not None else [1,2,3,4]
        self.sf_validation = opt['sf_validation'] if opt['sf_validation'] is not None else 3
        #self.kernels = hdf5storage.loadmat(os.path.join('kernels', 'kernels_12.mat'))['kernels']
        self.kernels = loadmat(os.path.join('kernels', 'kernels_12.mat'))['kernels']  # for validation

        # -------------------
        # get the path of H
        # -------------------
        self.paths_H = util.get_image_paths(opt['dataroot_H'])  # return None if input is None
        # sample counter; keeps the scale factor fixed within one batch
        self.count = 0

    def __getitem__(self, index):
        """Return {'L','H','k','sigma','sf','L_path','H_path'} for one image.

        Train: random H patch, random blur kernel (motion or anisotropic
        Gaussian), random noise level; L is the blurred, sf-downsampled,
        noisy patch.  Validation: first kernel of kernels_12.mat at
        sf_validation with zero noise.
        """

        # -------------------
        # get H image
        # -------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)
        L_path = H_path

        if self.opt['phase'] == 'train':

            # ---------------------------
            # 1) scale factor, ensure each batch only involves one scale factor
            # ---------------------------
            if self.count % self.opt['dataloader_batch_size'] == 0:
                # sf = random.choice([1,2,3,4])
                self.sf = random.choice(self.scales)
                # self.count = 0  # optional
            self.count += 1
            H, W, _ = img_H.shape

            # ----------------------------
            # randomly crop the patch
            # ----------------------------
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            patch_H = img_H[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size, :]

            # ---------------------------
            # augmentation - flip, rotate
            # ---------------------------
            mode = np.random.randint(0, 8)
            patch_H = util.augment_img(patch_H, mode=mode)

            # ---------------------------
            # 2) kernel
            # ---------------------------
            r_value = random.randint(0, 7)
            if r_value>3:
                k = utils_deblur.blurkernel_synthesis(h=25)  # motion blur
            else:
                sf_k = random.choice(self.scales)
                k = utils_sisr.gen_kernel(scale_factor=np.array([sf_k, sf_k]))  # Gaussian blur
                mode_k = random.randint(0, 7)
                k = util.augment_img(k, mode=mode_k)

            # ---------------------------
            # 3) noise level (zero for roughly 1 in 9 samples)
            # ---------------------------
            if random.randint(0, 8) == 1:
                noise_level = 0/255.0
            else:
                noise_level = np.random.randint(0, self.sigma_max)/255.0

            # ---------------------------
            # Low-quality image
            # ---------------------------
            # NOTE(review): scipy.ndimage.filters is deprecated in recent SciPy
            # (use scipy.ndimage.convolve); requires a version that still exposes it.
            img_L = ndimage.filters.convolve(patch_H, np.expand_dims(k, axis=2), mode='wrap')
            img_L = img_L[0::self.sf, 0::self.sf, ...]
            # add Gaussian noise
            img_L = util.uint2single(img_L) + np.random.normal(0, noise_level, img_L.shape)
            img_H = patch_H

        else:
            k = self.kernels[0, 0].astype(np.float64)  # validation kernel
            k /= np.sum(k)
            noise_level = 0./255.0  # validation noise level

            # ------------------------------------
            # modcrop
            # ------------------------------------
            img_H = util.modcrop(img_H, self.sf_validation)

            img_L = ndimage.filters.convolve(img_H, np.expand_dims(k, axis=2), mode='wrap')  # blur
            img_L = img_L[0::self.sf_validation, 0::self.sf_validation, ...]  # downsampling
            img_L = util.uint2single(img_L) + np.random.normal(0, noise_level, img_L.shape)
            self.sf = self.sf_validation

        k = util.single2tensor3(np.expand_dims(np.float32(k), axis=2))
        img_H, img_L = util.uint2tensor3(img_H), util.single2tensor3(img_L)
        noise_level = torch.FloatTensor([noise_level]).view([1,1,1])

        return {'L': img_L, 'H': img_H, 'k': k, 'sigma': noise_level, 'sf': self.sf, 'L_path': L_path, 'H_path': H_path}

    def __len__(self):
        return len(self.paths_H)
| 5,037 | 38.669291 | 120 | py |
MaskedDenoising | MaskedDenoising-main/data/dataset_plainpatch.py | import os.path
import random
import numpy as np
import torch.utils.data as data
import utils.utils_image as util
class DatasetPlainPatch(data.Dataset):
    '''
    # -----------------------------------------
    # Get L/H for image-to-image mapping.
    # Both "paths_L" and "paths_H" are needed.
    # -----------------------------------------
    # e.g., train denoiser with L and H patches
    # create a large patch dataset first
    # -----------------------------------------
    '''

    def __init__(self, opt):
        """Pre-extract aligned L/H patches from a random sample of image pairs.

        Bug fix: the original code referenced the undefined attribute
        ``self.path_size`` when allocating the patch buffers and cropping,
        while the option is stored as ``self.patch_size`` — construction
        raised AttributeError.  All references now use ``self.patch_size``.
        """
        super(DatasetPlainPatch, self).__init__()
        print('Get L/H for image-to-image mapping. Both "paths_L" and "paths_H" are needed.')
        self.opt = opt
        self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
        self.patch_size = self.opt['H_size'] if self.opt['H_size'] else 64

        self.num_patches_per_image = opt['num_patches_per_image'] if opt['num_patches_per_image'] else 40
        self.num_sampled = opt['num_sampled'] if opt['num_sampled'] else 3000

        # -------------------
        # get the path of L/H
        # -------------------
        self.paths_H = util.get_image_paths(opt['dataroot_H'])
        self.paths_L = util.get_image_paths(opt['dataroot_L'])

        assert self.paths_H, 'Error: H path is empty.'
        assert self.paths_L, 'Error: L path is empty. This dataset uses L path, you can use dataset_dnpatchh'
        if self.paths_L and self.paths_H:
            assert len(self.paths_L) == len(self.paths_H), 'H and L datasets have different number of images - {}, {}.'.format(len(self.paths_L), len(self.paths_H))

        # ------------------------------------
        # number of sampled images cannot exceed the dataset size
        # ------------------------------------
        self.num_sampled = min(self.num_sampled, len(self.paths_H))

        # ------------------------------------
        # pre-allocate uint8 buffers for all patches
        # ------------------------------------
        self.total_patches = self.num_sampled * self.num_patches_per_image
        self.H_data = np.zeros([self.total_patches, self.patch_size, self.patch_size, self.n_channels], dtype=np.uint8)
        self.L_data = np.zeros([self.total_patches, self.patch_size, self.patch_size, self.n_channels], dtype=np.uint8)

        # ------------------------------------
        # fill the buffers
        # ------------------------------------
        self.update_data()

    def update_data(self):
        """Resample image pairs and refill the whole L/H patch buffers."""
        self.index_sampled = random.sample(range(0, len(self.paths_H), 1), self.num_sampled)
        n_count = 0

        for i in range(len(self.index_sampled)):
            L_patches, H_patches = self.get_patches(self.index_sampled[i])
            for (L_patch, H_patch) in zip(L_patches, H_patches):
                self.L_data[n_count, :, :, :] = L_patch
                self.H_data[n_count, :, :, :] = H_patch
                n_count += 1
        print('Training data updated! Total number of patches is: %5.2f X %5.2f = %5.2f\n' % (len(self.H_data)//128, 128, len(self.H_data)))

    def get_patches(self, index):
        """Extract num_patches_per_image aligned random L/H patches from one image pair."""
        L_path = self.paths_L[index]
        H_path = self.paths_H[index]
        img_L = util.imread_uint(L_path, self.n_channels)  # uint format
        img_H = util.imread_uint(H_path, self.n_channels)  # uint format

        H, W = img_H.shape[:2]

        L_patches, H_patches = [], []

        num = self.num_patches_per_image
        for _ in range(num):
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            L_patch = img_L[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size, :]
            H_patch = img_H[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size, :]
            L_patches.append(L_patch)
            H_patches.append(H_patch)
        return L_patches, H_patches

    def __getitem__(self, index):
        """Return one {'L','H'} tensor pair (augmented in-memory patch in train phase)."""
        if self.opt['phase'] == 'train':
            patch_L, patch_H = self.L_data[index], self.H_data[index]

            # --------------------------------
            # same random flip/rotation keeps the pair aligned
            # --------------------------------
            mode = random.randint(0, 7)
            patch_L = util.augment_img(patch_L, mode=mode)
            patch_H = util.augment_img(patch_H, mode=mode)
            patch_L, patch_H = util.uint2tensor3(patch_L), util.uint2tensor3(patch_H)
        else:
            L_path, H_path = self.paths_L[index], self.paths_H[index]
            patch_L = util.imread_uint(L_path, self.n_channels)
            patch_H = util.imread_uint(H_path, self.n_channels)
            patch_L, patch_H = util.uint2tensor3(patch_L), util.uint2tensor3(patch_H)

        return {'L': patch_L, 'H': patch_H}

    def __len__(self):
        return self.total_patches
| 5,097 | 37.621212 | 164 | py |
MaskedDenoising | MaskedDenoising-main/data/dataset_dncnn.py | import os.path
import random
import numpy as np
import torch
import torch.utils.data as data
import utils.utils_image as util
class DatasetDnCNN(data.Dataset):
    """AWGN denoising dataset with a fixed noise level (DnCNN-style).

    Only 'dataroot_H' is required: the noisy input L is synthesized from
    the clean image H by adding white Gaussian noise of sigma/255.  The
    train phase yields randomly cropped and augmented patch pairs; the
    test phase adds seed-0 noise to the full image for reproducibility.
    """

    def __init__(self, opt):
        super(DatasetDnCNN, self).__init__()
        print('Dataset: Denosing on AWGN with fixed sigma. Only dataroot_H is needed.')
        self.opt = opt
        self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
        self.patch_size = opt['H_size'] if opt['H_size'] else 64
        self.sigma = opt['sigma'] if opt['sigma'] else 25
        self.sigma_test = opt['sigma_test'] if opt['sigma_test'] else self.sigma
        # list of clean image paths (None if the root is None)
        self.paths_H = util.get_image_paths(opt['dataroot_H'])

    def _train_pair(self, img_H):
        """Crop/augment a clean patch, then synthesize its noisy counterpart."""
        height, width, _ = img_H.shape
        top = random.randint(0, max(0, height - self.patch_size))
        left = random.randint(0, max(0, width - self.patch_size))
        patch_H = img_H[top:top + self.patch_size, left:left + self.patch_size, :]
        patch_H = util.augment_img(patch_H, mode=random.randint(0, 7))

        # HWC uint8 -> CHW tensor, then add AWGN to a clone
        img_H = util.uint2tensor3(patch_H)
        img_L = img_H.clone()
        img_L.add_(torch.randn(img_L.size()).mul_(self.sigma / 255.0))
        return img_L, img_H

    def _test_pair(self, img_H):
        """Add seed-0 Gaussian noise to the full image for reproducible testing."""
        img_H = util.uint2single(img_H)
        img_L = np.copy(img_H)
        np.random.seed(seed=0)
        img_L += np.random.normal(0, self.sigma_test / 255.0, img_L.shape)
        return util.single2tensor3(img_L), util.single2tensor3(img_H)

    def __getitem__(self, index):
        H_path = self.paths_H[index]
        L_path = H_path
        img_H = util.imread_uint(H_path, self.n_channels)

        if self.opt['phase'] == 'train':
            img_L, img_H = self._train_pair(img_H)
        else:
            img_L, img_H = self._test_pair(img_H)

        return {'L': img_L, 'H': img_H, 'H_path': H_path, 'L_path': L_path}

    def __len__(self):
        return len(self.paths_H)
| 3,505 | 33.372549 | 92 | py |
MaskedDenoising | MaskedDenoising-main/data/dataset_dpsr.py | import random
import numpy as np
import torch
import torch.utils.data as data
import utils.utils_image as util
class DatasetDPSR(data.Dataset):
    '''
    # -----------------------------------------
    # Get L/H/M for noisy image SR.
    # Only "paths_H" is needed, sythesize bicubicly downsampled L on-the-fly.
    # -----------------------------------------
    # e.g., SRResNet super-resolver prior for DPSR
    # -----------------------------------------
    '''

    def __init__(self, opt):
        """Read options; sigma is the [min, max] training noise range in 0-255 units."""
        super(DatasetDPSR, self).__init__()
        self.opt = opt
        self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
        self.sf = opt['scale'] if opt['scale'] else 4
        self.patch_size = self.opt['H_size'] if self.opt['H_size'] else 96
        self.L_size = self.patch_size // self.sf
        self.sigma = opt['sigma'] if opt['sigma'] else [0, 50]
        self.sigma_min, self.sigma_max = self.sigma[0], self.sigma[1]
        self.sigma_test = opt['sigma_test'] if opt['sigma_test'] else 0

        # ------------------------------------
        # get paths of L/H
        # ------------------------------------
        self.paths_H = util.get_image_paths(opt['dataroot_H'])
        self.paths_L = util.get_image_paths(opt['dataroot_L'])  # not used below; L is synthesized on-the-fly
        assert self.paths_H, 'Error: H path is empty.'

    def __getitem__(self, index):
        """Return {'L','H','L_path','H_path'}; L has the noise-level map M concatenated as an extra channel."""

        # ------------------------------------
        # get H image
        # ------------------------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)
        img_H = util.uint2single(img_H)

        # ------------------------------------
        # modcrop for SR
        # ------------------------------------
        img_H = util.modcrop(img_H, self.sf)

        # ------------------------------------
        # sythesize L image via matlab's bicubic
        # ------------------------------------
        H, W, _ = img_H.shape
        img_L = util.imresize_np(img_H, 1 / self.sf, True)

        if self.opt['phase'] == 'train':
            """
            # --------------------------------
            # get L/H patch pairs
            # --------------------------------
            """
            H, W, C = img_L.shape

            # --------------------------------
            # randomly crop L patch
            # --------------------------------
            rnd_h = random.randint(0, max(0, H - self.L_size))
            rnd_w = random.randint(0, max(0, W - self.L_size))
            img_L = img_L[rnd_h:rnd_h + self.L_size, rnd_w:rnd_w + self.L_size, :]

            # --------------------------------
            # crop corresponding H patch
            # --------------------------------
            rnd_h_H, rnd_w_H = int(rnd_h * self.sf), int(rnd_w * self.sf)
            img_H = img_H[rnd_h_H:rnd_h_H + self.patch_size, rnd_w_H:rnd_w_H + self.patch_size, :]

            # --------------------------------
            # augmentation - flip and/or rotate
            # --------------------------------
            mode = random.randint(0, 7)
            img_L, img_H = util.augment_img(img_L, mode=mode), util.augment_img(img_H, mode=mode)

            # --------------------------------
            # get patch pairs
            # --------------------------------
            img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(img_L)

            # --------------------------------
            # select noise level and get Gaussian noise
            # (10% of training samples are noise-free)
            # --------------------------------
            if random.random() < 0.1:
                noise_level = torch.zeros(1).float()
            else:
                noise_level = torch.FloatTensor([np.random.uniform(self.sigma_min, self.sigma_max)])/255.0
                # noise_level = torch.rand(1)*50/255.0
                # noise_level = torch.min(torch.from_numpy(np.float32([7*np.random.chisquare(2.5)/255.0])),torch.Tensor([50./255.]))

        else:
            img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(img_L)
            # NOTE(review): sigma_test is NOT divided by 255 here, unlike the
            # train branch — confirm callers pass it already scaled to [0, 1].
            noise_level = torch.FloatTensor([self.sigma_test])

        # ------------------------------------
        # add noise
        # ------------------------------------
        noise = torch.randn(img_L.size()).mul_(noise_level).float()
        img_L.add_(noise)

        # ------------------------------------
        # get noise level map M (constant map, same spatial size as L)
        # ------------------------------------
        M_vector = noise_level.unsqueeze(1).unsqueeze(1)
        M = M_vector.repeat(1, img_L.size()[-2], img_L.size()[-1])

        """
        # -------------------------------------
        # concat L and noise level map M
        # -------------------------------------
        """
        img_L = torch.cat((img_L, M), 0)

        L_path = H_path

        return {'L': img_L, 'H': img_H, 'L_path': L_path, 'H_path': H_path}

    def __len__(self):
        return len(self.paths_H)
| 4,930 | 36.356061 | 132 | py |
MaskedDenoising | MaskedDenoising-main/data/dataset_masked_denoising.py | import random
import numpy as np
import torch.utils.data as data
import utils.utils_image as util
import os
from utils import utils_mask
class DatasetMaskedDenoising(data.Dataset):
    """Dataset producing masked noisy/clean pairs for denoiser training.

    Only 'dataroot_H' is required; the degraded input L is synthesized on
    the fly by utils_mask.input_mask_with_noise (noise injection plus the
    optional input masking controlled by 'if_mask', 'mask1' and 'mask2').
    """

    def __init__(self, opt):
        super(DatasetMaskedDenoising, self).__init__()
        self.opt = opt
        self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
        self.sf = opt['scale'] if opt['scale'] else 1
        self.lq_patchsize = self.opt['lq_patchsize'] if self.opt['lq_patchsize'] else 64
        self.patch_size = self.opt['H_size'] if self.opt['H_size'] else self.lq_patchsize * self.sf

        self.paths_H = util.get_image_paths(opt['dataroot_H'])
        print(f'len(self.paths_H): {len(self.paths_H)}')
        assert self.paths_H, 'Error: H path is empty.'
        self.if_mask = bool(opt['if_mask'])

    def __getitem__(self, index):
        L_path = None

        # load the clean image as uint8 HWC
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)
        img_name, ext = os.path.splitext(os.path.basename(H_path))

        # images smaller than the patch become a constant random-color tile
        H, W, C = img_H.shape
        if H < self.patch_size or W < self.patch_size:
            color = np.random.randint(0, 256, size=[1, 1, self.n_channels], dtype=np.uint8)
            img_H = np.tile(color, (self.patch_size, self.patch_size, 1))

        if self.opt['phase'] == 'train':
            # random crop followed by a random flip/rotation
            H, W, C = img_H.shape
            top = random.randint(0, max(0, H - self.patch_size))
            left = random.randint(0, max(0, W - self.patch_size))
            img_H = img_H[top:top + self.patch_size, left:left + self.patch_size, :]
            img_H = util.augment_img(img_H, mode=random.randint(0, 7))
            img_H = util.uint2single(img_H)
            img_L, img_H = utils_mask.input_mask_with_noise(
                img_H,
                sf=self.sf,
                lq_patchsize=self.lq_patchsize,
                noise_level=self.opt['noise_level'],
                if_mask=self.if_mask,
                mask1=self.opt['mask1'],
                mask2=self.opt['mask2'])
        else:
            img_H = util.uint2single(img_H)
            img_L, img_H = utils_mask.input_mask_with_noise(img_H, self.sf, lq_patchsize=self.lq_patchsize)

        # HWC numpy -> CHW tensor
        img_H = util.single2tensor3(img_H)
        img_L = util.single2tensor3(img_L)

        if L_path is None:
            L_path = H_path

        return {'L': img_L, 'H': img_H, 'L_path': L_path, 'H_path': H_path}

    def __len__(self):
        return len(self.paths_H)
| 3,340 | 38.77381 | 139 | py |
MaskedDenoising | MaskedDenoising-main/data/dataset_l.py | import torch.utils.data as data
import utils.utils_image as util
class DatasetL(data.Dataset):
    """Input-only dataset for inference: yields L images and their paths.

    Only 'dataroot_L' is required; no ground truth is loaded.
    """

    def __init__(self, opt):
        super(DatasetL, self).__init__()
        print('Read L in testing. Only "dataroot_L" is needed.')
        self.opt = opt
        self.n_channels = opt['n_channels'] if opt['n_channels'] else 3

        # collect every image file under the L root
        self.paths_L = util.get_image_paths(opt['dataroot_L'])
        assert self.paths_L, 'Error: L paths are empty.'

    def __getitem__(self, index):
        L_path = self.paths_L[index]
        # read as uint8 HWC, then convert to a CHW float tensor
        img_L = util.uint2tensor3(util.imread_uint(L_path, self.n_channels))
        return {'L': img_L, 'L_path': L_path}

    def __len__(self):
        return len(self.paths_L)
| 1,337 | 29.409091 | 71 | py |
MaskedDenoising | MaskedDenoising-main/data/dataset_ffdnet.py | import random
import numpy as np
import torch
import torch.utils.data as data
import utils.utils_image as util
class DatasetFFDNet(data.Dataset):
    """
    # -----------------------------------------
    # Get L/H/M for denosing on AWGN with a range of sigma.
    # Only dataroot_H is needed.
    # -----------------------------------------
    # e.g., FFDNet, H = f(L, sigma), sigma is noise level
    # -----------------------------------------
    """

    def __init__(self, opt):
        """Read options; sigma is the [min, max] training noise range in 0-255 units."""
        super(DatasetFFDNet, self).__init__()
        self.opt = opt
        self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
        self.patch_size = self.opt['H_size'] if opt['H_size'] else 64
        self.sigma = opt['sigma'] if opt['sigma'] else [0, 75]
        self.sigma_min, self.sigma_max = self.sigma[0], self.sigma[1]
        self.sigma_test = opt['sigma_test'] if opt['sigma_test'] else 25

        # -------------------------------------
        # get the path of H, return None if input is None
        # -------------------------------------
        self.paths_H = util.get_image_paths(opt['dataroot_H'])

    def __getitem__(self, index):
        """Return {'L','H','C','L_path','H_path'}; 'C' is the (1,1,1) noise-level tensor."""

        # -------------------------------------
        # get H image
        # -------------------------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)
        L_path = H_path

        if self.opt['phase'] == 'train':
            """
            # --------------------------------
            # get L/H/M patch pairs
            # --------------------------------
            """
            H, W = img_H.shape[:2]

            # ---------------------------------
            # randomly crop the patch
            # ---------------------------------
            rnd_h = random.randint(0, max(0, H - self.patch_size))
            rnd_w = random.randint(0, max(0, W - self.patch_size))
            patch_H = img_H[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size, :]

            # ---------------------------------
            # augmentation - flip, rotate
            # ---------------------------------
            mode = random.randint(0, 7)
            patch_H = util.augment_img(patch_H, mode=mode)

            # ---------------------------------
            # HWC to CHW, numpy(uint) to tensor
            # ---------------------------------
            img_H = util.uint2tensor3(patch_H)
            img_L = img_H.clone()

            # ---------------------------------
            # get noise level (uniform in [sigma_min, sigma_max], scaled to [0,1])
            # ---------------------------------
            # noise_level = torch.FloatTensor([np.random.randint(self.sigma_min, self.sigma_max)])/255.0
            noise_level = torch.FloatTensor([np.random.uniform(self.sigma_min, self.sigma_max)])/255.0

            # ---------------------------------
            # add noise
            # ---------------------------------
            noise = torch.randn(img_L.size()).mul_(noise_level).float()
            img_L.add_(noise)
        else:
            """
            # --------------------------------
            # get L/H/sigma image pairs
            # --------------------------------
            """
            img_H = util.uint2single(img_H)
            img_L = np.copy(img_H)
            # fixed seed makes validation noise reproducible across runs
            np.random.seed(seed=0)
            img_L += np.random.normal(0, self.sigma_test/255.0, img_L.shape)
            noise_level = torch.FloatTensor([self.sigma_test/255.0])
            # ---------------------------------
            # L/H image pairs
            # ---------------------------------
            img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(img_L)

        noise_level = noise_level.unsqueeze(1).unsqueeze(1)

        return {'L': img_L, 'H': img_H, 'C': noise_level, 'L_path': L_path, 'H_path': H_path}

    def __len__(self):
        return len(self.paths_H)
| 3,884 | 36.355769 | 104 | py |
MaskedDenoising | MaskedDenoising-main/data/dataset_srmd.py | import random
import numpy as np
import torch
import torch.utils.data as data
import utils.utils_image as util
from utils import utils_sisr
import hdf5storage
import os
class DatasetSRMD(data.Dataset):
    '''
    # -----------------------------------------
    # Get L/H/M for noisy image SR with Gaussian kernels.
    # Only "paths_H" is needed, sythesize bicubicly downsampled L on-the-fly.
    # -----------------------------------------
    # e.g., SRMD, H = f(L, kernel, sigma), sigma is noise level
    # -----------------------------------------
    '''

    def __init__(self, opt):
        """Read options and load the PCA matrix that encodes kernels into short codes.

        Fix applied in __getitem__: the test branch contained a duplicated
        assignment (``noise_level = noise_level = ...``); it is now a single
        assignment with identical behavior.
        """
        super(DatasetSRMD, self).__init__()
        self.opt = opt
        self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
        self.sf = opt['scale'] if opt['scale'] else 4
        self.patch_size = self.opt['H_size'] if self.opt['H_size'] else 96
        self.L_size = self.patch_size // self.sf
        self.sigma = opt['sigma'] if opt['sigma'] else [0, 50]
        self.sigma_min, self.sigma_max = self.sigma[0], self.sigma[1]
        self.sigma_test = opt['sigma_test'] if opt['sigma_test'] else 0

        # -------------------------------------
        # PCA projection matrix (projects the vectorized kernel to a short code)
        # -------------------------------------
        self.p = hdf5storage.loadmat(os.path.join('kernels', 'srmd_pca_pytorch.mat'))['p']
        self.ksize = int(np.sqrt(self.p.shape[-1]))  # kernel size

        # ------------------------------------
        # get paths of L/H
        # ------------------------------------
        self.paths_H = util.get_image_paths(opt['dataroot_H'])
        self.paths_L = util.get_image_paths(opt['dataroot_L'])

    def __getitem__(self, index):
        """Return {'L','H','L_path','H_path'}; L carries the degradation map M as extra channels."""

        # ------------------------------------
        # get H image
        # ------------------------------------
        H_path = self.paths_H[index]
        img_H = util.imread_uint(H_path, self.n_channels)
        img_H = util.uint2single(img_H)

        # ------------------------------------
        # modcrop for SR
        # ------------------------------------
        img_H = util.modcrop(img_H, self.sf)

        # ------------------------------------
        # random anisotropic Gaussian kernel in train, fixed kernel in test
        # ------------------------------------
        if self.opt['phase'] == 'train':
            l_max = 10
            theta = np.pi*random.random()
            l1 = 0.1+l_max*random.random()
            l2 = 0.1+(l1-0.1)*random.random()
            kernel = utils_sisr.anisotropic_Gaussian(ksize=self.ksize, theta=theta, l1=l1, l2=l2)
        else:
            kernel = utils_sisr.anisotropic_Gaussian(ksize=self.ksize, theta=np.pi, l1=0.1, l2=0.1)

        # project the vectorized kernel onto the PCA basis (column-major like MATLAB)
        k = np.reshape(kernel, (-1), order="F")
        k_reduced = np.dot(self.p, k)
        k_reduced = torch.from_numpy(k_reduced).float()

        # ------------------------------------
        # sythesize L image via specified degradation model
        # ------------------------------------
        H, W, _ = img_H.shape
        img_L = utils_sisr.srmd_degradation(img_H, kernel, self.sf)
        img_L = np.float32(img_L)

        if self.opt['phase'] == 'train':
            """
            # --------------------------------
            # get L/H patch pairs
            # --------------------------------
            """
            H, W, C = img_L.shape

            # --------------------------------
            # randomly crop L patch
            # --------------------------------
            rnd_h = random.randint(0, max(0, H - self.L_size))
            rnd_w = random.randint(0, max(0, W - self.L_size))
            img_L = img_L[rnd_h:rnd_h + self.L_size, rnd_w:rnd_w + self.L_size, :]

            # --------------------------------
            # crop corresponding H patch
            # --------------------------------
            rnd_h_H, rnd_w_H = int(rnd_h * self.sf), int(rnd_w * self.sf)
            img_H = img_H[rnd_h_H:rnd_h_H + self.patch_size, rnd_w_H:rnd_w_H + self.patch_size, :]

            # --------------------------------
            # augmentation - flip and/or rotate (same mode keeps the pair aligned)
            # --------------------------------
            mode = random.randint(0, 7)
            img_L, img_H = util.augment_img(img_L, mode=mode), util.augment_img(img_H, mode=mode)

            # --------------------------------
            # get patch pairs
            # --------------------------------
            img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(img_L)

            # --------------------------------
            # select noise level (10% of samples are noise-free)
            # --------------------------------
            if random.random() < 0.1:
                noise_level = torch.zeros(1).float()
            else:
                noise_level = torch.FloatTensor([np.random.uniform(self.sigma_min, self.sigma_max)])/255.0
                # noise_level = torch.rand(1)*50/255.0
                # noise_level = torch.min(torch.from_numpy(np.float32([7*np.random.chisquare(2.5)/255.0])),torch.Tensor([50./255.]))
        else:
            img_H, img_L = util.single2tensor3(img_H), util.single2tensor3(img_L)
            noise_level = torch.FloatTensor([self.sigma_test])

        # ------------------------------------
        # add noise
        # ------------------------------------
        noise = torch.randn(img_L.size()).mul_(noise_level).float()
        img_L.add_(noise)

        # ------------------------------------
        # degradation map M = [PCA kernel code, noise level], tiled spatially
        # ------------------------------------
        M_vector = torch.cat((k_reduced, noise_level), 0).unsqueeze(1).unsqueeze(1)
        M = M_vector.repeat(1, img_L.size()[-2], img_L.size()[-1])

        """
        # -------------------------------------
        # concat L and noise level map M
        # -------------------------------------
        """
        img_L = torch.cat((img_L, M), 0)

        L_path = H_path

        return {'L': img_L, 'H': img_H, 'L_path': L_path, 'H_path': H_path}

    def __len__(self):
        return len(self.paths_H)
| 6,011 | 37.538462 | 132 | py |
MaskedDenoising | MaskedDenoising-main/data/dataset_dnpatch.py | import random
import numpy as np
import torch
import torch.utils.data as data
import utils.utils_image as util
class DatasetDnPatch(data.Dataset):
"""
# -----------------------------------------
# Get L/H for denosing on AWGN with fixed sigma.
# ****Get all H patches first****
# Only dataroot_H is needed.
# -----------------------------------------
# e.g., DnCNN with BSD400
# -----------------------------------------
"""
def __init__(self, opt):
super(DatasetDnPatch, self).__init__()
print('Get L/H for denosing on AWGN with fixed sigma. Only dataroot_H is needed.')
self.opt = opt
self.n_channels = opt['n_channels'] if opt['n_channels'] else 3
self.patch_size = opt['H_size'] if opt['H_size'] else 64
self.sigma = opt['sigma'] if opt['sigma'] else 25
self.sigma_test = opt['sigma_test'] if opt['sigma_test'] else self.sigma
self.num_patches_per_image = opt['num_patches_per_image'] if opt['num_patches_per_image'] else 40
self.num_sampled = opt['num_sampled'] if opt['num_sampled'] else 3000
# ------------------------------------
# get paths of H
# ------------------------------------
self.paths_H = util.get_image_paths(opt['dataroot_H'])
assert self.paths_H, 'Error: H path is empty.'
# ------------------------------------
# number of sampled H images
# ------------------------------------
self.num_sampled = min(self.num_sampled, len(self.paths_H))
# ------------------------------------
# reserve space with zeros
# ------------------------------------
self.total_patches = self.num_sampled * self.num_patches_per_image
self.H_data = np.zeros([self.total_patches, self.patch_size, self.patch_size, self.n_channels], dtype=np.uint8)
# ------------------------------------
# update H patches
# ------------------------------------
self.update_data()
def update_data(self):
"""
# ------------------------------------
# update whole H patches
# ------------------------------------
"""
self.index_sampled = random.sample(range(0, len(self.paths_H), 1), self.num_sampled)
n_count = 0
for i in range(len(self.index_sampled)):
H_patches = self.get_patches(self.index_sampled[i])
for H_patch in H_patches:
self.H_data[n_count,:,:,:] = H_patch
n_count += 1
print('Training data updated! Total number of patches is: %5.2f X %5.2f = %5.2f\n' % (len(self.H_data)//128, 128, len(self.H_data)))
def get_patches(self, index):
"""
# ------------------------------------
# get H patches from an H image
# ------------------------------------
"""
H_path = self.paths_H[index]
img_H = util.imread_uint(H_path, self.n_channels) # uint format
H, W = img_H.shape[:2]
H_patches = []
num = self.num_patches_per_image
for _ in range(num):
rnd_h = random.randint(0, max(0, H - self.patch_size))
rnd_w = random.randint(0, max(0, W - self.patch_size))
H_patch = img_H[rnd_h:rnd_h + self.patch_size, rnd_w:rnd_w + self.patch_size, :]
H_patches.append(H_patch)
return H_patches
    def __getitem__(self, index):
        """Return one L/H training pair as a dict.

        Train phase: take the pre-extracted patch ``self.H_data[index]``,
        apply a random flip/rotation (8 modes), and synthesise L by adding
        Gaussian noise of level ``self.sigma``. Test phase: load the image at
        ``self.paths_H[index]`` and add noise of level ``self.sigma_test``
        with a fixed numpy seed, so validation inputs are reproducible.
        """
        # Placeholder path for the train branch (patches have no file of
        # their own); overwritten with the real path in the test branch.
        H_path = 'toy.png'
        if self.opt['phase'] == 'train':
            patch_H = self.H_data[index]
            # --------------------------------
            # augmentation - flip and/or rotate
            # --------------------------------
            mode = random.randint(0, 7)
            patch_H = util.augment_img(patch_H, mode=mode)
            patch_H = util.uint2tensor3(patch_H)
            patch_L = patch_H.clone()
            # ------------------------------------
            # add noise (in-place on the cloned L patch only)
            # ------------------------------------
            noise = torch.randn(patch_L.size()).mul_(self.sigma/255.0)
            patch_L.add_(noise)
        else:
            H_path = self.paths_H[index]
            img_H = util.imread_uint(H_path, self.n_channels)
            img_H = util.uint2single(img_H)
            img_L = np.copy(img_H)
            # ------------------------------------
            # add noise with a fixed seed for deterministic evaluation
            # ------------------------------------
            np.random.seed(seed=0)
            img_L += np.random.normal(0, self.sigma_test/255.0, img_L.shape)
            patch_L, patch_H = util.single2tensor3(img_L), util.single2tensor3(img_H)
        L_path = H_path
        return {'L': patch_L, 'H': patch_H, 'L_path': L_path, 'H_path': H_path}
def __len__(self):
return len(self.H_data)
| 4,808 | 34.88806 | 141 | py |
MaskedDenoising | MaskedDenoising-main/data/dataset_video_train.py | import numpy as np
import random
import torch
from pathlib import Path
import torch.utils.data as data
import utils.utils_video as utils_video
class VideoRecurrentTrainDataset(data.Dataset):
    """Video dataset for training recurrent networks.
    The keys are generated from a meta info txt file.
    basicsr/data/meta_info/meta_info_XXX_GT.txt
    Each line contains:
    1. subfolder (clip) name; 2. frame number; 3. image shape, separated by
    a white space.
    Examples:
    720p_240fps_1 100 (720,1280,3)
    720p_240fps_3 100 (720,1280,3)
    ...
    Key examples: "720p_240fps_1/00000"
    GT (gt): Ground-Truth;
    LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames.
    Args:
        opt (dict): Config for train dataset. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            dataroot_lq (str): Data root path for lq.
            dataroot_flow (str, optional): Data root path for flow.
            meta_info_file (str): Path for meta information file.
            val_partition (str): Validation partition types. 'REDS4' or
                'official'.
            io_backend (dict): IO backend type and other kwarg.
            num_frame (int): Window size for input frames.
            gt_size (int): Cropped patched size for gt patches.
            interval_list (list): Interval list for temporal augmentation.
            random_reverse (bool): Random reverse input frames.
            use_hflip (bool): Use horizontal flips.
            use_rot (bool): Use rotation (use vertical flip and transposing h
                and w for implementation).
            scale (bool): Scale, which will be added automatically.
    """
    def __init__(self, opt):
        super(VideoRecurrentTrainDataset, self).__init__()
        self.opt = opt
        self.scale = opt.get('scale', 4)
        self.gt_size = opt.get('gt_size', 256)
        self.gt_root, self.lq_root = Path(opt['dataroot_gt']), Path(opt['dataroot_lq'])
        self.filename_tmpl = opt.get('filename_tmpl', '08d')
        self.filename_ext = opt.get('filename_ext', 'png')
        self.num_frame = opt['num_frame']
        keys = []
        total_num_frames = []  # some clips may not have 100 frames
        start_frames = []  # some clips may not start from 00000
        # Each meta-info line: "<folder> <frame_num> <shape> <start_frame>".
        # One key per frame of every clip; frame count / start frame are
        # duplicated per key so they can be indexed alongside the keys.
        with open(opt['meta_info_file'], 'r') as fin:
            for line in fin:
                folder, frame_num, _, start_frame = line.split(' ')
                keys.extend([f'{folder}/{i:{self.filename_tmpl}}' for i in range(int(start_frame), int(start_frame)+int(frame_num))])
                total_num_frames.extend([int(frame_num) for i in range(int(frame_num))])
                start_frames.extend([int(start_frame) for i in range(int(frame_num))])
        # remove the video clips used in validation
        if opt['name'] == 'REDS':
            if opt['val_partition'] == 'REDS4':
                val_partition = ['000', '011', '015', '020']
            elif opt['val_partition'] == 'official':
                val_partition = [f'{v:03d}' for v in range(240, 270)]
            else:
                raise ValueError(f'Wrong validation partition {opt["val_partition"]}.'
                                 f"Supported ones are ['official', 'REDS4'].")
        else:
            val_partition = []
        self.keys = []
        self.total_num_frames = []  # some clips may not have 100 frames
        self.start_frames = []
        # test_mode keeps only validation clips; otherwise keep everything
        # except the validation clips.
        if opt['test_mode']:
            for i, v in zip(range(len(keys)), keys):
                if v.split('/')[0] in val_partition:
                    self.keys.append(keys[i])
                    self.total_num_frames.append(total_num_frames[i])
                    self.start_frames.append(start_frames[i])
        else:
            for i, v in zip(range(len(keys)), keys):
                if v.split('/')[0] not in val_partition:
                    self.keys.append(keys[i])
                    self.total_num_frames.append(total_num_frames[i])
                    self.start_frames.append(start_frames[i])
        # file client (io backend); created lazily in __getitem__ so each
        # dataloader worker opens its own client.
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        self.is_lmdb = False
        if self.io_backend_opt['type'] == 'lmdb':
            self.is_lmdb = True
            if hasattr(self, 'flow_root') and self.flow_root is not None:
                self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root, self.flow_root]
                self.io_backend_opt['client_keys'] = ['lq', 'gt', 'flow']
            else:
                self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root]
                self.io_backend_opt['client_keys'] = ['lq', 'gt']
        # temporal augmentation configs
        self.interval_list = opt.get('interval_list', [1])
        self.random_reverse = opt.get('random_reverse', False)
        interval_str = ','.join(str(x) for x in self.interval_list)
        print(f'Temporal augmentation interval list: [{interval_str}]; '
              f'random reverse is {self.random_reverse}.')
    def __getitem__(self, index):
        """Return a random temporal window of L/H frame stacks for one key."""
        if self.file_client is None:
            self.file_client = utils_video.FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
        key = self.keys[index]
        total_num_frames = self.total_num_frames[index]
        start_frames = self.start_frames[index]
        clip_name, frame_name = key.split('/')  # key example: 000/00000000
        # determine the neighboring frames
        interval = random.choice(self.interval_list)
        # ensure not exceeding the borders: re-draw the start frame if the
        # window of num_frame * interval would run past the clip end
        start_frame_idx = int(frame_name)
        endmost_start_frame_idx = start_frames + total_num_frames - self.num_frame * interval
        if start_frame_idx > endmost_start_frame_idx:
            start_frame_idx = random.randint(start_frames, endmost_start_frame_idx)
        end_frame_idx = start_frame_idx + self.num_frame * interval
        neighbor_list = list(range(start_frame_idx, end_frame_idx, interval))
        # random reverse
        if self.random_reverse and random.random() < 0.5:
            neighbor_list.reverse()
        # get the neighboring LQ and GT frames
        img_lqs = []
        img_gts = []
        for neighbor in neighbor_list:
            if self.is_lmdb:
                img_lq_path = f'{clip_name}/{neighbor:{self.filename_tmpl}}'
                img_gt_path = f'{clip_name}/{neighbor:{self.filename_tmpl}}'
            else:
                img_lq_path = self.lq_root / clip_name / f'{neighbor:{self.filename_tmpl}}.{self.filename_ext}'
                img_gt_path = self.gt_root / clip_name / f'{neighbor:{self.filename_tmpl}}.{self.filename_ext}'
            # get LQ
            img_bytes = self.file_client.get(img_lq_path, 'lq')
            img_lq = utils_video.imfrombytes(img_bytes, float32=True)
            img_lqs.append(img_lq)
            # get GT
            img_bytes = self.file_client.get(img_gt_path, 'gt')
            img_gt = utils_video.imfrombytes(img_bytes, float32=True)
            img_gts.append(img_gt)
        # randomly crop
        img_gts, img_lqs = utils_video.paired_random_crop(img_gts, img_lqs, self.gt_size, self.scale, img_gt_path)
        # augmentation - flip, rotate: LQ and GT are concatenated into one
        # list so they are augmented together; after extend, img_lqs holds
        # 2 * num_frame images, hence the //2 splits below.
        img_lqs.extend(img_gts)
        img_results = utils_video.augment(img_lqs, self.opt['use_hflip'], self.opt['use_rot'])
        img_results = utils_video.img2tensor(img_results)
        img_gts = torch.stack(img_results[len(img_lqs) // 2:], dim=0)
        img_lqs = torch.stack(img_results[:len(img_lqs) // 2], dim=0)
        # img_lqs: (t, c, h, w)
        # img_gts: (t, c, h, w)
        # key: str
        return {'L': img_lqs, 'H': img_gts, 'key': key}
    def __len__(self):
        return len(self.keys)
class VideoRecurrentTrainNonblindDenoisingDataset(VideoRecurrentTrainDataset):
    """Video dataset for training recurrent architectures in non-blind video denoising.

    Only GT frames are read from disk; the noisy input is synthesised on the
    fly with a per-sample noise level drawn from [sigma_min, sigma_max], and
    that level is appended to the input as an extra (constant) channel.
    Args:
        Same as VideoTestDataset.
    """
    def __init__(self, opt):
        super(VideoRecurrentTrainNonblindDenoisingDataset, self).__init__(opt)
        # Noise-level bounds, rescaled from [0, 255] to [0, 1].
        self.sigma_min = self.opt['sigma_min'] / 255.
        self.sigma_max = self.opt['sigma_max'] / 255.
    def __getitem__(self, index):
        """Return a noisy/clean frame-stack pair with a noise-level map channel."""
        if self.file_client is None:
            self.file_client = utils_video.FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
        key = self.keys[index]
        total_num_frames = self.total_num_frames[index]
        start_frames = self.start_frames[index]
        clip_name, frame_name = key.split('/')  # key example: 000/00000000
        # determine the neighboring frames
        interval = random.choice(self.interval_list)
        # ensure not exceeding the borders
        start_frame_idx = int(frame_name)
        endmost_start_frame_idx = start_frames + total_num_frames - self.num_frame * interval
        if start_frame_idx > endmost_start_frame_idx:
            start_frame_idx = random.randint(start_frames, endmost_start_frame_idx)
        end_frame_idx = start_frame_idx + self.num_frame * interval
        neighbor_list = list(range(start_frame_idx, end_frame_idx, interval))
        # random reverse
        if self.random_reverse and random.random() < 0.5:
            neighbor_list.reverse()
        # get the neighboring GT frames
        img_gts = []
        for neighbor in neighbor_list:
            if self.is_lmdb:
                img_gt_path = f'{clip_name}/{neighbor:{self.filename_tmpl}}'
            else:
                img_gt_path = self.gt_root / clip_name / f'{neighbor:{self.filename_tmpl}}.{self.filename_ext}'
            # get GT
            img_bytes = self.file_client.get(img_gt_path, 'gt')
            img_gt = utils_video.imfrombytes(img_bytes, float32=True)
            img_gts.append(img_gt)
        # randomly crop (GT passed twice, scale 1: no LQ counterpart exists)
        img_gts, _ = utils_video.paired_random_crop(img_gts, img_gts, self.gt_size, 1, img_gt_path)
        # augmentation - flip, rotate
        img_gts = utils_video.augment(img_gts, self.opt['use_hflip'], self.opt['use_rot'])
        img_gts = utils_video.img2tensor(img_gts)
        img_gts = torch.stack(img_gts, dim=0)
        # we add noise in the network: one level per sample, broadcast over
        # all frames/pixels, then concatenated as a constant channel so the
        # network receives the noise level (non-blind denoising).
        noise_level = torch.empty((1, 1, 1, 1)).uniform_(self.sigma_min, self.sigma_max)
        noise = torch.normal(mean=0, std=noise_level.expand_as(img_gts))
        img_lqs = img_gts + noise
        t, _, h, w = img_lqs.shape
        img_lqs = torch.cat([img_lqs, noise_level.expand(t, 1, h, w)], 1)
        # img_lqs: (t, c, h, w)
        # img_gts: (t, c, h, w)
        # key: str
        return {'L': img_lqs, 'H': img_gts, 'key': key}
    def __len__(self):
        return len(self.keys)
class VideoRecurrentTrainVimeoDataset(data.Dataset):
    """Vimeo90K dataset for training recurrent networks.
    The keys are generated from a meta info txt file.
    basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt
    Each line contains:
    1. clip name; 2. frame number; 3. image shape, separated by a white space.
    Examples:
    00001/0001 7 (256,448,3)
    00001/0002 7 (256,448,3)
    Key examples: "00001/0001"
    GT (gt): Ground-Truth;
    LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames.
    The neighboring frame list for different num_frame:
    num_frame | frame list
            1 | 4
            3 | 3,4,5
            5 | 2,3,4,5,6
            7 | 1,2,3,4,5,6,7
    Args:
        opt (dict): Config for train dataset. It contains the following keys:
            dataroot_gt (str): Data root path for gt.
            dataroot_lq (str): Data root path for lq.
            meta_info_file (str): Path for meta information file.
            io_backend (dict): IO backend type and other kwarg.
            num_frame (int): Window size for input frames.
            gt_size (int): Cropped patched size for gt patches.
            random_reverse (bool): Random reverse input frames.
            use_hflip (bool): Use horizontal flips.
            use_rot (bool): Use rotation (use vertical flip and transposing h
                and w for implementation).
            scale (bool): Scale, which will be added automatically.
    """
    def __init__(self, opt):
        super(VideoRecurrentTrainVimeoDataset, self).__init__()
        self.opt = opt
        self.gt_root, self.lq_root = Path(opt['dataroot_gt']), Path(opt['dataroot_lq'])
        with open(opt['meta_info_file'], 'r') as fin:
            self.keys = [line.split(' ')[0] for line in fin]
        # file client (io backend)
        self.file_client = None
        self.io_backend_opt = opt['io_backend']
        self.is_lmdb = False
        if self.io_backend_opt['type'] == 'lmdb':
            self.is_lmdb = True
            self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root]
            self.io_backend_opt['client_keys'] = ['lq', 'gt']
        # indices of input images
        # NOTE(review): this num_frame-centred selection is dead code -- it is
        # unconditionally overwritten with the full 7-frame list below.
        self.neighbor_list = [i + (9 - opt['num_frame']) // 2 for i in range(opt['num_frame'])]
        # temporal augmentation configs
        self.random_reverse = opt['random_reverse']
        print(f'Random reverse is {self.random_reverse}.')
        self.flip_sequence = opt.get('flip_sequence', False)
        self.pad_sequence = opt.get('pad_sequence', False)
        # Always use all 7 Vimeo90K frames, regardless of opt['num_frame'].
        self.neighbor_list = [1, 2, 3, 4, 5, 6, 7]
    def __getitem__(self, index):
        """Return a 7-frame (or flipped-to-14 / padded-to-8) L/H pair."""
        if self.file_client is None:
            self.file_client = utils_video.FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
        # random reverse
        # NOTE(review): this reverses the shared self.neighbor_list in place,
        # so the frame order persists (and re-toggles) across __getitem__
        # calls -- presumably acceptable as augmentation, but verify.
        if self.random_reverse and random.random() < 0.5:
            self.neighbor_list.reverse()
        scale = self.opt['scale']
        gt_size = self.opt['gt_size']
        key = self.keys[index]
        clip, seq = key.split('/')  # key example: 00001/0001
        # get the neighboring LQ and GT frames
        img_lqs = []
        img_gts = []
        for neighbor in self.neighbor_list:
            if self.is_lmdb:
                img_lq_path = f'{clip}/{seq}/im{neighbor}'
                img_gt_path = f'{clip}/{seq}/im{neighbor}'
            else:
                img_lq_path = self.lq_root / clip / seq / f'im{neighbor}.png'
                img_gt_path = self.gt_root / clip / seq / f'im{neighbor}.png'
            # LQ
            img_bytes = self.file_client.get(img_lq_path, 'lq')
            img_lq = utils_video.imfrombytes(img_bytes, float32=True)
            # GT
            img_bytes = self.file_client.get(img_gt_path, 'gt')
            img_gt = utils_video.imfrombytes(img_bytes, float32=True)
            img_lqs.append(img_lq)
            img_gts.append(img_gt)
        # randomly crop
        img_gts, img_lqs = utils_video.paired_random_crop(img_gts, img_lqs, gt_size, scale, img_gt_path)
        # augmentation - flip, rotate (LQ and GT augmented together so both
        # receive the same transform; first 7 results are LQ, last 7 are GT)
        img_lqs.extend(img_gts)
        img_results = utils_video.augment(img_lqs, self.opt['use_hflip'], self.opt['use_rot'])
        img_results = utils_video.img2tensor(img_results)
        img_lqs = torch.stack(img_results[:7], dim=0)
        img_gts = torch.stack(img_results[7:], dim=0)
        if self.flip_sequence:  # flip the sequence: 7 frames to 14 frames
            img_lqs = torch.cat([img_lqs, img_lqs.flip(0)], dim=0)
            img_gts = torch.cat([img_gts, img_gts.flip(0)], dim=0)
        elif self.pad_sequence:  # pad the sequence: 7 frames to 8 frames
            img_lqs = torch.cat([img_lqs, img_lqs[-1:,...]], dim=0)
            img_gts = torch.cat([img_gts, img_gts[-1:,...]], dim=0)
        # img_lqs: (t, c, h, w)
        # img_gt: (c, h, w)
        # key: str
        return {'L': img_lqs, 'H': img_gts, 'key': key}
    def __len__(self):
        return len(self.keys)
| 15,730 | 39.648579 | 133 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/networks/equilibrium_u_net.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class ConvBlock(nn.Module):
    """Two bias-free 3x3 convolutions, each followed by LeakyReLU(0.2) and
    Dropout2d. Spatial size is preserved (padding=1); channels go
    in_chans -> out_chans -> out_chans.
    """
    def __init__(self, in_chans, out_chans, drop_prob):
        """
        Args:
            in_chans (int): Number of channels in the input.
            out_chans (int): Number of channels in the output.
            drop_prob (float): Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.drop_prob = drop_prob
        stages = []
        for src, dst in ((in_chans, out_chans), (out_chans, out_chans)):
            stages.append(nn.Conv2d(src, dst, kernel_size=3, padding=1, bias=False))
            stages.append(nn.LeakyReLU(negative_slope=0.2, inplace=True))
            stages.append(nn.Dropout2d(drop_prob))
        self.layers = nn.Sequential(*stages)
        # NOTE: custom Gaussian init is intentionally not applied here; the
        # original kept `self.layers.apply(self.init_weights)` disabled.
    def init_weights(self, m):
        # Gaussian init (std=0.01); available for callers that apply it.
        if type(m) == nn.Linear:
            torch.nn.init.normal_(m.weight, mean=0.0, std=0.01)
            m.bias.data.fill_(0.001)
        if type(m) == nn.Conv2d:
            torch.nn.init.normal_(m.weight, mean=0.0, std=0.01)
    def forward(self, input):
        """Map [B, in_chans, H, W] to [B, out_chans, H, W]."""
        return self.layers(input)
class TransposeConvBlock(nn.Module):
    """2x up-sampling block: a bias-free stride-2 transpose convolution
    followed by LeakyReLU(0.2). The transpose conv is Gaussian-initialised
    (std=0.01) at construction time.
    """
    def __init__(self, in_chans, out_chans):
        """
        Args:
            in_chans (int): Number of channels in the input.
            out_chans (int): Number of channels in the output.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        upsample = nn.ConvTranspose2d(in_chans, out_chans, kernel_size=2, stride=2, bias=False)
        self.layers = nn.Sequential(
            upsample,
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )
        self.layers.apply(self.init_weights)
    def init_weights(self, m):
        # Gaussian init (std=0.01) for linear / transpose-conv weights.
        if type(m) == nn.Linear:
            torch.nn.init.normal_(m.weight, mean=0.0, std=0.01)
            m.bias.data.fill_(0.001)
        if type(m) == nn.ConvTranspose2d:
            torch.nn.init.normal_(m.weight, mean=0.0, std=0.01)
    def forward(self, input):
        """Map [B, in_chans, H, W] to [B, out_chans, 2H, 2W]."""
        return self.layers(input)
class ZerosNet(nn.Module):
    """Degenerate module mapping every input to an all-zero tensor of the
    same shape, implemented arithmetically (multiply-by-zero keeps the
    input's dtype and device).
    """
    def __init__(self):
        super(ZerosNet, self).__init__()
    def forward(self, input):
        return 0.0 * input + 0.0
class UnetModel(nn.Module):
    """
    PyTorch implementation of a U-Net model.
    This is based on:
    Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks
    for biomedical image segmentation. In International Conference on Medical image
    computing and computer-assisted intervention, pages 234-241. Springer, 2015.

    Downsampling uses 2x average pooling, upsampling uses transpose convs with
    skip connections; the final output is clamped to [-1, 1].
    """
    def __init__(self, in_chans, out_chans, chans, num_pool_layers, drop_prob):
        """
        Args:
            in_chans (int): Number of channels in the input to the U-Net model.
            out_chans (int): Number of channels in the output to the U-Net model.
            chans (int): Number of output channels of the first convolution layer.
            num_pool_layers (int): Number of down-sampling and up-sampling layers.
            drop_prob (float): Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.chans = chans
        self.num_pool_layers = num_pool_layers
        self.drop_prob = drop_prob
        # Encoder: channel width doubles at each pooling level.
        self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
        width = chans
        for _ in range(num_pool_layers - 1):
            self.down_sample_layers.append(ConvBlock(width, width * 2, drop_prob))
            width *= 2
        # Bottleneck.
        self.conv = ConvBlock(width, width * 2, drop_prob)
        # Decoder: mirrors the encoder; the last stage adds a 1x1 output head.
        self.up_conv = nn.ModuleList()
        self.up_transpose_conv = nn.ModuleList()
        for _ in range(num_pool_layers - 1):
            self.up_transpose_conv.append(TransposeConvBlock(width * 2, width))
            self.up_conv.append(ConvBlock(width * 2, width, drop_prob))
            width //= 2
        self.up_transpose_conv.append(TransposeConvBlock(width * 2, width))
        self.up_conv.append(
            nn.Sequential(
                ConvBlock(width * 2, width, drop_prob),
                nn.Conv2d(width, self.out_chans, kernel_size=1, stride=1),
            ))
    def forward(self, input):
        """
        Args:
            input (torch.Tensor): Input tensor of shape [batch_size, self.in_chans, height, width]
        Returns:
            (torch.Tensor): Output tensor of shape [batch_size, self.out_chans, height, width]
        """
        skips = []
        out = input
        # Encoder path: save each pre-pool activation for the skip connection.
        for down in self.down_sample_layers:
            out = down(out)
            skips.append(out)
            out = F.avg_pool2d(out, kernel_size=2, stride=2, padding=0)
        out = self.conv(out)
        # Decoder path: upsample, match odd spatial sizes by reflect-padding,
        # concatenate the skip, then convolve.
        for up_transpose, up_block in zip(self.up_transpose_conv, self.up_conv):
            skip = skips.pop()
            out = up_transpose(out)
            pad = [0, 0, 0, 0]
            if out.shape[-1] != skip.shape[-1]:
                pad[1] = 1  # pad right
            if out.shape[-2] != skip.shape[-2]:
                pad[3] = 1  # pad bottom
            if sum(pad) != 0:
                out = F.pad(out, pad, "reflect")
            out = up_block(torch.cat([out, skip], dim=1))
        return torch.clamp(out, -1, 1)
| 6,704 | 34.664894 | 98 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/networks/normalized_equilibrium_u_net.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
from utils.spectral_norm import conv_spectral_norm
import utils.spectral_norm_chen as chen
class ConvBlock(nn.Module):
    """Two spectrally-normalised, bias-free 3x3 convolutions, each followed
    by LeakyReLU(0.2). Spatial size is preserved (padding=1).
    """
    def __init__(self, in_chans, out_chans, drop_prob):
        """
        Args:
            in_chans (int): Number of channels in the input.
            out_chans (int): Number of channels in the output.
            drop_prob (float): Dropout probability (kept for interface
                parity; this variant contains no dropout layers).
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.drop_prob = drop_prob
        self.layers = nn.Sequential(
            conv_spectral_norm(
                nn.Conv2d(in_chans, out_chans, kernel_size=3, padding=1, bias=False),
                sigma=1.0, out_channels=out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            conv_spectral_norm(
                nn.Conv2d(out_chans, out_chans, kernel_size=3, padding=1, bias=False),
                sigma=1.0, out_channels=out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )
    def forward(self, input):
        """Map [B, in_chans, H, W] to [B, out_chans, H, W]."""
        return self.layers(input)
class TransposeConvBlock(nn.Module):
    """2x up-sampling block: spectrally-normalised, bias-free stride-2
    transpose convolution followed by LeakyReLU(0.2).
    """
    def __init__(self, in_chans, out_chans):
        """
        Args:
            in_chans (int): Number of channels in the input.
            out_chans (int): Number of channels in the output.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.layers = nn.Sequential(
            conv_spectral_norm(
                nn.ConvTranspose2d(in_chans, out_chans, kernel_size=2, stride=2, bias=False),
                sigma=1.0, out_channels=out_chans, leakflag=True),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )
    def forward(self, input):
        """Map [B, in_chans, H, W] to [B, out_chans, 2H, 2W]."""
        return self.layers(input)
class ZerosNet(nn.Module):
    """Degenerate module that returns an all-zero tensor shaped like its
    input (multiply-by-zero keeps the input's dtype and device)."""
    def __init__(self):
        super(ZerosNet, self).__init__()
    def forward(self, input):
        return 0.0 * input + 0.0
class UnetModel(nn.Module):
    """
    PyTorch implementation of a U-Net model.
    This is based on:
    Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks
    for biomedical image segmentation. In International Conference on Medical image
    computing and computer-assisted intervention, pages 234–241. Springer, 2015.

    This variant builds on the spectrally-normalised ConvBlock /
    TransposeConvBlock defined above, and (unlike the plain U-Net variants in
    this project) does NOT clamp its output.
    """
    def __init__(self, in_chans, out_chans, chans, num_pool_layers, drop_prob):
        """
        Args:
            in_chans (int): Number of channels in the input to the U-Net model.
            out_chans (int): Number of channels in the output to the U-Net model.
            chans (int): Number of output channels of the first convolution layer.
            num_pool_layers (int): Number of down-sampling and up-sampling layers.
            drop_prob (float): Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.chans = chans
        self.num_pool_layers = num_pool_layers
        self.drop_prob = drop_prob
        # Encoder: channel width doubles at every pooling level.
        self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
        ch = chans
        for i in range(num_pool_layers - 1):
            self.down_sample_layers += [ConvBlock(ch, ch * 2, drop_prob)]
            ch *= 2
        # Bottleneck.
        self.conv = ConvBlock(ch, ch * 2, drop_prob)
        # Decoder: mirrors the encoder; final stage adds a spectrally-
        # normalised 1x1 output head.
        self.up_conv = nn.ModuleList()
        self.up_transpose_conv = nn.ModuleList()
        for i in range(num_pool_layers - 1):
            self.up_transpose_conv += [TransposeConvBlock(ch * 2, ch)]
            self.up_conv += [ConvBlock(ch * 2, ch, drop_prob)]
            ch //= 2
        self.up_transpose_conv += [TransposeConvBlock(ch * 2, ch)]
        layers = []
        layers.append(ConvBlock(ch * 2, ch, drop_prob))
        # conv_spectral_norm presumably constrains the layer's spectral norm
        # to sigma -- see utils.spectral_norm for the exact contract.
        layers.append(conv_spectral_norm(nn.Conv2d(ch, self.out_chans, kernel_size=1, stride=1),
                                         sigma=1.0, out_channels=self.out_chans, kernelsize=1))
        self.up_conv += [nn.Sequential(*layers)]
    def forward(self, input):
        """
        Args:
            input (torch.Tensor): Input tensor of shape [batch_size, self.in_chans, height, width]
        Returns:
            (torch.Tensor): Output tensor of shape [batch_size, self.out_chans, height, width]
        """
        stack = []
        output = input
        # Apply down-sampling layers, saving pre-pool activations as skips.
        for i, layer in enumerate(self.down_sample_layers):
            output = layer(output)
            stack.append(output)
            output = F.avg_pool2d(output, kernel_size=2, stride=2, padding=0)
        output = self.conv(output)
        # Apply up-sampling layers
        for transpose_conv, conv in zip(self.up_transpose_conv, self.up_conv):
            downsample_layer = stack.pop()
            output = transpose_conv(output)
            # Reflect pad on the right/botton if needed to handle odd input dimensions.
            padding = [0, 0, 0, 0]
            if output.shape[-1] != downsample_layer.shape[-1]:
                padding[1] = 1  # Padding right
            if output.shape[-2] != downsample_layer.shape[-2]:
                padding[3] = 1  # Padding bottom
            if sum(padding) != 0:
                output = F.pad(output, padding, "reflect")
            output = torch.cat([output, downsample_layer], dim=1)
            output = conv(output)
        return output
class DnCNN(nn.Module):
    """DnCNN-style denoiser built from spectrally-normalised convolutions.

    Layout: conv+ReLU, then (num_of_layers - 2) x (conv+BN+ReLU), then a
    final conv. All convolutions are bias-free 3x3 with 64 features and are
    wrapped in chen.spectral_norm.
    """
    def __init__(self, channels, num_of_layers=17, lip=1.0):
        # NOTE(review): `lip` is accepted but never used; the spectral-norm
        # wrappers receive no explicit constant here -- confirm whether an
        # explicit Lipschitz bound was intended.
        super(DnCNN, self).__init__()
        kernel_size = 3
        padding = 1
        features = 64
        layers = []
        layers.append(chen.spectral_norm(nn.Conv2d(in_channels=channels, out_channels=features, kernel_size=kernel_size, padding=padding, bias=False)))
        layers.append(nn.ReLU(inplace=True))
        for _ in range(num_of_layers-2):
            layers.append(chen.spectral_norm(nn.Conv2d(in_channels=features, out_channels=features, kernel_size=kernel_size, padding=padding, bias=False)))
            layers.append(nn.BatchNorm2d(features))
            layers.append(nn.ReLU(inplace=True))
        layers.append(chen.spectral_norm(nn.Conv2d(in_channels=features, out_channels=channels, kernel_size=kernel_size, padding=padding, bias=False)))
        self.dncnn = nn.Sequential(*layers)
    def forward(self, x):
        """Return the network output for x of shape [B, channels, H, W]."""
        out = self.dncnn(x)
        return out
| 7,553 | 38.34375 | 155 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/networks/twolayer_linear_net.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class LinearNet(nn.Module):
    """Small MLP (two hidden ReLU layers, Tanh output) that flattens each
    sample, maps it through the network, and reshapes the result back to the
    input's shape.

    NOTE(review): the reshape back to the input shape only works when
    output_size equals the flattened per-sample input size -- verify callers.
    """
    def __init__(self, input_size, bottleneck_size, output_size):
        super().__init__()
        self.network = nn.Sequential(
            nn.Linear(input_size, bottleneck_size),
            nn.ReLU(),
            nn.Linear(bottleneck_size, bottleneck_size),
            nn.ReLU(),
            nn.Linear(bottleneck_size, output_size),
            nn.Tanh(),
        )
        # Gaussian weights (std=0.01), constant 0.01 biases.
        self.network.apply(self.init_weights)
    def init_weights(self, m):
        if type(m) == nn.Linear:
            torch.nn.init.normal_(m.weight, mean=0.0, std=0.01)
            m.bias.data.fill_(0.01)
    def forward(self, input):
        original_shape = input.shape
        flat = torch.flatten(input, start_dim=1)
        return torch.reshape(self.network(flat), shape=original_shape)
| 1,205 | 29.923077 | 66 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/networks/resnet.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
import torch
import torch.nn as nn
class nblock_resnet(nn.Module):
def __init__(self, inc=2, onc=2, n_residual_blocks=2):
super(nblock_resnet, self).__init__()
n_interm_c = 128
init_layer = nn.Conv2d(inc, n_interm_c, 1, 1, 0)
model = [init_layer]
# residual blocks
for _ in range(n_residual_blocks):
block = residual_block(n_interm_c)
model = model + [block]
# 1x1 convolutions
conv_layer0 = nn.Conv2d(n_interm_c, n_interm_c, 1, 1, 0)
act0 = nn.LeakyReLU(0.1, inplace=True)
conv_layer1 = nn.Conv2d(n_interm_c, n_interm_c, 1, 1, 0)
act1 = nn.LeakyReLU(0.1, inplace=True)
conv_layer2 = nn.Conv2d(n_interm_c, onc, 1, 1, 0)
act2 = nn.LeakyReLU(0.1, inplace=True)
model = model + [conv_layer0, act0, conv_layer1, act1, conv_layer2, act2]
self.model = nn.Sequential(*model)
def forward(self, input):
patch_means = torch.mean(input, dim=(2, 3), keepdim=True)
input -= patch_means
return patch_means + self.model(input)
class residual_block(nn.Module):
    """Residual block: two (3x3 conv -> BatchNorm -> LeakyReLU(0.1)) stages
    with a skip connection; the sum is clamped to [-1, 1]."""
    def __init__(self, nc):
        super(residual_block, self).__init__()
        body = [
            nn.Conv2d(nc, nc, 3, 1, 1),
            nn.BatchNorm2d(nc),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(nc, nc, 3, 1, 1),
            nn.BatchNorm2d(nc),
            nn.LeakyReLU(0.1, inplace=True),
        ]
        self.model = nn.Sequential(*body)
    def forward(self, input):
        residual = self.model(input)
        return torch.clamp(input + residual, min=-1, max=1)
deep_equilibrium_inverse | deep_equilibrium_inverse-main/networks/u_net.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import torch
from torch import nn
from torch.nn import functional as F
class ConvBlock(nn.Module):
    """Two bias-free 3x3 convolutions, each followed by InstanceNorm2d,
    LeakyReLU(0.2) and Dropout2d. Spatial size is preserved (padding=1).
    """
    def __init__(self, in_chans, out_chans, drop_prob):
        """
        Args:
            in_chans (int): Number of channels in the input.
            out_chans (int): Number of channels in the output.
            drop_prob (float): Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.drop_prob = drop_prob
        stages = []
        for src, dst in ((in_chans, out_chans), (out_chans, out_chans)):
            stages += [
                nn.Conv2d(src, dst, kernel_size=3, padding=1, bias=False),
                nn.InstanceNorm2d(dst),
                nn.LeakyReLU(negative_slope=0.2, inplace=True),
                nn.Dropout2d(drop_prob),
            ]
        self.layers = nn.Sequential(*stages)
    def forward(self, input):
        """Map [B, in_chans, H, W] to [B, out_chans, H, W]."""
        return self.layers(input)
class TransposeConvBlock(nn.Module):
    """2x up-sampling block: bias-free stride-2 transpose convolution
    followed by InstanceNorm2d and LeakyReLU(0.2).
    """
    def __init__(self, in_chans, out_chans):
        """
        Args:
            in_chans (int): Number of channels in the input.
            out_chans (int): Number of channels in the output.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.layers = nn.Sequential(
            nn.ConvTranspose2d(in_chans, out_chans, kernel_size=2, stride=2, bias=False),
            nn.InstanceNorm2d(out_chans),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
        )
    def forward(self, input):
        """Map [B, in_chans, H, W] to [B, out_chans, 2H, 2W]."""
        return self.layers(input)
class ZerosNet(nn.Module):
    """Degenerate module: every input maps to an all-zero tensor of the same
    shape (multiply-by-zero keeps dtype and device)."""
    def __init__(self):
        super(ZerosNet, self).__init__()
    def forward(self, input):
        return 0.0 * input + 0.0
class UnetModel(nn.Module):
    """
    PyTorch implementation of a U-Net model.
    This is based on:
    Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks
    for biomedical image segmentation. In International Conference on Medical image
    computing and computer-assisted intervention, pages 234-241. Springer, 2015.

    Downsampling uses 2x average pooling, upsampling uses transpose convs
    with skip connections; the final output is clamped to [-1, 1].
    """
    def __init__(self, in_chans, out_chans, chans, num_pool_layers, drop_prob):
        """
        Args:
            in_chans (int): Number of channels in the input to the U-Net model.
            out_chans (int): Number of channels in the output to the U-Net model.
            chans (int): Number of output channels of the first convolution layer.
            num_pool_layers (int): Number of down-sampling and up-sampling layers.
            drop_prob (float): Dropout probability.
        """
        super().__init__()
        self.in_chans = in_chans
        self.out_chans = out_chans
        self.chans = chans
        self.num_pool_layers = num_pool_layers
        self.drop_prob = drop_prob
        # Encoder: channel width doubles at each pooling level.
        self.down_sample_layers = nn.ModuleList([ConvBlock(in_chans, chans, drop_prob)])
        width = chans
        for _ in range(num_pool_layers - 1):
            self.down_sample_layers.append(ConvBlock(width, width * 2, drop_prob))
            width *= 2
        # Bottleneck.
        self.conv = ConvBlock(width, width * 2, drop_prob)
        # Decoder: mirrors the encoder; the last stage adds a 1x1 output head.
        self.up_conv = nn.ModuleList()
        self.up_transpose_conv = nn.ModuleList()
        for _ in range(num_pool_layers - 1):
            self.up_transpose_conv.append(TransposeConvBlock(width * 2, width))
            self.up_conv.append(ConvBlock(width * 2, width, drop_prob))
            width //= 2
        self.up_transpose_conv.append(TransposeConvBlock(width * 2, width))
        self.up_conv.append(
            nn.Sequential(
                ConvBlock(width * 2, width, drop_prob),
                nn.Conv2d(width, self.out_chans, kernel_size=1, stride=1),
            ))
    def forward(self, input):
        """
        Args:
            input (torch.Tensor): Input tensor of shape [batch_size, self.in_chans, height, width]
        Returns:
            (torch.Tensor): Output tensor of shape [batch_size, self.out_chans, height, width]
        """
        skips = []
        out = input
        # Encoder path: save each pre-pool activation for the skip connection.
        for down in self.down_sample_layers:
            out = down(out)
            skips.append(out)
            out = F.avg_pool2d(out, kernel_size=2, stride=2, padding=0)
        out = self.conv(out)
        # Decoder path: upsample, match odd spatial sizes by reflect-padding,
        # concatenate the skip, then convolve.
        for up_transpose, up_block in zip(self.up_transpose_conv, self.up_conv):
            skip = skips.pop()
            out = up_transpose(out)
            pad = [0, 0, 0, 0]
            if out.shape[-1] != skip.shape[-1]:
                pad[1] = 1  # pad right
            if out.shape[-2] != skip.shape[-2]:
                pad[3] = 1  # pad bottom
            if sum(pad) != 0:
                out = F.pad(out, pad, "reflect")
            out = up_block(torch.cat([out, skip], dim=1))
        return torch.clamp(out, -1, 1)
class DnCNN(nn.Module):
    """DnCNN-style denoising CNN.

    Architecture: a Conv+ReLU head, (num_of_layers - 2) Conv+BatchNorm+ReLU
    middle blocks, and a Conv tail mapping back to ``channels``. All convs are
    3x3, padding 1, 64 features, no bias.

    Args:
        channels: number of image channels at input and output.
        num_of_layers: total number of convolutional layers.
        lip: unused in this class; kept for interface compatibility.
    """

    def __init__(self, channels, num_of_layers=17, lip=1.0):
        super(DnCNN, self).__init__()
        kernel, pad, width = 3, 1, 64
        # Head: lift the image into the feature space.
        modules = [
            nn.Conv2d(in_channels=channels, out_channels=width, kernel_size=kernel, padding=pad, bias=False),
            nn.ReLU(inplace=True),
        ]
        # Middle: repeated Conv -> BatchNorm -> ReLU blocks.
        for _ in range(num_of_layers - 2):
            modules.extend([
                nn.Conv2d(in_channels=width, out_channels=width, kernel_size=kernel, padding=pad, bias=False),
                nn.BatchNorm2d(width),
                nn.ReLU(inplace=True),
            ])
        # Tail: project back to the image channel count.
        modules.append(nn.Conv2d(in_channels=width, out_channels=channels, kernel_size=kernel, padding=pad, bias=False))
        self.dncnn = nn.Sequential(*modules)

    def forward(self, x):
        return self.dncnn(x)
deep_equilibrium_inverse | deep_equilibrium_inverse-main/training/denoiser_training.py | import torch
import numpy as np
from solvers import new_equilibrium_utils as eq_utils
from torch import autograd
from utils import cg_utils
import gc
def train_denoiser(denoising_net, train_dataloader, test_dataloader,
                   measurement_process, optimizer,
                   save_location, loss_function, n_epochs,
                   use_dataparallel=False, device='cpu', scheduler=None,
                   print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0):
    """Train a residual denoiser: the reconstruction is y + denoising_net(y).

    Args:
        denoising_net: network predicting a correction to the measurement.
        train_dataloader: yields ground-truth batches.
        test_dataloader: unused; kept for interface compatibility.
        measurement_process: callable mapping a batch to a measurement y.
        optimizer: optimizer over denoising_net's parameters.
        save_location: checkpoint path (overwritten on each save).
        loss_function: loss(reconstruction, ground_truth).
        n_epochs: exclusive upper bound on the epoch counter.
        use_dataparallel: if True, checkpoint denoising_net.module instead.
        device: torch device the data is moved to.
        scheduler: optional LR scheduler, stepped once per epoch.
        print_every_n_steps: logging period in steps.
        save_every_n_epochs: periodic checkpointing interval in epochs.
        start_epoch: epoch to resume from.
    """
    def _save_checkpoint(epoch_to_record):
        # Unwrap DataParallel so the checkpoint loads into a bare model.
        model = denoising_net.module if use_dataparallel else denoising_net
        state = {'solver_state_dict': model.state_dict(),
                 'epoch': epoch_to_record,
                 'optimizer_state_dict': optimizer.state_dict()}
        # Bug fix: the original called scheduler.state_dict() unconditionally,
        # raising AttributeError with the default scheduler=None.
        if scheduler is not None:
            state['scheduler_state_dict'] = scheduler.state_dict()
        torch.save(state, save_location)

    for epoch in range(start_epoch, n_epochs):
        if epoch % save_every_n_epochs == 0:
            _save_checkpoint(epoch)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            sample_batch = sample_batch.to(device=device)
            y = measurement_process(sample_batch)
            # Residual formulation: the net predicts the denoising correction.
            reconstruction = y + denoising_net(y)
            loss = loss_function(reconstruction, sample_batch)
            loss.backward()
            optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
        # End-of-epoch checkpoint (matches the original behavior).
        _save_checkpoint(epoch)
def train_solver_precond(single_iterate_solver, train_dataloader,
                         measurement_process, optimizer,
                         save_location, loss_function, n_epochs, deep_eq_module,
                         use_dataparallel=False, device='cpu', scheduler=None, noise_sigma=0.000001, precond_iterates=100,
                         print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0, forward_operator=None,
                         test_dataloader=None):
    """Train a deep-equilibrium solver, optionally CG-preconditioning the init.

    When `forward_operator` is given, the fixed-point solve is warm-started at
    the regularized-CG solution of (A^T A + noise_sigma I) x = A^T y; otherwise
    the equilibrium module picks its own starting point.

    Bug fix vs. the original: checkpoints no longer call scheduler.state_dict()
    when scheduler is None (the default), which raised AttributeError on every
    save. `test_dataloader` is unused and kept for interface compatibility.
    """
    def _save_checkpoint(epoch_to_record):
        # Unwrap DataParallel so the checkpoint loads into a bare model.
        model = single_iterate_solver.module if use_dataparallel else single_iterate_solver
        state = {'solver_state_dict': model.state_dict(),
                 'epoch': epoch_to_record,
                 'optimizer_state_dict': optimizer.state_dict()}
        if scheduler is not None:
            state['scheduler_state_dict'] = scheduler.state_dict()
        torch.save(state, save_location)

    for epoch in range(start_epoch, n_epochs):
        if epoch % save_every_n_epochs == 0:
            _save_checkpoint(epoch)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            sample_batch = sample_batch.to(device=device)
            y = measurement_process(sample_batch)
            if forward_operator is not None:
                # Preconditioned initialization, computed without gradients.
                with torch.no_grad():
                    initial_point = cg_utils.conjugate_gradient(
                        initial_point=forward_operator.adjoint(y),
                        ATA=forward_operator.gramian,
                        regularization_lambda=noise_sigma,
                        n_iterations=precond_iterates)
                reconstruction = deep_eq_module.forward(y, initial_point)
            else:
                reconstruction = deep_eq_module.forward(y)
            loss = loss_function(reconstruction, sample_batch)
            loss.backward()
            optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
        _save_checkpoint(epoch)
def train_solver_mnist(single_iterate_solver, train_dataloader, test_dataloader,
                       measurement_process, optimizer,
                       save_location, loss_function, n_epochs,
                       use_dataparallel=False, device='cpu', scheduler=None,
                       print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0, max_iters=100):
    """Train a deep-equilibrium solver on (image, label) batches; labels are ignored.

    The reconstruction is the fixed point of `single_iterate_solver`, computed
    with Anderson acceleration (eq_utils.anderson) inside a DEQFixedPoint
    wrapper. `test_dataloader` is unused and kept for interface compatibility.

    Fixes vs. the original:
      * removed the `n_iterations` schedule, which was computed but never read;
      * checkpoints no longer call scheduler.state_dict() when scheduler is
        None (the default), which raised AttributeError on every save.
    """
    def _save_checkpoint(epoch_to_record):
        # Unwrap DataParallel so the checkpoint loads into a bare model.
        model = single_iterate_solver.module if use_dataparallel else single_iterate_solver
        state = {'solver_state_dict': model.state_dict(),
                 'epoch': epoch_to_record,
                 'optimizer_state_dict': optimizer.state_dict()}
        if scheduler is not None:
            state['scheduler_state_dict'] = scheduler.state_dict()
        torch.save(state, save_location)

    forward_iterator = eq_utils.anderson
    deep_eq_module = eq_utils.DEQFixedPoint(single_iterate_solver, solver=forward_iterator,
                                            m=5, lam=1e-4, max_iter=max_iters, tol=1e-3, beta=1.5)
    for epoch in range(start_epoch, n_epochs):
        if epoch % save_every_n_epochs == 0:
            _save_checkpoint(epoch)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            # MNIST-style loaders yield (image, label); only the image is used.
            sample_batch = sample_batch[0].to(device=device)
            y = measurement_process(sample_batch)
            single_iterate_solver.set_initial_point(y)
            reconstruction = deep_eq_module.forward(y)
            loss = loss_function(reconstruction, sample_batch)
            loss.backward()
            optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
        _save_checkpoint(epoch)
| 10,567 | 44.551724 | 130 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/training/standard_training.py | import torch
import numpy as np
def train_solver(solver, train_dataloader, test_dataloader,
                 measurement_process, optimizer,
                 save_location, loss_function, n_epochs, forward_model=None,
                 use_dataparallel=False, device='cpu', scheduler=None, n_blocks=10,
                 print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0):
    """Train an unrolled solver (`n_blocks` iterations) and report test PSNR.

    Bug fixes vs. the original:
      * `n_blocks` was unconditionally overwritten with 6, silently ignoring
        the caller's argument; the parameter is now honored;
      * checkpoints no longer call scheduler.state_dict() when scheduler is
        None (the default), which raised AttributeError on every save.
    """
    def _save_checkpoint(epoch_to_record):
        # Unwrap DataParallel so the checkpoint loads into a bare model.
        model = solver.module if use_dataparallel else solver
        state = {'solver_state_dict': model.state_dict(),
                 'epoch': epoch_to_record,
                 'optimizer_state_dict': optimizer.state_dict()}
        if scheduler is not None:
            state['scheduler_state_dict'] = scheduler.state_dict()
        torch.save(state, save_location)

    for epoch in range(start_epoch, n_epochs):
        if epoch % save_every_n_epochs == 0:
            _save_checkpoint(epoch)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            sample_batch = sample_batch.to(device=device)
            y = measurement_process(sample_batch)
            # Start from the adjoint image when a forward model is supplied,
            # otherwise directly from the measurement.
            if forward_model is None:
                initial_point = y
            else:
                initial_point = forward_model.adjoint(y)
            reconstruction = solver(initial_point, iterations=n_blocks)
            reconstruction = torch.clamp(reconstruction, -1, 1)
            loss = loss_function(reconstruction, sample_batch)
            loss.backward()
            optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
        #####################TEST##########################
        loss_accumulator = []
        mse_loss = torch.nn.MSELoss()
        for ii, sample_batch in enumerate(test_dataloader):
            sample_batch = sample_batch.to(device=device)
            y = measurement_process(sample_batch)
            initial_point = y
            reconstruction = solver(initial_point, iterations=n_blocks)
            reconstruction = torch.clamp(reconstruction, -1, 1)
            loss = mse_loss(reconstruction, sample_batch)
            loss_accumulator.append(loss.cpu().detach().numpy())
        loss_array = np.asarray(loss_accumulator)
        loss_mse = np.mean(loss_array)
        PSNR = -10 * np.log10(loss_mse)
        percentiles = np.percentile(loss_array, [25, 50, 75])
        percentiles = -10.0 * np.log10(percentiles)
        print("TEST LOSS: " + str(sum(loss_accumulator) / len(loss_accumulator)), flush=True)
        print("MEAN TEST PSNR: " + str(PSNR), flush=True)
        print("TEST PSNR QUARTILES AND MEDIAN: " + str(percentiles[0]) +
              ", " + str(percentiles[1]) + ", " + str(percentiles[2]), flush=True)
def train_solver_mnist(solver, train_dataloader, test_dataloader,
                       measurement_process, optimizer,
                       save_location, loss_function, n_epochs,
                       use_dataparallel=False, device='cpu', scheduler=None,
                       print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0):
    """Train an unrolled solver (6 iterations) on (image, label) batches and
    report test PSNR after every epoch. Labels are ignored.

    Bug fix vs. the original: checkpoints no longer call scheduler.state_dict()
    when scheduler is None (the default), which raised AttributeError on every
    save.
    """
    def _save_checkpoint(epoch_to_record):
        # Unwrap DataParallel so the checkpoint loads into a bare model.
        model = solver.module if use_dataparallel else solver
        state = {'solver_state_dict': model.state_dict(),
                 'epoch': epoch_to_record,
                 'optimizer_state_dict': optimizer.state_dict()}
        if scheduler is not None:
            state['scheduler_state_dict'] = scheduler.state_dict()
        torch.save(state, save_location)

    for epoch in range(start_epoch, n_epochs):
        if epoch % save_every_n_epochs == 0:
            _save_checkpoint(epoch)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            # Loaders yield (image, label); only the image is used.
            sample_batch = sample_batch[0].to(device=device)
            y = measurement_process(sample_batch)
            initial_point = y
            reconstruction = solver(initial_point, iterations=6)
            reconstruction = torch.clamp(reconstruction, -1, 1)
            loss = loss_function(reconstruction, sample_batch)
            loss.backward()
            optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
        #####################TEST##########################
        loss_accumulator = []
        mse_loss = torch.nn.MSELoss()
        for ii, sample_batch in enumerate(test_dataloader):
            sample_batch = sample_batch[0].to(device=device)
            y = measurement_process(sample_batch)
            initial_point = y
            reconstruction = solver(initial_point, iterations=6)
            reconstruction = torch.clamp(reconstruction, -1, 1)
            loss = mse_loss(reconstruction, sample_batch)
            loss_accumulator.append(loss.cpu().detach().numpy())
        loss_array = np.asarray(loss_accumulator)
        loss_mse = np.mean(loss_array)
        PSNR = -10 * np.log10(loss_mse)
        percentiles = np.percentile(loss_array, [25, 50, 75])
        percentiles = -10.0 * np.log10(percentiles)
        print("TEST LOSS: " + str(sum(loss_accumulator) / len(loss_accumulator)), flush=True)
        print("MEAN TEST PSNR: " + str(PSNR), flush=True)
        print("TEST PSNR QUARTILES AND MEDIAN: " + str(percentiles[0]) +
              ", " + str(percentiles[1]) + ", " + str(percentiles[2]), flush=True)
| 6,265 | 40.496689 | 89 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/training/new_equilibrium_training.py | import torch
import numpy as np
from solvers import new_equilibrium_utils as eq_utils
from torch import autograd
def train_solver(single_iterate_solver, train_dataloader, test_dataloader,
                 measurement_process, optimizer,
                 save_location, loss_function, n_epochs, forward_iterator, iterator_kwargs,
                 use_dataparallel=False, device='cpu', scheduler=None,
                 print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0):
    """DEQ training loop with a caller-supplied fixed-point iterator.

    Bug fixes vs. the original:
      * `forward_iterator` was unconditionally overwritten with
        eq_utils.anderson, silently ignoring the caller's argument;
      * checkpoints no longer call scheduler.state_dict() when scheduler is
        None (the default), which raised AttributeError on every save.

    `test_dataloader` is unused and kept for interface compatibility.
    """
    def _save_checkpoint(epoch_to_record):
        # Unwrap DataParallel so the checkpoint loads into a bare model.
        model = single_iterate_solver.module if use_dataparallel else single_iterate_solver
        state = {'solver_state_dict': model.state_dict(),
                 'epoch': epoch_to_record,
                 'optimizer_state_dict': optimizer.state_dict()}
        if scheduler is not None:
            state['scheduler_state_dict'] = scheduler.state_dict()
        torch.save(state, save_location)

    deep_eq_module = eq_utils.DEQFixedPoint(single_iterate_solver, forward_iterator, iterator_kwargs)
    for epoch in range(start_epoch, n_epochs):
        if epoch % save_every_n_epochs == 0:
            _save_checkpoint(epoch)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            sample_batch = sample_batch.to(device=device)
            y = measurement_process(sample_batch)
            single_iterate_solver.set_initial_point(y)
            reconstruction = deep_eq_module.forward(y)
            loss = loss_function(reconstruction, sample_batch)
            loss.backward()
            optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
        _save_checkpoint(epoch)
def train_solver_noanderson(single_iterate_solver, train_dataloader, test_dataloader,
                            measurement_process, optimizer,
                            save_location, loss_function, n_epochs,
                            use_dataparallel=False, device='cpu', scheduler=None,
                            print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0, max_iters=100):
    """DEQ training using plain forward (fixed-point) iteration instead of
    Anderson acceleration.

    Bug fix vs. the original: checkpoints no longer call scheduler.state_dict()
    when scheduler is None (the default), which raised AttributeError on every
    save. `test_dataloader` is unused and kept for interface compatibility.
    """
    def _save_checkpoint(epoch_to_record):
        # Unwrap DataParallel so the checkpoint loads into a bare model.
        model = single_iterate_solver.module if use_dataparallel else single_iterate_solver
        state = {'solver_state_dict': model.state_dict(),
                 'epoch': epoch_to_record,
                 'optimizer_state_dict': optimizer.state_dict()}
        if scheduler is not None:
            state['scheduler_state_dict'] = scheduler.state_dict()
        torch.save(state, save_location)

    forward_iterator = eq_utils.forward_iteration
    deep_eq_module = eq_utils.DEQFixedPoint(single_iterate_solver, solver=forward_iterator,
                                            max_iter=max_iters, tol=1e-3)
    for epoch in range(start_epoch, n_epochs):
        if epoch % save_every_n_epochs == 0:
            _save_checkpoint(epoch)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            sample_batch = sample_batch.to(device=device)
            y = measurement_process(sample_batch)
            single_iterate_solver.set_initial_point(y)
            reconstruction = deep_eq_module.forward(y)
            loss = loss_function(reconstruction, sample_batch)
            loss.backward()
            optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
        _save_checkpoint(epoch)
def train_solver_mnist(single_iterate_solver, train_dataloader, test_dataloader,
                       measurement_process, optimizer,
                       save_location, loss_function, n_epochs,
                       use_dataparallel=False, device='cpu', scheduler=None,
                       print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0, max_iters=100):
    """DEQ training on (image, label) batches using a Neumann-series backward
    pass (DEQFixedPointNeumann) with Anderson forward iteration. Labels are
    ignored; `test_dataloader` is unused and kept for interface compatibility.

    Fixes vs. the original:
      * removed the `n_iterations` schedule, which was computed but never read;
      * checkpoints no longer call scheduler.state_dict() when scheduler is
        None (the default), which raised AttributeError on every save.
    """
    def _save_checkpoint(epoch_to_record):
        # Unwrap DataParallel so the checkpoint loads into a bare model.
        model = single_iterate_solver.module if use_dataparallel else single_iterate_solver
        state = {'solver_state_dict': model.state_dict(),
                 'epoch': epoch_to_record,
                 'optimizer_state_dict': optimizer.state_dict()}
        if scheduler is not None:
            state['scheduler_state_dict'] = scheduler.state_dict()
        torch.save(state, save_location)

    forward_iterator = eq_utils.anderson
    deep_eq_module = eq_utils.DEQFixedPointNeumann(single_iterate_solver, neumann_k=100, solver=forward_iterator,
                                                   m=5, lam=1e-4, max_iter=max_iters, tol=1e-3, beta=1.5)
    for epoch in range(start_epoch, n_epochs):
        if epoch % save_every_n_epochs == 0:
            _save_checkpoint(epoch)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            # MNIST-style loaders yield (image, label); only the image is used.
            sample_batch = sample_batch[0].to(device=device)
            y = measurement_process(sample_batch)
            single_iterate_solver.set_initial_point(y)
            reconstruction = deep_eq_module.forward(y)
            loss = loss_function(reconstruction, sample_batch)
            loss.backward()
            optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
        _save_checkpoint(epoch)
| 10,141 | 45.1 | 113 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/training/refactor_equilibrium_training.py | import torch
import numpy as np
from solvers import new_equilibrium_utils as eq_utils
from torch import autograd
from utils import cg_utils
def train_solver(single_iterate_solver, train_dataloader, test_dataloader,
                 measurement_process, optimizer,
                 save_location, loss_function, n_epochs, deep_eq_module,
                 use_dataparallel=False, device='cpu', scheduler=None,
                 print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0):
    """Generic DEQ training loop: measure, solve to a fixed point, backprop.

    Bug fix vs. the original: checkpoints no longer call scheduler.state_dict()
    when scheduler is None (the default), which raised AttributeError on every
    save. `test_dataloader` is unused and kept for interface compatibility.
    """
    def _save_checkpoint(epoch_to_record):
        # Unwrap DataParallel so the checkpoint loads into a bare model.
        model = single_iterate_solver.module if use_dataparallel else single_iterate_solver
        state = {'solver_state_dict': model.state_dict(),
                 'epoch': epoch_to_record,
                 'optimizer_state_dict': optimizer.state_dict()}
        if scheduler is not None:
            state['scheduler_state_dict'] = scheduler.state_dict()
        torch.save(state, save_location)

    for epoch in range(start_epoch, n_epochs):
        if epoch % save_every_n_epochs == 0:
            _save_checkpoint(epoch)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            sample_batch = sample_batch.to(device=device)
            y = measurement_process(sample_batch)
            single_iterate_solver.set_initial_point(y)
            reconstruction = deep_eq_module.forward(y)
            loss = loss_function(reconstruction, sample_batch)
            loss.backward()
            optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
        _save_checkpoint(epoch)
def train_solver_precond(single_iterate_solver, train_dataloader,
                         measurement_process, optimizer,
                         save_location, loss_function, n_epochs, deep_eq_module,
                         use_dataparallel=False, device='cpu', scheduler=None, noise_sigma=0.000001, precond_iterates=100,
                         print_every_n_steps=2, save_every_n_epochs=5, start_epoch=0, forward_operator=None,
                         test_dataloader=None):
    """DEQ training on paired (input, target) batches with divergence rollback.

    The model/optimizer are restored from the last checkpoint whenever the
    loss becomes NaN or explodes relative to the epoch's first step.
    `noise_sigma`, `precond_iterates`, `save_every_n_epochs` and
    `test_dataloader` are unused here and kept for interface compatibility.

    Bug fixes vs. the original:
      * the batch tuple was unpacked after `sample_batch` had already been
        rebound to its first element, so `sample_batch[1]` indexed row 1 of
        the input tensor instead of the target image;
      * checkpoints no longer call scheduler.state_dict() when scheduler is
        None (the default), which raised AttributeError.
    """
    def _save_checkpoint(epoch_to_record):
        # Unwrap DataParallel so the checkpoint loads into a bare model.
        model = single_iterate_solver.module if use_dataparallel else single_iterate_solver
        state = {'solver_state_dict': model.state_dict(),
                 'epoch': epoch_to_record,
                 'optimizer_state_dict': optimizer.state_dict()}
        if scheduler is not None:
            state['scheduler_state_dict'] = scheduler.state_dict()
        torch.save(state, save_location)

    previous_loss = 10.0
    reset_flag = False
    for epoch in range(start_epoch, n_epochs):
        if reset_flag:
            # The last epoch diverged: roll model and optimizer back to the
            # most recent checkpoint before continuing.
            save_state_dict = torch.load(save_location)
            single_iterate_solver.load_state_dict(save_state_dict['solver_state_dict'])
            optimizer.load_state_dict(save_state_dict['optimizer_state_dict'])
            reset_flag = False
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            input_batch = sample_batch[0].to(device=device)
            target_img = sample_batch[1].to(device=device)
            y = measurement_process(input_batch)
            if forward_operator is not None:
                # Warm-start the equilibrium solve from the adjoint image.
                with torch.no_grad():
                    initial_point = forward_operator.adjoint(y)
                reconstruction = deep_eq_module.forward(y, initial_point=initial_point)
            else:
                reconstruction = deep_eq_module.forward(y)
            loss = loss_function(reconstruction, target_img)
            if np.isnan(loss.item()):
                # NaN loss: abandon this epoch and restore from checkpoint.
                reset_flag = True
                break
            loss.backward()
            optimizer.step()
            if ii == 0:
                previous_loss = loss.item()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
            if ii % 200 == 0:
                # Mid-epoch checkpoint; epoch+1 marks it as "in progress".
                _save_checkpoint(epoch + 1)
            if (previous_loss - loss.item()) / previous_loss < -10.0 or np.isnan(loss.item()):
                # Loss exploded relative to the epoch's first step.
                reset_flag = True
        if scheduler is not None:
            scheduler.step(epoch)
        if not reset_flag:
            _save_checkpoint(epoch)
def train_solver_precond1(single_iterate_solver, train_dataloader,
                          measurement_process, optimizer,
                          save_location, loss_function, n_epochs, deep_eq_module,
                          use_dataparallel=False, device='cpu', scheduler=None, noise_sigma=0.000001, precond_iterates=100,
                          print_every_n_steps=2, save_every_n_epochs=5, start_epoch=0, forward_operator = None,
                          test_dataloader = None):
    """Train a DEQ solver with a CG-preconditioned initial point and rollback.

    When `forward_operator` is given, the fixed-point solve is warm-started at
    the CG solution of the regularized normal equations; the model/optimizer
    are restored from the last checkpoint whenever the loss becomes NaN or
    explodes relative to the epoch's first step.

    NOTE(review): the default scheduler=None crashes the save calls below
    (scheduler.state_dict() on None) — callers must pass a scheduler. Confirm.
    """
    previous_loss = 10.0
    reset_flag = False
    for epoch in range(start_epoch, n_epochs):
        if reset_flag:
            # Divergence detected in the previous epoch: roll model and
            # optimizer back to the last checkpoint before continuing.
            save_state_dict = torch.load(save_location)
            single_iterate_solver.load_state_dict(save_state_dict['solver_state_dict'])
            optimizer.load_state_dict(save_state_dict['optimizer_state_dict'])
            reset_flag = False
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            sample_batch = sample_batch.to(device=device)
            y = measurement_process(sample_batch)
            if forward_operator is not None:
                # Preconditioning: start from an approximate solution of
                # (A^T A + lambda I) x = A^T y computed with CG (no gradients).
                with torch.no_grad():
                    initial_point = cg_utils.conjugate_gradient(initial_point=forward_operator.adjoint(y),
                                                        ATA=forward_operator.gramian,
                                                        regularization_lambda=noise_sigma, n_iterations=precond_iterates)
                reconstruction = deep_eq_module.forward(y, initial_point=initial_point)
            else:
                reconstruction = deep_eq_module.forward(y)
            loss = loss_function(reconstruction, sample_batch)
            if np.isnan(loss.item()):
                # NaN loss: abandon this epoch and restore from checkpoint.
                reset_flag = True
                break
            loss.backward()
            optimizer.step()
            if ii == 0:
                previous_loss = loss.item()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
            if ii % 200 == 0:
                # Mid-epoch checkpoint; epoch+1 marks it as "in progress".
                if use_dataparallel:
                    torch.save({'solver_state_dict': single_iterate_solver.module.state_dict(),
                                'epoch': epoch+1,
                                'optimizer_state_dict': optimizer.state_dict(),
                                'scheduler_state_dict': scheduler.state_dict()
                                }, save_location)
                else:
                    torch.save({'solver_state_dict': single_iterate_solver.state_dict(),
                                'epoch': epoch+1,
                                'optimizer_state_dict': optimizer.state_dict(),
                                'scheduler_state_dict': scheduler.state_dict()
                                }, save_location)
            if (previous_loss - loss.item()) / previous_loss < -10.0 or np.isnan(loss.item()):
                # Loss exploded (>10x worse than the epoch's first step):
                # flag a rollback for the next epoch.
                reset_flag = True
        if scheduler is not None:
            scheduler.step(epoch)
        if not reset_flag:
            # End-of-epoch checkpoint, skipped when this epoch diverged.
            if use_dataparallel:
                torch.save({'solver_state_dict': single_iterate_solver.module.state_dict(),
                            'epoch': epoch,
                            'optimizer_state_dict': optimizer.state_dict(),
                            'scheduler_state_dict': scheduler.state_dict()
                            }, save_location)
            else:
                torch.save({'solver_state_dict': single_iterate_solver.state_dict(),
                            'epoch': epoch,
                            'optimizer_state_dict': optimizer.state_dict(),
                            'scheduler_state_dict': scheduler.state_dict()
                            }, save_location)
def train_solver_mnist(single_iterate_solver, train_dataloader, test_dataloader,
                 measurement_process, optimizer,
                 save_location, loss_function, n_epochs,
                 use_dataparallel=False, device='cpu', scheduler=None,
                 print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0, max_iters=100):
    """Train a DEQ solver on (image, label) batches; labels are ignored.

    The reconstruction is the fixed point computed with Anderson acceleration
    inside a DEQFixedPoint wrapper. `test_dataloader` is not used.

    NOTE(review): `n_iterations` below is computed but never read (leftover of
    an unrolled-training schedule), and the default scheduler=None crashes the
    save calls (scheduler.state_dict() on None) — confirm before relying on
    the defaults.
    """
    # Dead code: this schedule is never consumed anywhere below.
    n_iterations = [5]*n_epochs
    for ee in range(n_epochs):
        if ee >= 20:
            n_iterations[ee] = 5
        if ee >= 23:
            n_iterations[ee] = 7
        if ee >= 28:
            n_iterations[ee] = 9
        if ee >= 38:
            n_iterations[ee] = 11
        if ee >= 44:
            n_iterations[ee] = 13
        if ee >= 50:
            n_iterations[ee] = 20
        if ee >= 58:
            n_iterations[ee] = 30
    forward_iterator = eq_utils.anderson
    deep_eq_module = eq_utils.DEQFixedPoint(single_iterate_solver, solver=forward_iterator,
                                            m=5, lam=1e-4, max_iter=max_iters, tol=1e-3, beta=1.5)
    for epoch in range(start_epoch, n_epochs):
        # Periodic checkpoint at the start of the epoch.
        if epoch % save_every_n_epochs == 0:
            if use_dataparallel:
                torch.save({'solver_state_dict': single_iterate_solver.module.state_dict(),
                            'epoch': epoch,
                            'optimizer_state_dict': optimizer.state_dict(),
                            'scheduler_state_dict': scheduler.state_dict()
                            }, save_location)
            else:
                torch.save({'solver_state_dict': single_iterate_solver.state_dict(),
                            'epoch': epoch,
                            'optimizer_state_dict': optimizer.state_dict(),
                            'scheduler_state_dict': scheduler.state_dict()
                            }, save_location)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            # MNIST-style loaders yield (image, label); only the image is used.
            sample_batch = sample_batch[0].to(device=device)
            y = measurement_process(sample_batch)
            single_iterate_solver.set_initial_point(y)
            reconstruction = deep_eq_module.forward(y)
            loss = loss_function(reconstruction, sample_batch)
            loss.backward()
            optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
        # End-of-epoch checkpoint.
        if use_dataparallel:
            torch.save({'solver_state_dict': single_iterate_solver.module.state_dict(),
                        'epoch': epoch,
                        'optimizer_state_dict': optimizer.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict()
                        }, save_location)
        else:
            torch.save({'solver_state_dict': single_iterate_solver.state_dict(),
                        'epoch': epoch,
                        'optimizer_state_dict': optimizer.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict()
                        }, save_location)
| 16,122 | 47.272455 | 130 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/training/equilibrium_training.py | import torch
import numpy as np
from solvers import equilibrium_utils as eq_utils
from torch import autograd
def train_solver(single_iterate_solver, train_dataloader, test_dataloader,
                 measurement_process, optimizer,
                 save_location, loss_function, n_epochs,
                 use_dataparallel=False, device='cpu', scheduler=None,
                 print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0):
    """Train a deep-equilibrium solver with implicit (Jacobian-free) gradients.

    For the first two epochs the loss is backpropagated directly through the
    unrolled fixed-point iterates (warm-up). Afterwards the gradient is formed
    by implicit differentiation at the fixed point: dl/dz* is computed, a
    Jacobian-inverse vector product is approximated with
    ``eq_utils.conjugate_gradient_equilibriumgrad``, and the result is pushed
    through one solver application to reach the parameters.

    Args:
        single_iterate_solver: module implementing one fixed-point update;
            must expose ``set_initial_point``.
        train_dataloader: yields ground-truth sample batches (tensors).
        test_dataloader: unused here; kept for interface compatibility.
        measurement_process: callable mapping a batch to measurements ``y``.
        optimizer: torch optimizer over the solver's parameters.
        save_location: checkpoint path, overwritten every
            ``save_every_n_epochs`` epochs (saved at the *start* of the epoch).
        loss_function: callable ``(reconstruction, target) -> scalar``.
        n_epochs: total number of epochs.
        use_dataparallel: if True the solver is an ``nn.DataParallel`` wrapper
            and is unwrapped for checkpointing.
        device: device batches are moved to.
        scheduler: optional LR scheduler, stepped once per epoch.
        print_every_n_steps: logging period in training steps.
        save_every_n_epochs: checkpointing period in epochs.
        start_epoch: first epoch index (for resuming).
    """

    def _save_checkpoint(epoch):
        # Unwrap DataParallel so the checkpoint loads on a single device.
        model = single_iterate_solver.module if use_dataparallel else single_iterate_solver
        torch.save({'solver_state_dict': model.state_dict(),
                    'epoch': epoch,
                    'optimizer_state_dict': optimizer.state_dict(),
                    # Fix: the original called scheduler.state_dict()
                    # unconditionally and crashed when scheduler is None
                    # (the default).
                    'scheduler_state_dict':
                        scheduler.state_dict() if scheduler is not None else None
                    }, save_location)

    # Fixed-point iteration budget per epoch: 5 until epoch 8, then ramped
    # up to 20 as training stabilizes. (The original also had a redundant
    # "if ee >= 5: n_iterations[ee] = 5", a no-op that is dropped here.)
    n_iterations = [5] * n_epochs
    for ee in range(n_epochs):
        if ee >= 8:
            n_iterations[ee] = 8
        if ee >= 10:
            n_iterations[ee] = 10
        if ee >= 12:
            n_iterations[ee] = 15
        if ee >= 15:
            n_iterations[ee] = 20

    for epoch in range(start_epoch, n_epochs):
        if epoch % save_every_n_epochs == 0:
            _save_checkpoint(epoch)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            sample_batch = sample_batch.to(device=device)
            y = measurement_process(sample_batch)
            single_iterate_solver.set_initial_point(y)
            reconstruction = eq_utils.get_equilibrium_point(
                y, single_iterate_solver, max_iterations=n_iterations[epoch])
            # Outputs are clamped to [-1, 1], matching the data normalization
            # used by the calling scripts.
            reconstruction = torch.clamp(reconstruction, -1, 1)
            loss = loss_function(reconstruction, sample_batch)
            if epoch < 2:
                # Warm-up: ordinary backprop through the unrolled iterates.
                loss.backward()
                optimizer.step()
            else:
                # Implicit differentiation at the fixed point:
                # 1) dl/dz*; 2) approximate Jacobian-inverse vector product
                # via conjugate gradient; 3) push the result through one
                # solver application to accumulate parameter gradients.
                dell_delz = torch.autograd.grad(inputs=reconstruction, outputs=loss,
                                                grad_outputs=torch.ones_like(loss))[0]
                delf_deltheta_invJ = eq_utils.conjugate_gradient_equilibriumgrad(
                    b=dell_delz,
                    input_z=reconstruction,
                    f_function=single_iterate_solver,
                    n_iterations=5)
                torch.autograd.backward(tensors=reconstruction,
                                        grad_tensors=delf_deltheta_invJ)
                optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
def train_solver_mnist(single_iterate_solver, train_dataloader, test_dataloader,
                 measurement_process, optimizer,
                 save_location, loss_function, n_epochs,
                 use_dataparallel=False, device='cpu', scheduler=None,
                 print_every_n_steps=10, save_every_n_epochs=5, start_epoch=0):
    """MNIST variant of :func:`train_solver`.

    Differences from ``train_solver``: the dataloader yields
    ``(image, label)`` pairs (only the image is used), reconstructions are
    clamped to [0, 1], checkpoints are written at the start AND end of every
    epoch, and the implicit-gradient branch is currently disabled by an
    ``exit()`` call (see NOTE below).

    Args mirror :func:`train_solver`; ``save_every_n_epochs`` is accepted but
    not used (saving happens every epoch).
    """

    def _save_checkpoint(epoch):
        # Unwrap DataParallel so the checkpoint loads on a single device.
        model = single_iterate_solver.module if use_dataparallel else single_iterate_solver
        torch.save({'solver_state_dict': model.state_dict(),
                    'epoch': epoch,
                    'optimizer_state_dict': optimizer.state_dict(),
                    # Fix: the original called scheduler.state_dict()
                    # unconditionally and crashed when scheduler is None
                    # (the default).
                    'scheduler_state_dict':
                        scheduler.state_dict() if scheduler is not None else None
                    }, save_location)

    # Fixed-point iteration budget per epoch: 5 until epoch 23, ramping to 30
    # by epoch 58. (The original "if ee >= 20: n_iterations[ee] = 5" was a
    # no-op and is dropped.)
    n_iterations = [5] * n_epochs
    for ee in range(n_epochs):
        if ee >= 23:
            n_iterations[ee] = 7
        if ee >= 28:
            n_iterations[ee] = 9
        if ee >= 38:
            n_iterations[ee] = 11
        if ee >= 44:
            n_iterations[ee] = 13
        if ee >= 50:
            n_iterations[ee] = 20
        if ee >= 58:
            n_iterations[ee] = 30

    for epoch in range(start_epoch, n_epochs):
        _save_checkpoint(epoch)
        for ii, sample_batch in enumerate(train_dataloader):
            optimizer.zero_grad()
            # MNIST loader yields (image, label); only the image is used.
            sample_batch = sample_batch[0].to(device=device)
            y = measurement_process(sample_batch)
            single_iterate_solver.set_initial_point(y)
            if epoch < 10:
                # Warm-up: ordinary backprop through the unrolled iterates.
                reconstruction = eq_utils.get_equilibrium_point(y, single_iterate_solver,
                                                                max_iterations=n_iterations[epoch])
                reconstruction = torch.clamp(reconstruction, 0, 1)
                loss = loss_function(reconstruction, sample_batch)
                loss.backward()
                optimizer.step()
            else:
                # NOTE(review): this exit() was present in the original and
                # makes everything below unreachable — preserved to keep
                # behavior identical; presumably a debugging leftover.
                exit()
                # Implicit-differentiation path (unreachable; kept so the
                # intended gradient computation is documented in code).
                reconstruction = eq_utils.get_equilibrium_point(y, single_iterate_solver,
                                                                max_iterations=n_iterations[epoch])
                reconstruction = torch.clamp(reconstruction, 0, 1)
                loss = loss_function(reconstruction, sample_batch)
                dell_delz = torch.autograd.grad(inputs=reconstruction, outputs=loss,
                                                grad_outputs=torch.ones_like(loss))[0]
                delf_deltheta_invJ = eq_utils.conjugate_gradient_equilibriumgrad(
                    b=dell_delz,
                    input_z=reconstruction,
                    f_function=single_iterate_solver,
                    n_iterations=10)
                # Note the negated grad_tensors here, unlike train_solver.
                torch.autograd.backward(tensors=reconstruction,
                                        grad_tensors=-delf_deltheta_invJ)
                torch.nn.utils.clip_grad_norm_(single_iterate_solver.parameters(), 1.0)
                optimizer.step()
            if ii % print_every_n_steps == 0:
                logging_string = "Epoch: " + str(epoch) + " Step: " + str(ii) + \
                                 " Loss: " + str(loss.cpu().detach().numpy())
                print(logging_string, flush=True)
        if scheduler is not None:
            scheduler.step(epoch)
        _save_checkpoint(epoch)
| 14,024 | 44.684039 | 129 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/scripts/fixedpoint/deblur_proxgrad_fixedeta_pre.py | import torch
import os
import random
import sys
import argparse
sys.path.append('/home-nfs/gilton/learned_iterative_solvers')
# sys.path.append('/Users/dgilton/PycharmProjects/learned_iterative_solvers')
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
import operators.blurs as blurs
from operators.operator import OperatorPlusNoise
from utils.celeba_dataloader import CelebaTrainingDatasetSubset, CelebaTestDataset
from networks.normalized_equilibrium_u_net import UnetModel, DnCNN
from solvers.equilibrium_solvers import EquilibriumProxGrad
from training import refactor_equilibrium_training
from solvers import new_equilibrium_utils as eq_utils
# ---- Command-line configuration -------------------------------------------
# and_* flags configure the Anderson-accelerated fixed-point forward solver;
# lr / lr_gamma / sched_step configure Adam + StepLR.
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', default=80)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--and_maxiters', default=100)
parser.add_argument('--and_beta', type=float, default=1.0)
parser.add_argument('--and_m', type=int, default=5)
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--etainit', type=float, default=0.9)
parser.add_argument('--lr_gamma', type=float, default=0.1)
parser.add_argument('--sched_step', type=int, default=10)
parser.add_argument('--savepath',
                    default="/share/data/vision-greg2/users/gilton/celeba_equilibriumgrad_blur_save_inf.ckpt")
args = parser.parse_args()
# Parameters to modify
n_epochs = int(args.n_epochs)
current_epoch = 0
batch_size = int(args.batch_size)
n_channels = 3  # RGB CelebA images
max_iters = int(args.and_maxiters)
anderson_m = int(args.and_m)
anderson_beta = float(args.and_beta)
learning_rate = float(args.lr)
print_every_n_steps = 2
save_every_n_epochs = 1
# NOTE(review): the step size is hard-coded here, so the --etainit argument
# (default 0.9) is parsed but never used — confirm intent.
initial_eta = 0.2
initial_data_points = 10000
# point this towards your celeba files
data_location = "/share/data/vision-greg2/mixpatch/img_align_celeba/"
# Forward model: Gaussian blur plus additive noise of std noise_sigma.
kernel_size = 5
kernel_sigma = 5.0
noise_sigma = 1e-2
# modify this for your machine
# save_location = "/share/data/vision-greg2/users/gilton/mnist_equilibriumgrad_blur.ckpt"
save_location = args.savepath
# Pretrained denoiser checkpoint used to warm-start the learned component.
load_location = "/share/data/willett-group/users/gilton/denoisers/celeba_denoiser_normunet_3.ckpt"
# ---- Device discovery: probe the first 6 CUDA device slots.
# get_device_properties raises AssertionError for invalid ids on the torch
# version this was written for, hence the except clause.
gpu_ids = []
for ii in range(6):
    try:
        torch.cuda.get_device_properties(ii)
        print(str(ii), flush=True)
        if not gpu_ids:
            gpu_ids = [ii]
        else:
            gpu_ids.append(ii)
    except AssertionError:
        print('Not ' + str(ii) + "!", flush=True)
print(os.getenv('CUDA_VISIBLE_DEVICES'), flush=True)
gpu_ids = [int(x) for x in gpu_ids]
# device management
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
use_dataparallel = len(gpu_ids) > 1
print("GPU IDs: " + str([int(x) for x in gpu_ids]), flush=True)
# Set up data and dataloaders
transform = transforms.Compose(
    [
        transforms.Resize((128, 128)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # map images to [-1, 1]
    ]
)
celeba_train_size = 162770
total_data = initial_data_points
# NOTE(review): random.sample is unseeded here, so the training subset
# differs between runs — confirm this is intended.
total_indices = random.sample(range(celeba_train_size), k=total_data)
initial_indices = total_indices
dataset = CelebaTrainingDatasetSubset(data_location, subset_indices=initial_indices, transform=transform)
dataloader = torch.utils.data.DataLoader(
    dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=True,
)
test_dataset = CelebaTestDataset(data_location, transform=transform)
test_dataloader = torch.utils.data.DataLoader(
    dataset=test_dataset, batch_size=batch_size, shuffle=False, drop_last=True,
)
### Set up solver and problem setting
forward_operator = blurs.GaussianBlur(sigma=kernel_sigma, kernel_size=kernel_size,
                                      n_channels=3, n_spatial_dimensions=2).to(device=device)
measurement_process = OperatorPlusNoise(forward_operator, noise_sigma=noise_sigma).to(device=device)
# Separate (identically configured) blur instance used inside the solver's
# data-fidelity step.
internal_forward_operator = blurs.GaussianBlur(sigma=kernel_sigma, kernel_size=kernel_size,
                                      n_channels=3, n_spatial_dimensions=2).to(device=device)
# standard u-net
# learned_component = UnetModel(in_chans=n_channels, out_chans=n_channels, num_pool_layers=4,
#                               drop_prob=0.0, chans=32)
learned_component = DnCNN(channels=n_channels)
# Warm-start the learned component from a pretrained denoiser if available.
if os.path.exists(load_location):
    if torch.cuda.is_available():
        saved_dict = torch.load(load_location)
    else:
        saved_dict = torch.load(load_location, map_location='cpu')
    # NOTE(review): this start_epoch comes from the *denoiser* checkpoint and
    # is unconditionally reset to 0 below — dead assignment, confirm.
    start_epoch = saved_dict['epoch']
    learned_component.load_state_dict(saved_dict['solver_state_dict'])
# learned_component = Autoencoder()
solver = EquilibriumProxGrad(linear_operator=internal_forward_operator, nonlinear_operator=learned_component,
                             eta=initial_eta, minval=-1, maxval = 1)
if use_dataparallel:
    solver = nn.DataParallel(solver, device_ids=gpu_ids)
solver = solver.to(device=device)
start_epoch = 0
optimizer = optim.Adam(params=solver.parameters(), lr=learning_rate)
scheduler = optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=int(args.sched_step), gamma=float(args.lr_gamma))
cpu_only = not torch.cuda.is_available()
# Resume full solver/scheduler state if a previous run saved to save_location
# (optimizer state restore is deliberately commented out).
if os.path.exists(save_location):
    if not cpu_only:
        saved_dict = torch.load(save_location)
    else:
        saved_dict = torch.load(save_location, map_location='cpu')
    start_epoch = saved_dict['epoch']
    solver.load_state_dict(saved_dict['solver_state_dict'])
    # optimizer.load_state_dict(saved_dict['optimizer_state_dict'])
    scheduler.load_state_dict(saved_dict['scheduler_state_dict'])
# set up loss and train
lossfunction = torch.nn.MSELoss(reduction='sum')
# Anderson-accelerated fixed-point iteration wrapped in a DEQ module that
# provides the implicit backward pass.
forward_iterator = eq_utils.andersonexp
deep_eq_module = eq_utils.DEQFixedPoint(solver, forward_iterator, m=anderson_m, beta=anderson_beta, lam=1e-2,
                                        max_iter=max_iters, tol=1e-5)
# forward_iterator = eq_utils.forward_iteration
# deep_eq_module = eq_utils.DEQFixedPoint(solver, forward_iterator, max_iter=100, tol=1e-8)
# Do train
refactor_equilibrium_training.train_solver_precond1(
    single_iterate_solver=solver, train_dataloader=dataloader, test_dataloader=test_dataloader,
    measurement_process=measurement_process, optimizer=optimizer, save_location=save_location,
    deep_eq_module=deep_eq_module, loss_function=lossfunction, n_epochs=n_epochs,
    use_dataparallel=use_dataparallel, device=device, scheduler=scheduler,
    print_every_n_steps=print_every_n_steps, save_every_n_epochs=save_every_n_epochs,
    start_epoch=start_epoch, forward_operator = forward_operator, noise_sigma=noise_sigma,
    precond_iterates=60)
| 6,779 | 38.418605 | 122 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/scripts/fixedpoint/mri_grad_fixedeta_pre_and4.py | import torch
import os
import random
import sys
import argparse
sys.path.append('/home-nfs/gilton/learned_iterative_solvers')
# sys.path.append('/Users/dgilton/PycharmProjects/learned_iterative_solvers')
import torch.nn as nn
import torch.optim as optim
import operators.singlecoil_mri as mrimodel
from operators.operator import OperatorPlusNoise
from utils.fastmri_dataloader import singleCoilFastMRIDataloader
from networks.normalized_equilibrium_u_net import UnetModel, DnCNN
from solvers.equilibrium_solvers import EquilibriumProxGradMRI, EquilibriumGrad
from training import refactor_equilibrium_training
from solvers import new_equilibrium_utils as eq_utils
# ---- Command-line configuration -------------------------------------------
# and_* flags configure the Anderson-accelerated forward solver; acceleration
# and the center fraction define the k-space undersampling mask.
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', default=80)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--and_maxiters', default=100)
parser.add_argument('--and_beta', type=float, default=1.0)
parser.add_argument('--and_m', type=int, default=5)
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--etainit', type=float, default=0.4)
parser.add_argument('--lr_gamma', type=float, default=0.1)
parser.add_argument('--sched_step', type=int, default=10)
parser.add_argument('--acceleration', type=float, default=4.0)
parser.add_argument('--savepath',
                    default="/share/data/vision-greg2/users/gilton/celeba_equilibriumgrad_mri_save_inf.ckpt")
# NOTE(review): --loadpath is parsed but never used; load_location is
# hard-coded below — confirm intent.
parser.add_argument('--loadpath',
                    default="/share/data/vision-greg2/users/gilton/celeba_equilibriumgrad_mri_save_inf.ckpt")
args = parser.parse_args()
# Parameters to modify
n_epochs = int(args.n_epochs)
current_epoch = 0
batch_size = int(args.batch_size)
n_channels = 2  # complex MRI data stored as (real, imaginary) channels
max_iters = int(args.and_maxiters)
anderson_m = int(args.and_m)
anderson_beta = float(args.and_beta)
learning_rate = float(args.lr)
print_every_n_steps = 2
save_every_n_epochs = 1
initial_eta = float(args.etainit)
# Fixed 320x320 single-coil fastMRI geometry; seeded mask for reproducibility.
dataheight = 320
datawidth = 320
mri_center_fraction = 0.04
mri_acceleration = float(args.acceleration)
mask = mrimodel.create_mask(shape=[dataheight, datawidth, 2], acceleration=mri_acceleration,
                            center_fraction=mri_center_fraction, seed=10)
noise_sigma = 1e-2
# modify this for your machine
# save_location = "/share/data/vision-greg2/users/gilton/mnist_equilibriumgrad_blur.ckpt"
save_location = args.savepath
# Pretrained denoiser checkpoint used to warm-start the learned component.
load_location = "/share/data/willett-group/users/gilton/denoisers/mri_denoiser_unetnorm_4.ckpt"
# ---- Device discovery: probe the first 6 CUDA device slots.
gpu_ids = []
for ii in range(6):
    try:
        torch.cuda.get_device_properties(ii)
        print(str(ii), flush=True)
        if not gpu_ids:
            gpu_ids = [ii]
        else:
            gpu_ids.append(ii)
    except AssertionError:
        print('Not ' + str(ii) + "!", flush=True)
print(os.getenv('CUDA_VISIBLE_DEVICES'), flush=True)
gpu_ids = [int(x) for x in gpu_ids]
# device management
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
use_dataparallel = len(gpu_ids) > 1
print("GPU IDs: " + str([int(x) for x in gpu_ids]), flush=True)
# Set up data and dataloaders
data_location = "/share/data/vision-greg2/users/gilton/singlecoil_curated_clean/"
trainset_size = 2000
total_data = 2194
random.seed(10)  # fixed seed -> reproducible train subset
all_indices = list(range(trainset_size))  # NOTE(review): unused — confirm
train_indices = random.sample(range(total_data), k=trainset_size)
dataset = singleCoilFastMRIDataloader(data_location, data_indices=train_indices)
dataloader = torch.utils.data.DataLoader(
    dataset=dataset, batch_size=args.batch_size, shuffle=True, drop_last=True,
)
### Set up solver and problem setting
forward_operator = mrimodel.cartesianSingleCoilMRI(kspace_mask=mask).to(device=device)
measurement_process = OperatorPlusNoise(forward_operator, noise_sigma=noise_sigma).to(device=device)
# Separate (identically configured) MRI operator used inside the solver.
internal_forward_operator = mrimodel.cartesianSingleCoilMRI(kspace_mask=mask).to(device=device)
# standard u-net
# learned_component = UnetModel(in_chans=n_channels, out_chans=n_channels, num_pool_layers=4,
#                               drop_prob=0.0, chans=32)
learned_component = DnCNN(channels=n_channels)
cpu_only = not torch.cuda.is_available()
# Warm-start the learned component from a pretrained denoiser if available.
if os.path.exists(load_location):
    if not cpu_only:
        saved_dict = torch.load(load_location)
    else:
        saved_dict = torch.load(load_location, map_location='cpu')
    learned_component.load_state_dict(saved_dict['solver_state_dict'])
# learned_component = Autoencoder()
solver = EquilibriumGrad(linear_operator=internal_forward_operator, nonlinear_operator=learned_component,
                         eta=initial_eta, minval=-1, maxval = 1)
if use_dataparallel:
    solver = nn.DataParallel(solver, device_ids=gpu_ids)
solver = solver.to(device=device)
start_epoch = 0
optimizer = optim.Adam(params=solver.parameters(), lr=learning_rate)
scheduler = optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=int(args.sched_step), gamma=float(args.lr_gamma))
cpu_only = not torch.cuda.is_available()  # NOTE(review): duplicate assignment
# Resume full solver/scheduler state if a previous run saved to save_location
# (optimizer state restore is deliberately commented out).
if os.path.exists(save_location):
    if not cpu_only:
        saved_dict = torch.load(save_location)
    else:
        saved_dict = torch.load(save_location, map_location='cpu')
    start_epoch = saved_dict['epoch']
    solver.load_state_dict(saved_dict['solver_state_dict'])
    # optimizer.load_state_dict(saved_dict['optimizer_state_dict'])
    scheduler.load_state_dict(saved_dict['scheduler_state_dict'])
# set up loss and train
lossfunction = torch.nn.MSELoss(reduction='sum')
# Anderson-accelerated fixed-point iteration wrapped in a DEQ module that
# provides the implicit backward pass.
forward_iterator = eq_utils.andersonexp
deep_eq_module = eq_utils.DEQFixedPoint(solver, forward_iterator, m=anderson_m, beta=anderson_beta, lam=1e-2,
                                        max_iter=max_iters, tol=1e-5)
# forward_iterator = eq_utils.forward_iteration
# deep_eq_module = eq_utils.DEQFixedPoint(solver, forward_iterator, max_iter=max_iters, tol=1e-8)
# Do train
# NOTE(review): noise_sigma=0.3 passed here differs from the measurement
# noise (1e-2) — presumably a preconditioning-phase setting; confirm.
refactor_equilibrium_training.train_solver_precond(
    single_iterate_solver=solver, train_dataloader=dataloader,
    measurement_process=measurement_process, optimizer=optimizer, save_location=save_location,
    deep_eq_module=deep_eq_module, loss_function=lossfunction, n_epochs=n_epochs,
    use_dataparallel=use_dataparallel, device=device, scheduler=scheduler,
    print_every_n_steps=print_every_n_steps, save_every_n_epochs=save_every_n_epochs,
    start_epoch=start_epoch, forward_operator = forward_operator, noise_sigma=0.3,
    precond_iterates=50)
| 6,531 | 39.320988 | 121 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/scripts/fixedpoint/mri_prox_fixedeta_pre_and.py | import torch
import os
import random
import sys
import argparse
sys.path.append('/home-nfs/gilton/learned_iterative_solvers')
# sys.path.append('/Users/dgilton/PycharmProjects/learned_iterative_solvers')
import torch.nn as nn
import torch.optim as optim
import operators.singlecoil_mri as mrimodel
from operators.operator import OperatorPlusNoise
from utils.fastmri_dataloader import singleCoilFastMRIDataloader
from networks.normalized_equilibrium_u_net import UnetModel, DnCNN
from solvers.equilibrium_solvers import EquilibriumProxGradMRI
from training import refactor_equilibrium_training
from solvers import new_equilibrium_utils as eq_utils
# ---- Command-line configuration -------------------------------------------
# Proximal-gradient variant of the MRI training script (cf. the EquilibriumGrad
# script); and_* flags configure the Anderson-accelerated forward solver.
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', default=80)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--and_maxiters', default=100)
parser.add_argument('--and_beta', type=float, default=1.0)
parser.add_argument('--and_m', type=int, default=5)
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--etainit', type=float, default=0.4)
parser.add_argument('--lr_gamma', type=float, default=0.1)
parser.add_argument('--sched_step', type=int, default=10)
parser.add_argument('--acceleration', type=float, default=8.0)
parser.add_argument('--savepath',
                    default="/share/data/vision-greg2/users/gilton/celeba_equilibriumgrad_mri_save_inf.ckpt")
# NOTE(review): --loadpath is parsed but never used; load_location is
# hard-coded below — confirm intent.
parser.add_argument('--loadpath',
                    default="/share/data/vision-greg2/users/gilton/celeba_equilibriumgrad_mri_save_inf.ckpt")
args = parser.parse_args()
# Parameters to modify
n_epochs = int(args.n_epochs)
current_epoch = 0
batch_size = int(args.batch_size)
n_channels = 2  # complex MRI data stored as (real, imaginary) channels
max_iters = int(args.and_maxiters)
anderson_m = int(args.and_m)
anderson_beta = float(args.and_beta)
learning_rate = float(args.lr)
print_every_n_steps = 2
save_every_n_epochs = 1
initial_eta = float(args.etainit)
# Fixed 320x320 single-coil fastMRI geometry; seeded mask for reproducibility.
dataheight = 320
datawidth = 320
mri_center_fraction = 0.04
mri_acceleration = float(args.acceleration)
mask = mrimodel.create_mask(shape=[dataheight, datawidth, 2], acceleration=mri_acceleration,
                            center_fraction=mri_center_fraction, seed=10)
noise_sigma = 1e-2
# modify this for your machine
# save_location = "/share/data/vision-greg2/users/gilton/mnist_equilibriumgrad_blur.ckpt"
save_location = args.savepath
# Pretrained denoiser checkpoint used to warm-start the learned component.
load_location = "/share/data/willett-group/users/gilton/denoisers/mri_denoiser_unetnorm_4.ckpt"
# ---- Device discovery: probe the first 6 CUDA device slots.
gpu_ids = []
for ii in range(6):
    try:
        torch.cuda.get_device_properties(ii)
        print(str(ii), flush=True)
        if not gpu_ids:
            gpu_ids = [ii]
        else:
            gpu_ids.append(ii)
    except AssertionError:
        print('Not ' + str(ii) + "!", flush=True)
print(os.getenv('CUDA_VISIBLE_DEVICES'), flush=True)
gpu_ids = [int(x) for x in gpu_ids]
# device management
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
use_dataparallel = len(gpu_ids) > 1
print("GPU IDs: " + str([int(x) for x in gpu_ids]), flush=True)
# Set up data and dataloaders
data_location = "/share/data/vision-greg2/users/gilton/singlecoil_curated_clean/"
trainset_size = 2000
total_data = 2194
random.seed(10)  # fixed seed -> reproducible train subset
all_indices = list(range(trainset_size))  # NOTE(review): unused — confirm
train_indices = random.sample(range(total_data), k=trainset_size)
dataset = singleCoilFastMRIDataloader(data_location, data_indices=train_indices)
dataloader = torch.utils.data.DataLoader(
    dataset=dataset, batch_size=args.batch_size, shuffle=True, drop_last=True,
)
### Set up solver and problem setting
forward_operator = mrimodel.cartesianSingleCoilMRI(kspace_mask=mask).to(device=device)
measurement_process = OperatorPlusNoise(forward_operator, noise_sigma=noise_sigma).to(device=device)
# Separate (identically configured) MRI operator used inside the solver.
internal_forward_operator = mrimodel.cartesianSingleCoilMRI(kspace_mask=mask).to(device=device)
# standard u-net
# learned_component = UnetModel(in_chans=n_channels, out_chans=n_channels, num_pool_layers=4,
#                               drop_prob=0.0, chans=32)
learned_component = DnCNN(channels=n_channels)
cpu_only = not torch.cuda.is_available()
# Warm-start the learned component from a pretrained denoiser if available.
if os.path.exists(load_location):
    if not cpu_only:
        saved_dict = torch.load(load_location)
    else:
        saved_dict = torch.load(load_location, map_location='cpu')
    learned_component.load_state_dict(saved_dict['solver_state_dict'])
# learned_component = Autoencoder()
solver = EquilibriumProxGradMRI(linear_operator=internal_forward_operator, nonlinear_operator=learned_component,
                                eta=initial_eta, minval=-1, maxval = 1)
if use_dataparallel:
    solver = nn.DataParallel(solver, device_ids=gpu_ids)
solver = solver.to(device=device)
start_epoch = 0
optimizer = optim.Adam(params=solver.parameters(), lr=learning_rate)
scheduler = optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=int(args.sched_step), gamma=float(args.lr_gamma))
cpu_only = not torch.cuda.is_available()  # NOTE(review): duplicate assignment
# Resume full solver/scheduler state if a previous run saved to save_location
# (optimizer state restore is deliberately commented out).
if os.path.exists(save_location):
    if not cpu_only:
        saved_dict = torch.load(save_location)
    else:
        saved_dict = torch.load(save_location, map_location='cpu')
    start_epoch = saved_dict['epoch']
    solver.load_state_dict(saved_dict['solver_state_dict'])
    # optimizer.load_state_dict(saved_dict['optimizer_state_dict'])
    scheduler.load_state_dict(saved_dict['scheduler_state_dict'])
# set up loss and train
lossfunction = torch.nn.MSELoss(reduction='sum')
# Anderson-accelerated fixed-point iteration wrapped in a DEQ module that
# provides the implicit backward pass (looser tol than the grad variant).
forward_iterator = eq_utils.andersonexp
deep_eq_module = eq_utils.DEQFixedPoint(solver, forward_iterator, m=anderson_m, beta=anderson_beta, lam=1e-2,
                                        max_iter=max_iters, tol=1e-4)
# forward_iterator = eq_utils.forward_iteration
# deep_eq_module = eq_utils.DEQFixedPoint(solver, forward_iterator, max_iter=max_iters, tol=1e-8)
# Do train
# NOTE(review): noise_sigma=0.3 passed here differs from the measurement
# noise (1e-2) — presumably a preconditioning-phase setting; confirm.
refactor_equilibrium_training.train_solver_precond(
    single_iterate_solver=solver, train_dataloader=dataloader,
    measurement_process=measurement_process, optimizer=optimizer, save_location=save_location,
    deep_eq_module=deep_eq_module, loss_function=lossfunction, n_epochs=n_epochs,
    use_dataparallel=use_dataparallel, device=device, scheduler=scheduler,
    print_every_n_steps=print_every_n_steps, save_every_n_epochs=save_every_n_epochs,
    start_epoch=start_epoch, forward_operator = forward_operator, noise_sigma=0.3,
    precond_iterates=50)
| 6,521 | 39.259259 | 121 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/scripts/denoising/gaussian_dncnn_norm_denoise.py | import torch
import os
import random
import sys
import argparse
sys.path.append('/home-nfs/gilton/learned_iterative_solvers')
# sys.path.append('/Users/dgilton/PycharmProjects/learned_iterative_solvers')
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
import operators.operator as lin_operator
from operators.operator import OperatorPlusNoise
from utils.celeba_dataloader import CelebaTrainingDatasetSubset, CelebaTestDataset
from networks.normalized_cnn_2 import DnCNN
from training import denoiser_training
from solvers import new_equilibrium_utils as eq_utils
# ---- Command-line configuration -------------------------------------------
# Trains a standalone DnCNN Gaussian denoiser on CelebA (identity forward
# operator + additive noise); used to warm-start the equilibrium solvers.
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', default=80)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--lr_gamma', type=float, default=0.1)
parser.add_argument('--sched_step', type=int, default=10)
parser.add_argument('--noise_sigma', type=float, default=0.01)
parser.add_argument('--savepath',
                    default="/share/data/vision-greg2/users/gilton/celeba_equilibriumgrad_blur_save_inf.ckpt")
args = parser.parse_args()
# Parameters to modify
n_epochs = int(args.n_epochs)
current_epoch = 0
batch_size = int(args.batch_size)
n_channels = 3  # RGB CelebA images
in_channels = 3
out_channels = 3
learning_rate = float(args.lr)
print_every_n_steps = 10
save_every_n_epochs = 5
initial_data_points = 10000
# point this towards your celeba files
data_location = "/share/data/vision-greg2/mixpatch/img_align_celeba/"
noise_sigma = float(args.noise_sigma)
# modify this for your machine
# save_location = "/share/data/vision-greg2/users/gilton/mnist_equilibriumgrad_blur.ckpt"
save_location = args.savepath
# load_location == save_location: the script resumes from its own checkpoint.
load_location = args.savepath
# ---- Device discovery: probe the first 6 CUDA device slots.
gpu_ids = []
for ii in range(6):
    try:
        torch.cuda.get_device_properties(ii)
        print(str(ii), flush=True)
        if not gpu_ids:
            gpu_ids = [ii]
        else:
            gpu_ids.append(ii)
    except AssertionError:
        print('Not ' + str(ii) + "!", flush=True)
print(os.getenv('CUDA_VISIBLE_DEVICES'), flush=True)
gpu_ids = [int(x) for x in gpu_ids]
# device management
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
use_dataparallel = len(gpu_ids) > 1
print("GPU IDs: " + str([int(x) for x in gpu_ids]), flush=True)
# Set up data and dataloaders
transform = transforms.Compose(
    [
        transforms.Resize((64, 64)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # map images to [-1, 1]
    ]
)
celeba_train_size = 162770
total_data = initial_data_points
# NOTE(review): random.sample is unseeded here, so the training subset
# differs between runs — confirm this is intended.
total_indices = random.sample(range(celeba_train_size), k=total_data)
initial_indices = total_indices
dataset = CelebaTrainingDatasetSubset(data_location, subset_indices=initial_indices, transform=transform)
dataloader = torch.utils.data.DataLoader(
    dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=True,
)
test_dataset = CelebaTestDataset(data_location, transform=transform)
test_dataloader = torch.utils.data.DataLoader(
    dataset=test_dataset, batch_size=batch_size, shuffle=False, drop_last=True,
)
### Set up solver and problem setting
# Identity forward operator -> pure denoising problem.
forward_operator = lin_operator.Identity().to(device=device)
measurement_process = OperatorPlusNoise(forward_operator, noise_sigma=noise_sigma).to(device=device)
# learned_component = Autoencoder()
solver = DnCNN(in_channels=in_channels, out_channels=out_channels, internal_channels=64,
               num_of_layers=17, lip=1.0)
if use_dataparallel:
    solver = nn.DataParallel(solver, device_ids=gpu_ids)
solver = solver.to(device=device)
start_epoch = 0
optimizer = optim.Adam(params=solver.parameters(), lr=learning_rate)
scheduler = optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=int(args.sched_step), gamma=float(args.lr_gamma))
cpu_only = not torch.cuda.is_available()
# Resume network/scheduler state from a previous run of this script, if any
# (optimizer state restore is deliberately commented out).
if os.path.exists(load_location):
    if not cpu_only:
        saved_dict = torch.load(load_location)
    else:
        saved_dict = torch.load(load_location, map_location='cpu')
    start_epoch = saved_dict['epoch']
    solver.load_state_dict(saved_dict['solver_state_dict'])
    # optimizer.load_state_dict(saved_dict['optimizer_state_dict'])
    scheduler.load_state_dict(saved_dict['scheduler_state_dict'])
# set up loss and train
lossfunction = torch.nn.MSELoss()
# forward_iterator = eq_utils.anderson
# deep_eq_module = eq_utils.DEQFixedPoint(solver, forward_iterator, m=anderson_m, beta=anderson_beta, lam=1e-6,
#                                         max_iter=max_iters, tol=1e-8)
# NOTE(review): deep_eq_module is constructed but never passed to
# train_denoiser below — apparently dead code; confirm.
forward_iterator = eq_utils.forward_iteration
deep_eq_module = eq_utils.DEQFixedPoint(solver, forward_iterator, max_iter=100, tol=1e-8)
# Do train
denoiser_training.train_denoiser(denoising_net=solver, train_dataloader=dataloader, test_dataloader=test_dataloader,
                                 measurement_process=measurement_process, optimizer=optimizer, save_location=save_location,
                                 loss_function=lossfunction, n_epochs=n_epochs,
                                 use_dataparallel=use_dataparallel, device=device, scheduler=scheduler,
                                 print_every_n_steps=print_every_n_steps, save_every_n_epochs=save_every_n_epochs,
                                 start_epoch=start_epoch)
| 5,275 | 35.638889 | 121 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/scripts/denoising/gaussian_unet_denoise.py | import torch
import os
import random
import sys
import argparse
sys.path.append('/home-nfs/gilton/learned_iterative_solvers')
# sys.path.append('/Users/dgilton/PycharmProjects/learned_iterative_solvers')
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
import operators.operator as lin_operator
from operators.operator import OperatorPlusNoise
from utils.celeba_dataloader import CelebaTrainingDatasetSubset, CelebaTestDataset
from networks.equilibrium_u_net import UnetModel
from training import denoiser_training
from solvers import new_equilibrium_utils as eq_utils
# ----- command-line configuration -----
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', default=80)  # NOTE(review): no type=; cast to int below
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--lr_gamma', type=float, default=0.1)  # multiplicative LR decay factor
parser.add_argument('--sched_step', type=int, default=10)  # epochs between LR decays
parser.add_argument('--noise_sigma', type=float, default=0.01)  # std of additive Gaussian noise
parser.add_argument('--savepath',
                    default="/share/data/vision-greg2/users/gilton/celeba_equilibriumgrad_blur_save_inf.ckpt")
args = parser.parse_args()
# Parameters to modify
n_epochs = int(args.n_epochs)
current_epoch = 0  # NOTE(review): unused below; start_epoch is what the trainer receives
batch_size = int(args.batch_size)
n_channels = 3  # RGB CelebA images
learning_rate = float(args.lr)
print_every_n_steps = 10
save_every_n_epochs = 5
initial_data_points = 10000  # size of the random training subset drawn from CelebA
# point this towards your celeba files
data_location = "/share/data/vision-greg2/mixpatch/img_align_celeba/"
noise_sigma = float(args.noise_sigma)
# modify this for your machine
# save_location = "/share/data/vision-greg2/users/gilton/mnist_equilibriumgrad_blur.ckpt"
save_location = args.savepath
load_location = args.savepath  # checkpoints are resumed from the same path they are saved to
# Probe the first 6 CUDA device indices and collect those that exist.
gpu_ids = []
for ii in range(6):
    try:
        torch.cuda.get_device_properties(ii)
        print(str(ii), flush=True)
        if not gpu_ids:
            gpu_ids = [ii]
        else:
            gpu_ids.append(ii)
    except AssertionError:
        # get_device_properties raises AssertionError for an invalid device index
        print('Not ' + str(ii) + "!", flush=True)
print(os.getenv('CUDA_VISIBLE_DEVICES'), flush=True)
gpu_ids = [int(x) for x in gpu_ids]
# device management
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
use_dataparallel = len(gpu_ids) > 1
print("GPU IDs: " + str([int(x) for x in gpu_ids]), flush=True)
# Set up data and dataloaders
transform = transforms.Compose(
    [
        transforms.Resize((64, 64)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # map pixels to [-1, 1]
    ]
)
celeba_train_size = 162770  # total images in the CelebA training split
total_data = initial_data_points
total_indices = random.sample(range(celeba_train_size), k=total_data)
initial_indices = total_indices
dataset = CelebaTrainingDatasetSubset(data_location, subset_indices=initial_indices, transform=transform)
dataloader = torch.utils.data.DataLoader(
    dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=True,
)
test_dataset = CelebaTestDataset(data_location, transform=transform)
test_dataloader = torch.utils.data.DataLoader(
    dataset=test_dataset, batch_size=batch_size, shuffle=False, drop_last=True,
)
### Set up solver and problem setting
# Denoising: the forward operator is the identity, measurements are y = x + sigma * n.
forward_operator = lin_operator.Identity().to(device=device)
measurement_process = OperatorPlusNoise(forward_operator, noise_sigma=noise_sigma).to(device=device)
# learned_component = Autoencoder()
solver = UnetModel(in_chans=n_channels, out_chans=n_channels, num_pool_layers=4,
                   drop_prob=0.0, chans=32)
if use_dataparallel:
    solver = nn.DataParallel(solver, device_ids=gpu_ids)
solver = solver.to(device=device)
start_epoch = 0
optimizer = optim.Adam(params=solver.parameters(), lr=learning_rate)
scheduler = optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=int(args.sched_step), gamma=float(args.lr_gamma))
cpu_only = not torch.cuda.is_available()
# Resume from an existing checkpoint, if present.
if os.path.exists(load_location):
    if not cpu_only:
        saved_dict = torch.load(load_location)
    else:
        # Remap CUDA-saved tensors onto the CPU when no GPU is available.
        saved_dict = torch.load(load_location, map_location='cpu')
    start_epoch = saved_dict['epoch']
    solver.load_state_dict(saved_dict['solver_state_dict'])
    # optimizer.load_state_dict(saved_dict['optimizer_state_dict'])
    scheduler.load_state_dict(saved_dict['scheduler_state_dict'])
# set up loss and train
lossfunction = torch.nn.MSELoss()
# Do train
denoiser_training.train_denoiser(denoising_net=solver, train_dataloader=dataloader, test_dataloader=test_dataloader,
                                 measurement_process=measurement_process, optimizer=optimizer, save_location=save_location,
                                 loss_function=lossfunction, n_epochs=n_epochs,
                                 use_dataparallel=use_dataparallel, device=device, scheduler=scheduler,
                                 print_every_n_steps=print_every_n_steps, save_every_n_epochs=save_every_n_epochs,
                                 start_epoch=start_epoch)
| 4,901 | 35.044118 | 121 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/scripts/denoising/mri_unet_denoise.py | import torch
import os
import random
import sys
import argparse
sys.path.append('/home-nfs/gilton/learned_iterative_solvers')
# sys.path.append('/Users/dgilton/PycharmProjects/learned_iterative_solvers')
import torch.nn as nn
import torch.optim as optim
import operators.operator as lin_operator
from operators.operator import OperatorPlusNoise
from utils.fastmri_dataloader import singleCoilFastMRIDataloader
from networks.equilibrium_u_net import UnetModel
from solvers.equilibrium_solvers import EquilibriumGrad
from training import denoiser_training
from solvers import new_equilibrium_utils as eq_utils
# ----- command-line configuration -----
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', default=80)  # NOTE(review): no type=; cast to int below
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--and_maxiters', default=100)  # Anderson-acceleration max iterations
parser.add_argument('--and_beta', type=float, default=1.0)  # Anderson mixing parameter
parser.add_argument('--and_m', type=int, default=5)  # Anderson history depth
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--etainit', type=float, default=0.5)
parser.add_argument('--lr_gamma', type=float, default=0.1)  # multiplicative LR decay factor
parser.add_argument('--sched_step', type=int, default=10)  # epochs between LR decays
parser.add_argument('--acceleration', type=float, default=8.0)  # NOTE(review): unused in this script
parser.add_argument('--noise_sigma', type=float, default=0.01)  # std of additive Gaussian noise
parser.add_argument('--savepath',
                    default="/share/data/vision-greg2/users/gilton/celeba_equilibriumgrad_mri_save_inf.ckpt")
args = parser.parse_args()
# Parameters to modify
n_epochs = int(args.n_epochs)
current_epoch = 0  # NOTE(review): unused below; start_epoch is what the trainer receives
batch_size = int(args.batch_size)
n_channels = 2  # real + imaginary channels of the complex MRI image
max_iters = int(args.and_maxiters)
anderson_m = int(args.and_m)
anderson_beta = float(args.and_beta)
learning_rate = float(args.lr)
print_every_n_steps = 10
save_every_n_epochs = 5
initial_eta = float(args.etainit)
dataheight = 320  # fastMRI single-coil image size
datawidth = 320
noise_sigma = float(args.noise_sigma)
# modify this for your machine
# save_location = "/share/data/vision-greg2/users/gilton/mnist_equilibriumgrad_blur.ckpt"
save_location = args.savepath
load_location = args.savepath
# Probe the first 6 CUDA device indices and collect those that exist.
gpu_ids = []
for ii in range(6):
    try:
        torch.cuda.get_device_properties(ii)
        print(str(ii), flush=True)
        if not gpu_ids:
            gpu_ids = [ii]
        else:
            gpu_ids.append(ii)
    except AssertionError:
        # get_device_properties raises AssertionError for an invalid device index
        print('Not ' + str(ii) + "!", flush=True)
print(os.getenv('CUDA_VISIBLE_DEVICES'), flush=True)
gpu_ids = [int(x) for x in gpu_ids]
# device management
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
use_dataparallel = len(gpu_ids) > 1
print("GPU IDs: " + str([int(x) for x in gpu_ids]), flush=True)
# Set up data and dataloaders
data_location = "/share/data/vision-greg2/users/gilton/singlecoil_curated_clean/"
trainset_size = 2000
total_data = 2194  # total curated fastMRI slices available
random.seed(10)  # fixed seed so the train subset is reproducible across runs
all_indices = list(range(trainset_size))  # NOTE(review): unused below
train_indices = random.sample(range(total_data), k=trainset_size)
dataset = singleCoilFastMRIDataloader(data_location, data_indices=train_indices)
dataloader = torch.utils.data.DataLoader(
    dataset=dataset, batch_size=args.batch_size, shuffle=True, drop_last=True,
)
### Set up solver and problem setting
# Denoising: the forward operator is the identity, measurements are y = x + sigma * n.
forward_operator = lin_operator.Identity().to(device=device)
measurement_process = OperatorPlusNoise(forward_operator, noise_sigma=noise_sigma).to(device=device)
solver = UnetModel(in_chans=n_channels, out_chans=n_channels, num_pool_layers=4,
                   drop_prob=0.0, chans=32)
if use_dataparallel:
    solver = nn.DataParallel(solver, device_ids=gpu_ids)
solver = solver.to(device=device)
start_epoch = 0
optimizer = optim.Adam(params=solver.parameters(), lr=learning_rate)
scheduler = optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=int(args.sched_step), gamma=float(args.lr_gamma))
cpu_only = not torch.cuda.is_available()
# set up loss and train
lossfunction = torch.nn.MSELoss()
# Do train
# NOTE(review): test_dataloader is the training loader — evaluation is on train data.
denoiser_training.train_denoiser(denoising_net=solver, train_dataloader=dataloader, test_dataloader=dataloader,
                                 measurement_process=measurement_process, optimizer=optimizer, save_location=save_location,
                                 loss_function=lossfunction, n_epochs=n_epochs,
                                 use_dataparallel=use_dataparallel, device=device, scheduler=scheduler,
                                 print_every_n_steps=print_every_n_steps, save_every_n_epochs=save_every_n_epochs,
                                 start_epoch=start_epoch)
| 4,447 | 36.066667 | 121 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/scripts/denoising/mri_dncnn_denoise.py | import torch
import os
import random
import sys
import argparse
sys.path.append('/home-nfs/gilton/learned_iterative_solvers')
# sys.path.append('/Users/dgilton/PycharmProjects/learned_iterative_solvers')
import torch.nn as nn
import torch.optim as optim
import operators.operator as lin_operator
from operators.operator import OperatorPlusNoise
from utils.fastmri_dataloader import singleCoilFastMRIDataloader
from networks.normalized_cnn_2 import DnCNN
from solvers.equilibrium_solvers import EquilibriumGrad
from training import denoiser_training
from solvers import new_equilibrium_utils as eq_utils
# ----- command-line configuration -----
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', default=80)  # NOTE(review): no type=; cast to int below
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--and_maxiters', default=100)  # Anderson-acceleration max iterations
parser.add_argument('--and_beta', type=float, default=1.0)  # Anderson mixing parameter
parser.add_argument('--and_m', type=int, default=5)  # Anderson history depth
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--etainit', type=float, default=0.5)
parser.add_argument('--lr_gamma', type=float, default=0.1)  # multiplicative LR decay factor
parser.add_argument('--sched_step', type=int, default=10)  # epochs between LR decays
parser.add_argument('--acceleration', type=float, default=8.0)  # NOTE(review): unused in this script
parser.add_argument('--noise_sigma', type=float, default=0.01)  # std of additive Gaussian noise
parser.add_argument('--savepath',
                    default="/share/data/vision-greg2/users/gilton/celeba_equilibriumgrad_mri_save_inf.ckpt")
args = parser.parse_args()
# Parameters to modify
n_epochs = int(args.n_epochs)
current_epoch = 0  # NOTE(review): unused below; start_epoch is what the trainer receives
batch_size = int(args.batch_size)
n_channels = 2  # real + imaginary channels of the complex MRI image
max_iters = int(args.and_maxiters)
anderson_m = int(args.and_m)
anderson_beta = float(args.and_beta)
learning_rate = float(args.lr)
print_every_n_steps = 10
save_every_n_epochs = 5
initial_eta = float(args.etainit)
dataheight = 320  # fastMRI single-coil image size
datawidth = 320
noise_sigma = float(args.noise_sigma)
# modify this for your machine
# save_location = "/share/data/vision-greg2/users/gilton/mnist_equilibriumgrad_blur.ckpt"
save_location = args.savepath
load_location = args.savepath  # checkpoints are resumed from the same path they are saved to
# Probe the first 6 CUDA device indices and collect those that exist.
gpu_ids = []
for ii in range(6):
    try:
        torch.cuda.get_device_properties(ii)
        print(str(ii), flush=True)
        if not gpu_ids:
            gpu_ids = [ii]
        else:
            gpu_ids.append(ii)
    except AssertionError:
        # get_device_properties raises AssertionError for an invalid device index
        print('Not ' + str(ii) + "!", flush=True)
print(os.getenv('CUDA_VISIBLE_DEVICES'), flush=True)
gpu_ids = [int(x) for x in gpu_ids]
# device management
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
use_dataparallel = len(gpu_ids) > 1
print("GPU IDs: " + str([int(x) for x in gpu_ids]), flush=True)
# Set up data and dataloaders
data_location = "/share/data/vision-greg2/users/gilton/singlecoil_curated_clean/"
trainset_size = 2000
total_data = 2194  # total curated fastMRI slices available
random.seed(10)  # fixed seed so the train subset is reproducible across runs
all_indices = list(range(trainset_size))  # NOTE(review): unused below
train_indices = random.sample(range(total_data), k=trainset_size)
dataset = singleCoilFastMRIDataloader(data_location, data_indices=train_indices)
dataloader = torch.utils.data.DataLoader(
    dataset=dataset, batch_size=args.batch_size, shuffle=True, drop_last=True,
)
### Set up solver and problem setting
# Denoising: the forward operator is the identity, measurements are y = x + sigma * n.
forward_operator = lin_operator.Identity().to(device=device)
measurement_process = OperatorPlusNoise(forward_operator, noise_sigma=noise_sigma).to(device=device)
# DnCNN denoiser; lip=1.0 constrains the network's Lipschitz constant.
solver = DnCNN(in_channels=n_channels, out_channels=n_channels, internal_channels=64,
               num_of_layers=17, lip=1.0)
if use_dataparallel:
    solver = nn.DataParallel(solver, device_ids=gpu_ids)
solver = solver.to(device=device)
start_epoch = 0
optimizer = optim.Adam(params=solver.parameters(), lr=learning_rate)
scheduler = optim.lr_scheduler.StepLR(optimizer=optimizer, step_size=int(args.sched_step), gamma=float(args.lr_gamma))
cpu_only = not torch.cuda.is_available()
# Resume from an existing checkpoint, if present.
if os.path.exists(load_location):
    if not cpu_only:
        saved_dict = torch.load(load_location)
    else:
        # Remap CUDA-saved tensors onto the CPU when no GPU is available.
        saved_dict = torch.load(load_location, map_location='cpu')
    start_epoch = saved_dict['epoch']
    solver.load_state_dict(saved_dict['solver_state_dict'])
    # optimizer.load_state_dict(saved_dict['optimizer_state_dict'])
    scheduler.load_state_dict(saved_dict['scheduler_state_dict'])
# set up loss and train
lossfunction = torch.nn.MSELoss()
# forward_iterator = eq_utils.anderson
# deep_eq_module = eq_utils.DEQFixedPoint(solver, forward_iterator, m=anderson_m, beta=anderson_beta, lam=1e-6,
#                                         max_iter=max_iters, tol=1e-8)
forward_iterator = eq_utils.forward_iteration
# NOTE(review): deep_eq_module is constructed but never passed to the training
# call below — plain denoiser training is run on `solver` directly.
deep_eq_module = eq_utils.DEQFixedPoint(solver, forward_iterator, max_iter=100, tol=1e-8)
# Do train
# NOTE(review): test_dataloader is the training loader — evaluation is on train data.
denoiser_training.train_denoiser(denoising_net=solver, train_dataloader=dataloader, test_dataloader=dataloader,
                                 measurement_process=measurement_process, optimizer=optimizer, save_location=save_location,
                                 loss_function=lossfunction, n_epochs=n_epochs,
                                 use_dataparallel=use_dataparallel, device=device, scheduler=scheduler,
                                 print_every_n_steps=print_every_n_steps, save_every_n_epochs=save_every_n_epochs,
                                 start_epoch=start_epoch)
| 5,199 | 36.681159 | 121 | py |
deep_equilibrium_inverse | deep_equilibrium_inverse-main/operators/operator.py | import torch
class LinearOperator(torch.nn.Module):
    """Abstract base class for a linear forward operator A.

    Subclasses must implement ``forward`` (apply A) and ``adjoint``
    (apply A^T); ``gramian`` (A^T A) is derived from those two.

    Fix: the abstract methods previously used ``pass`` and silently
    returned ``None``, deferring the failure of a missing override to
    some distant call site. They now raise ``NotImplementedError`` so
    an incomplete subclass fails loudly at the point of use.
    """

    def __init__(self):
        super(LinearOperator, self).__init__()

    def forward(self, x):
        """Apply the operator: return A @ x. Must be overridden."""
        raise NotImplementedError

    def adjoint(self, x):
        """Apply the adjoint: return A^T @ x. Must be overridden."""
        raise NotImplementedError

    def gramian(self, x):
        """Apply the Gramian A^T A by composing adjoint after forward."""
        return self.adjoint(self.forward(x))
class SelfAdjointLinearOperator(LinearOperator):
    """A linear operator satisfying A = A^T.

    The adjoint therefore coincides with the forward map, so only
    ``forward`` needs to be supplied by subclasses.
    """

    def adjoint(self, x):
        """Return A^T @ x, which for a self-adjoint operator is just A @ x."""
        return self(x)
class Identity(SelfAdjointLinearOperator):
    """The identity operator A = I, used for pure denoising problems."""

    def forward(self, x):
        """Return the input unchanged."""
        return x
class OperatorPlusNoise(torch.nn.Module):
    """Measurement process y = A(x) + sigma * n with n ~ N(0, I).

    Wraps a forward operator and corrupts its output with i.i.d.
    Gaussian noise of standard deviation ``noise_sigma`` on every call.
    """

    def __init__(self, operator, noise_sigma):
        """Store the wrapped operator and the noise standard deviation."""
        super(OperatorPlusNoise, self).__init__()
        self.internal_operator = operator
        self.noise_sigma = noise_sigma

    def forward(self, x):
        """Apply the wrapped operator to ``x`` and add Gaussian noise."""
        clean_measurement = self.internal_operator(x)
        noise = self.noise_sigma * torch.randn_like(clean_measurement)
        return clean_measurement + noise
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.