Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- RAVE-main/annotator/__pycache__/annotator_path.cpython-38.pyc +0 -0
- RAVE-main/annotator/__pycache__/util.cpython-38.pyc +0 -0
- RAVE-main/annotator/canny/__init__.py +5 -0
- RAVE-main/annotator/canny/__pycache__/__init__.cpython-38.pyc +0 -0
- RAVE-main/annotator/lama/__init__.py +58 -0
- RAVE-main/annotator/lama/config.yaml +157 -0
- RAVE-main/annotator/lama/saicinpainting/__init__.py +0 -0
- RAVE-main/annotator/lama/saicinpainting/training/__init__.py +0 -0
- RAVE-main/annotator/lama/saicinpainting/training/losses/adversarial.py +177 -0
- RAVE-main/annotator/lama/saicinpainting/training/losses/constants.py +152 -0
- RAVE-main/annotator/lama/saicinpainting/training/losses/distance_weighting.py +126 -0
- RAVE-main/annotator/lama/saicinpainting/training/losses/feature_matching.py +33 -0
- RAVE-main/annotator/lama/saicinpainting/training/losses/perceptual.py +113 -0
- RAVE-main/annotator/lama/saicinpainting/training/losses/segmentation.py +43 -0
- RAVE-main/annotator/lama/saicinpainting/training/losses/style_loss.py +155 -0
- RAVE-main/annotator/lama/saicinpainting/training/modules/__init__.py +31 -0
- RAVE-main/annotator/lama/saicinpainting/utils.py +174 -0
- RAVE-main/annotator/mlsd/LICENSE +201 -0
- RAVE-main/annotator/mlsd/__init__.py +49 -0
- RAVE-main/annotator/mlsd/models/mbv2_mlsd_large.py +292 -0
- RAVE-main/annotator/mlsd/models/mbv2_mlsd_tiny.py +275 -0
- RAVE-main/annotator/mlsd/utils.py +581 -0
- RAVE-main/annotator/mmpkg/mmseg/apis/__init__.py +9 -0
- RAVE-main/annotator/mmpkg/mmseg/apis/inference.py +138 -0
- RAVE-main/annotator/mmpkg/mmseg/apis/test.py +238 -0
- RAVE-main/annotator/mmpkg/mmseg/apis/train.py +116 -0
- RAVE-main/annotator/mmpkg/mmseg/core/__init__.py +3 -0
- RAVE-main/annotator/mmpkg/mmseg/core/evaluation/__init__.py +8 -0
- RAVE-main/annotator/mmpkg/mmseg/core/evaluation/class_names.py +152 -0
- RAVE-main/annotator/mmpkg/mmseg/core/evaluation/eval_hooks.py +109 -0
- RAVE-main/annotator/mmpkg/mmseg/core/evaluation/metrics.py +326 -0
- RAVE-main/annotator/mmpkg/mmseg/core/seg/__init__.py +4 -0
- RAVE-main/annotator/mmpkg/mmseg/core/seg/builder.py +8 -0
- RAVE-main/annotator/mmpkg/mmseg/core/seg/sampler/__init__.py +4 -0
- RAVE-main/annotator/mmpkg/mmseg/core/seg/sampler/base_pixel_sampler.py +12 -0
- RAVE-main/annotator/mmpkg/mmseg/core/seg/sampler/ohem_pixel_sampler.py +76 -0
- RAVE-main/annotator/mmpkg/mmseg/core/utils/__init__.py +3 -0
- RAVE-main/annotator/mmpkg/mmseg/core/utils/misc.py +17 -0
- RAVE-main/annotator/mmpkg/mmseg/datasets/__init__.py +19 -0
- RAVE-main/annotator/mmpkg/mmseg/datasets/cityscapes.py +217 -0
- RAVE-main/annotator/mmpkg/mmseg/datasets/custom.py +403 -0
- RAVE-main/annotator/mmpkg/mmseg/datasets/dataset_wrappers.py +50 -0
- RAVE-main/annotator/mmpkg/mmseg/datasets/drive.py +27 -0
- RAVE-main/annotator/mmpkg/mmseg/datasets/pascal_context.py +103 -0
- RAVE-main/annotator/mmpkg/mmseg/datasets/pipelines/__init__.py +16 -0
- RAVE-main/annotator/mmpkg/mmseg/datasets/pipelines/compose.py +51 -0
- RAVE-main/annotator/mmpkg/mmseg/datasets/pipelines/formating.py +288 -0
- RAVE-main/annotator/mmpkg/mmseg/datasets/pipelines/loading.py +153 -0
- RAVE-main/annotator/mmpkg/mmseg/datasets/pipelines/test_time_aug.py +133 -0
- RAVE-main/annotator/mmpkg/mmseg/datasets/pipelines/transforms.py +889 -0
RAVE-main/annotator/__pycache__/annotator_path.cpython-38.pyc
ADDED
|
Binary file (575 Bytes). View file
|
|
|
RAVE-main/annotator/__pycache__/util.cpython-38.pyc
ADDED
|
Binary file (2.08 kB). View file
|
|
|
RAVE-main/annotator/canny/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def apply_canny(img, low_threshold, high_threshold):
|
| 5 |
+
return cv2.Canny(img, low_threshold, high_threshold)
|
RAVE-main/annotator/canny/__pycache__/__init__.cpython-38.pyc
ADDED
|
Binary file (319 Bytes). View file
|
|
|
RAVE-main/annotator/lama/__init__.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# https://github.com/advimman/lama
|
| 2 |
+
|
| 3 |
+
import yaml
|
| 4 |
+
import torch
|
| 5 |
+
from omegaconf import OmegaConf
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
from einops import rearrange
|
| 9 |
+
import os
|
| 10 |
+
from modules import devices
|
| 11 |
+
from annotator.annotator_path import models_path
|
| 12 |
+
from annotator.lama.saicinpainting.training.trainers import load_checkpoint
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class LamaInpainting:
|
| 16 |
+
model_dir = os.path.join(models_path, "lama")
|
| 17 |
+
|
| 18 |
+
def __init__(self):
|
| 19 |
+
self.model = None
|
| 20 |
+
self.device = devices.get_device_for("controlnet")
|
| 21 |
+
|
| 22 |
+
def load_model(self):
|
| 23 |
+
remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/ControlNetLama.pth"
|
| 24 |
+
modelpath = os.path.join(self.model_dir, "ControlNetLama.pth")
|
| 25 |
+
if not os.path.exists(modelpath):
|
| 26 |
+
from basicsr.utils.download_util import load_file_from_url
|
| 27 |
+
load_file_from_url(remote_model_path, model_dir=self.model_dir)
|
| 28 |
+
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.yaml')
|
| 29 |
+
cfg = yaml.safe_load(open(config_path, 'rt'))
|
| 30 |
+
cfg = OmegaConf.create(cfg)
|
| 31 |
+
cfg.training_model.predict_only = True
|
| 32 |
+
cfg.visualizer.kind = 'noop'
|
| 33 |
+
self.model = load_checkpoint(cfg, os.path.abspath(modelpath), strict=False, map_location='cpu')
|
| 34 |
+
self.model = self.model.to(self.device)
|
| 35 |
+
self.model.eval()
|
| 36 |
+
|
| 37 |
+
def unload_model(self):
|
| 38 |
+
if self.model is not None:
|
| 39 |
+
self.model.cpu()
|
| 40 |
+
|
| 41 |
+
def __call__(self, input_image):
|
| 42 |
+
if self.model is None:
|
| 43 |
+
self.load_model()
|
| 44 |
+
self.model.to(self.device)
|
| 45 |
+
color = np.ascontiguousarray(input_image[:, :, 0:3]).astype(np.float32) / 255.0
|
| 46 |
+
mask = np.ascontiguousarray(input_image[:, :, 3:4]).astype(np.float32) / 255.0
|
| 47 |
+
with torch.no_grad():
|
| 48 |
+
color = torch.from_numpy(color).float().to(self.device)
|
| 49 |
+
mask = torch.from_numpy(mask).float().to(self.device)
|
| 50 |
+
mask = (mask > 0.5).float()
|
| 51 |
+
color = color * (1 - mask)
|
| 52 |
+
image_feed = torch.cat([color, mask], dim=2)
|
| 53 |
+
image_feed = rearrange(image_feed, 'h w c -> 1 c h w')
|
| 54 |
+
result = self.model(image_feed)[0]
|
| 55 |
+
result = rearrange(result, 'c h w -> h w c')
|
| 56 |
+
result = result * mask + color * (1 - mask)
|
| 57 |
+
result *= 255.0
|
| 58 |
+
return result.detach().cpu().numpy().clip(0, 255).astype(np.uint8)
|
RAVE-main/annotator/lama/config.yaml
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
run_title: b18_ffc075_batch8x15
|
| 2 |
+
training_model:
|
| 3 |
+
kind: default
|
| 4 |
+
visualize_each_iters: 1000
|
| 5 |
+
concat_mask: true
|
| 6 |
+
store_discr_outputs_for_vis: true
|
| 7 |
+
losses:
|
| 8 |
+
l1:
|
| 9 |
+
weight_missing: 0
|
| 10 |
+
weight_known: 10
|
| 11 |
+
perceptual:
|
| 12 |
+
weight: 0
|
| 13 |
+
adversarial:
|
| 14 |
+
kind: r1
|
| 15 |
+
weight: 10
|
| 16 |
+
gp_coef: 0.001
|
| 17 |
+
mask_as_fake_target: true
|
| 18 |
+
allow_scale_mask: true
|
| 19 |
+
feature_matching:
|
| 20 |
+
weight: 100
|
| 21 |
+
resnet_pl:
|
| 22 |
+
weight: 30
|
| 23 |
+
weights_path: ${env:TORCH_HOME}
|
| 24 |
+
|
| 25 |
+
optimizers:
|
| 26 |
+
generator:
|
| 27 |
+
kind: adam
|
| 28 |
+
lr: 0.001
|
| 29 |
+
discriminator:
|
| 30 |
+
kind: adam
|
| 31 |
+
lr: 0.0001
|
| 32 |
+
visualizer:
|
| 33 |
+
key_order:
|
| 34 |
+
- image
|
| 35 |
+
- predicted_image
|
| 36 |
+
- discr_output_fake
|
| 37 |
+
- discr_output_real
|
| 38 |
+
- inpainted
|
| 39 |
+
rescale_keys:
|
| 40 |
+
- discr_output_fake
|
| 41 |
+
- discr_output_real
|
| 42 |
+
kind: directory
|
| 43 |
+
outdir: /group-volume/User-Driven-Content-Generation/r.suvorov/inpainting/experiments/r.suvorov_2021-04-30_14-41-12_train_simple_pix2pix2_gap_sdpl_novgg_large_b18_ffc075_batch8x15/samples
|
| 44 |
+
location:
|
| 45 |
+
data_root_dir: /group-volume/User-Driven-Content-Generation/datasets/inpainting_data_root_large
|
| 46 |
+
out_root_dir: /group-volume/User-Driven-Content-Generation/${env:USER}/inpainting/experiments
|
| 47 |
+
tb_dir: /group-volume/User-Driven-Content-Generation/${env:USER}/inpainting/tb_logs
|
| 48 |
+
data:
|
| 49 |
+
batch_size: 15
|
| 50 |
+
val_batch_size: 2
|
| 51 |
+
num_workers: 3
|
| 52 |
+
train:
|
| 53 |
+
indir: ${location.data_root_dir}/train
|
| 54 |
+
out_size: 256
|
| 55 |
+
mask_gen_kwargs:
|
| 56 |
+
irregular_proba: 1
|
| 57 |
+
irregular_kwargs:
|
| 58 |
+
max_angle: 4
|
| 59 |
+
max_len: 200
|
| 60 |
+
max_width: 100
|
| 61 |
+
max_times: 5
|
| 62 |
+
min_times: 1
|
| 63 |
+
box_proba: 1
|
| 64 |
+
box_kwargs:
|
| 65 |
+
margin: 10
|
| 66 |
+
bbox_min_size: 30
|
| 67 |
+
bbox_max_size: 150
|
| 68 |
+
max_times: 3
|
| 69 |
+
min_times: 1
|
| 70 |
+
segm_proba: 0
|
| 71 |
+
segm_kwargs:
|
| 72 |
+
confidence_threshold: 0.5
|
| 73 |
+
max_object_area: 0.5
|
| 74 |
+
min_mask_area: 0.07
|
| 75 |
+
downsample_levels: 6
|
| 76 |
+
num_variants_per_mask: 1
|
| 77 |
+
rigidness_mode: 1
|
| 78 |
+
max_foreground_coverage: 0.3
|
| 79 |
+
max_foreground_intersection: 0.7
|
| 80 |
+
max_mask_intersection: 0.1
|
| 81 |
+
max_hidden_area: 0.1
|
| 82 |
+
max_scale_change: 0.25
|
| 83 |
+
horizontal_flip: true
|
| 84 |
+
max_vertical_shift: 0.2
|
| 85 |
+
position_shuffle: true
|
| 86 |
+
transform_variant: distortions
|
| 87 |
+
dataloader_kwargs:
|
| 88 |
+
batch_size: ${data.batch_size}
|
| 89 |
+
shuffle: true
|
| 90 |
+
num_workers: ${data.num_workers}
|
| 91 |
+
val:
|
| 92 |
+
indir: ${location.data_root_dir}/val
|
| 93 |
+
img_suffix: .png
|
| 94 |
+
dataloader_kwargs:
|
| 95 |
+
batch_size: ${data.val_batch_size}
|
| 96 |
+
shuffle: false
|
| 97 |
+
num_workers: ${data.num_workers}
|
| 98 |
+
visual_test:
|
| 99 |
+
indir: ${location.data_root_dir}/korean_test
|
| 100 |
+
img_suffix: _input.png
|
| 101 |
+
pad_out_to_modulo: 32
|
| 102 |
+
dataloader_kwargs:
|
| 103 |
+
batch_size: 1
|
| 104 |
+
shuffle: false
|
| 105 |
+
num_workers: ${data.num_workers}
|
| 106 |
+
generator:
|
| 107 |
+
kind: ffc_resnet
|
| 108 |
+
input_nc: 4
|
| 109 |
+
output_nc: 3
|
| 110 |
+
ngf: 64
|
| 111 |
+
n_downsampling: 3
|
| 112 |
+
n_blocks: 18
|
| 113 |
+
add_out_act: sigmoid
|
| 114 |
+
init_conv_kwargs:
|
| 115 |
+
ratio_gin: 0
|
| 116 |
+
ratio_gout: 0
|
| 117 |
+
enable_lfu: false
|
| 118 |
+
downsample_conv_kwargs:
|
| 119 |
+
ratio_gin: ${generator.init_conv_kwargs.ratio_gout}
|
| 120 |
+
ratio_gout: ${generator.downsample_conv_kwargs.ratio_gin}
|
| 121 |
+
enable_lfu: false
|
| 122 |
+
resnet_conv_kwargs:
|
| 123 |
+
ratio_gin: 0.75
|
| 124 |
+
ratio_gout: ${generator.resnet_conv_kwargs.ratio_gin}
|
| 125 |
+
enable_lfu: false
|
| 126 |
+
discriminator:
|
| 127 |
+
kind: pix2pixhd_nlayer
|
| 128 |
+
input_nc: 3
|
| 129 |
+
ndf: 64
|
| 130 |
+
n_layers: 4
|
| 131 |
+
evaluator:
|
| 132 |
+
kind: default
|
| 133 |
+
inpainted_key: inpainted
|
| 134 |
+
integral_kind: ssim_fid100_f1
|
| 135 |
+
trainer:
|
| 136 |
+
kwargs:
|
| 137 |
+
gpus: -1
|
| 138 |
+
accelerator: ddp
|
| 139 |
+
max_epochs: 200
|
| 140 |
+
gradient_clip_val: 1
|
| 141 |
+
log_gpu_memory: None
|
| 142 |
+
limit_train_batches: 25000
|
| 143 |
+
val_check_interval: ${trainer.kwargs.limit_train_batches}
|
| 144 |
+
log_every_n_steps: 1000
|
| 145 |
+
precision: 32
|
| 146 |
+
terminate_on_nan: false
|
| 147 |
+
check_val_every_n_epoch: 1
|
| 148 |
+
num_sanity_val_steps: 8
|
| 149 |
+
limit_val_batches: 1000
|
| 150 |
+
replace_sampler_ddp: false
|
| 151 |
+
checkpoint_kwargs:
|
| 152 |
+
verbose: true
|
| 153 |
+
save_top_k: 5
|
| 154 |
+
save_last: true
|
| 155 |
+
period: 1
|
| 156 |
+
monitor: val_ssim_fid100_f1_total_mean
|
| 157 |
+
mode: max
|
RAVE-main/annotator/lama/saicinpainting/__init__.py
ADDED
|
File without changes
|
RAVE-main/annotator/lama/saicinpainting/training/__init__.py
ADDED
|
File without changes
|
RAVE-main/annotator/lama/saicinpainting/training/losses/adversarial.py
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Tuple, Dict, Optional
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
import torch.nn.functional as F
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class BaseAdversarialLoss:
|
| 9 |
+
def pre_generator_step(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
|
| 10 |
+
generator: nn.Module, discriminator: nn.Module):
|
| 11 |
+
"""
|
| 12 |
+
Prepare for generator step
|
| 13 |
+
:param real_batch: Tensor, a batch of real samples
|
| 14 |
+
:param fake_batch: Tensor, a batch of samples produced by generator
|
| 15 |
+
:param generator:
|
| 16 |
+
:param discriminator:
|
| 17 |
+
:return: None
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
def pre_discriminator_step(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
|
| 21 |
+
generator: nn.Module, discriminator: nn.Module):
|
| 22 |
+
"""
|
| 23 |
+
Prepare for discriminator step
|
| 24 |
+
:param real_batch: Tensor, a batch of real samples
|
| 25 |
+
:param fake_batch: Tensor, a batch of samples produced by generator
|
| 26 |
+
:param generator:
|
| 27 |
+
:param discriminator:
|
| 28 |
+
:return: None
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
def generator_loss(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
|
| 32 |
+
discr_real_pred: torch.Tensor, discr_fake_pred: torch.Tensor,
|
| 33 |
+
mask: Optional[torch.Tensor] = None) \
|
| 34 |
+
-> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
|
| 35 |
+
"""
|
| 36 |
+
Calculate generator loss
|
| 37 |
+
:param real_batch: Tensor, a batch of real samples
|
| 38 |
+
:param fake_batch: Tensor, a batch of samples produced by generator
|
| 39 |
+
:param discr_real_pred: Tensor, discriminator output for real_batch
|
| 40 |
+
:param discr_fake_pred: Tensor, discriminator output for fake_batch
|
| 41 |
+
:param mask: Tensor, actual mask, which was at input of generator when making fake_batch
|
| 42 |
+
:return: total generator loss along with some values that might be interesting to log
|
| 43 |
+
"""
|
| 44 |
+
raise NotImplemented()
|
| 45 |
+
|
| 46 |
+
def discriminator_loss(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
|
| 47 |
+
discr_real_pred: torch.Tensor, discr_fake_pred: torch.Tensor,
|
| 48 |
+
mask: Optional[torch.Tensor] = None) \
|
| 49 |
+
-> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
|
| 50 |
+
"""
|
| 51 |
+
Calculate discriminator loss and call .backward() on it
|
| 52 |
+
:param real_batch: Tensor, a batch of real samples
|
| 53 |
+
:param fake_batch: Tensor, a batch of samples produced by generator
|
| 54 |
+
:param discr_real_pred: Tensor, discriminator output for real_batch
|
| 55 |
+
:param discr_fake_pred: Tensor, discriminator output for fake_batch
|
| 56 |
+
:param mask: Tensor, actual mask, which was at input of generator when making fake_batch
|
| 57 |
+
:return: total discriminator loss along with some values that might be interesting to log
|
| 58 |
+
"""
|
| 59 |
+
raise NotImplemented()
|
| 60 |
+
|
| 61 |
+
def interpolate_mask(self, mask, shape):
|
| 62 |
+
assert mask is not None
|
| 63 |
+
assert self.allow_scale_mask or shape == mask.shape[-2:]
|
| 64 |
+
if shape != mask.shape[-2:] and self.allow_scale_mask:
|
| 65 |
+
if self.mask_scale_mode == 'maxpool':
|
| 66 |
+
mask = F.adaptive_max_pool2d(mask, shape)
|
| 67 |
+
else:
|
| 68 |
+
mask = F.interpolate(mask, size=shape, mode=self.mask_scale_mode)
|
| 69 |
+
return mask
|
| 70 |
+
|
| 71 |
+
def make_r1_gp(discr_real_pred, real_batch):
|
| 72 |
+
if torch.is_grad_enabled():
|
| 73 |
+
grad_real = torch.autograd.grad(outputs=discr_real_pred.sum(), inputs=real_batch, create_graph=True)[0]
|
| 74 |
+
grad_penalty = (grad_real.view(grad_real.shape[0], -1).norm(2, dim=1) ** 2).mean()
|
| 75 |
+
else:
|
| 76 |
+
grad_penalty = 0
|
| 77 |
+
real_batch.requires_grad = False
|
| 78 |
+
|
| 79 |
+
return grad_penalty
|
| 80 |
+
|
| 81 |
+
class NonSaturatingWithR1(BaseAdversarialLoss):
|
| 82 |
+
def __init__(self, gp_coef=5, weight=1, mask_as_fake_target=False, allow_scale_mask=False,
|
| 83 |
+
mask_scale_mode='nearest', extra_mask_weight_for_gen=0,
|
| 84 |
+
use_unmasked_for_gen=True, use_unmasked_for_discr=True):
|
| 85 |
+
self.gp_coef = gp_coef
|
| 86 |
+
self.weight = weight
|
| 87 |
+
# use for discr => use for gen;
|
| 88 |
+
# otherwise we teach only the discr to pay attention to very small difference
|
| 89 |
+
assert use_unmasked_for_gen or (not use_unmasked_for_discr)
|
| 90 |
+
# mask as target => use unmasked for discr:
|
| 91 |
+
# if we don't care about unmasked regions at all
|
| 92 |
+
# then it doesn't matter if the value of mask_as_fake_target is true or false
|
| 93 |
+
assert use_unmasked_for_discr or (not mask_as_fake_target)
|
| 94 |
+
self.use_unmasked_for_gen = use_unmasked_for_gen
|
| 95 |
+
self.use_unmasked_for_discr = use_unmasked_for_discr
|
| 96 |
+
self.mask_as_fake_target = mask_as_fake_target
|
| 97 |
+
self.allow_scale_mask = allow_scale_mask
|
| 98 |
+
self.mask_scale_mode = mask_scale_mode
|
| 99 |
+
self.extra_mask_weight_for_gen = extra_mask_weight_for_gen
|
| 100 |
+
|
| 101 |
+
def generator_loss(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
|
| 102 |
+
discr_real_pred: torch.Tensor, discr_fake_pred: torch.Tensor,
|
| 103 |
+
mask=None) \
|
| 104 |
+
-> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
|
| 105 |
+
fake_loss = F.softplus(-discr_fake_pred)
|
| 106 |
+
if (self.mask_as_fake_target and self.extra_mask_weight_for_gen > 0) or \
|
| 107 |
+
not self.use_unmasked_for_gen: # == if masked region should be treated differently
|
| 108 |
+
mask = self.interpolate_mask(mask, discr_fake_pred.shape[-2:])
|
| 109 |
+
if not self.use_unmasked_for_gen:
|
| 110 |
+
fake_loss = fake_loss * mask
|
| 111 |
+
else:
|
| 112 |
+
pixel_weights = 1 + mask * self.extra_mask_weight_for_gen
|
| 113 |
+
fake_loss = fake_loss * pixel_weights
|
| 114 |
+
|
| 115 |
+
return fake_loss.mean() * self.weight, dict()
|
| 116 |
+
|
| 117 |
+
def pre_discriminator_step(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
|
| 118 |
+
generator: nn.Module, discriminator: nn.Module):
|
| 119 |
+
real_batch.requires_grad = True
|
| 120 |
+
|
| 121 |
+
def discriminator_loss(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
|
| 122 |
+
discr_real_pred: torch.Tensor, discr_fake_pred: torch.Tensor,
|
| 123 |
+
mask=None) \
|
| 124 |
+
-> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
|
| 125 |
+
|
| 126 |
+
real_loss = F.softplus(-discr_real_pred)
|
| 127 |
+
grad_penalty = make_r1_gp(discr_real_pred, real_batch) * self.gp_coef
|
| 128 |
+
fake_loss = F.softplus(discr_fake_pred)
|
| 129 |
+
|
| 130 |
+
if not self.use_unmasked_for_discr or self.mask_as_fake_target:
|
| 131 |
+
# == if masked region should be treated differently
|
| 132 |
+
mask = self.interpolate_mask(mask, discr_fake_pred.shape[-2:])
|
| 133 |
+
# use_unmasked_for_discr=False only makes sense for fakes;
|
| 134 |
+
# for reals there is no difference beetween two regions
|
| 135 |
+
fake_loss = fake_loss * mask
|
| 136 |
+
if self.mask_as_fake_target:
|
| 137 |
+
fake_loss = fake_loss + (1 - mask) * F.softplus(-discr_fake_pred)
|
| 138 |
+
|
| 139 |
+
sum_discr_loss = real_loss + grad_penalty + fake_loss
|
| 140 |
+
metrics = dict(discr_real_out=discr_real_pred.mean(),
|
| 141 |
+
discr_fake_out=discr_fake_pred.mean(),
|
| 142 |
+
discr_real_gp=grad_penalty)
|
| 143 |
+
return sum_discr_loss.mean(), metrics
|
| 144 |
+
|
| 145 |
+
class BCELoss(BaseAdversarialLoss):
|
| 146 |
+
def __init__(self, weight):
|
| 147 |
+
self.weight = weight
|
| 148 |
+
self.bce_loss = nn.BCEWithLogitsLoss()
|
| 149 |
+
|
| 150 |
+
def generator_loss(self, discr_fake_pred: torch.Tensor) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
|
| 151 |
+
real_mask_gt = torch.zeros(discr_fake_pred.shape).to(discr_fake_pred.device)
|
| 152 |
+
fake_loss = self.bce_loss(discr_fake_pred, real_mask_gt) * self.weight
|
| 153 |
+
return fake_loss, dict()
|
| 154 |
+
|
| 155 |
+
def pre_discriminator_step(self, real_batch: torch.Tensor, fake_batch: torch.Tensor,
|
| 156 |
+
generator: nn.Module, discriminator: nn.Module):
|
| 157 |
+
real_batch.requires_grad = True
|
| 158 |
+
|
| 159 |
+
def discriminator_loss(self,
|
| 160 |
+
mask: torch.Tensor,
|
| 161 |
+
discr_real_pred: torch.Tensor,
|
| 162 |
+
discr_fake_pred: torch.Tensor) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
|
| 163 |
+
|
| 164 |
+
real_mask_gt = torch.zeros(discr_real_pred.shape).to(discr_real_pred.device)
|
| 165 |
+
sum_discr_loss = (self.bce_loss(discr_real_pred, real_mask_gt) + self.bce_loss(discr_fake_pred, mask)) / 2
|
| 166 |
+
metrics = dict(discr_real_out=discr_real_pred.mean(),
|
| 167 |
+
discr_fake_out=discr_fake_pred.mean(),
|
| 168 |
+
discr_real_gp=0)
|
| 169 |
+
return sum_discr_loss, metrics
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def make_discrim_loss(kind, **kwargs):
|
| 173 |
+
if kind == 'r1':
|
| 174 |
+
return NonSaturatingWithR1(**kwargs)
|
| 175 |
+
elif kind == 'bce':
|
| 176 |
+
return BCELoss(**kwargs)
|
| 177 |
+
raise ValueError(f'Unknown adversarial loss kind {kind}')
|
RAVE-main/annotator/lama/saicinpainting/training/losses/constants.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
weights = {"ade20k":
|
| 2 |
+
[6.34517766497462,
|
| 3 |
+
9.328358208955224,
|
| 4 |
+
11.389521640091116,
|
| 5 |
+
16.10305958132045,
|
| 6 |
+
20.833333333333332,
|
| 7 |
+
22.22222222222222,
|
| 8 |
+
25.125628140703515,
|
| 9 |
+
43.29004329004329,
|
| 10 |
+
50.5050505050505,
|
| 11 |
+
54.6448087431694,
|
| 12 |
+
55.24861878453038,
|
| 13 |
+
60.24096385542168,
|
| 14 |
+
62.5,
|
| 15 |
+
66.2251655629139,
|
| 16 |
+
84.74576271186442,
|
| 17 |
+
90.90909090909092,
|
| 18 |
+
91.74311926605505,
|
| 19 |
+
96.15384615384616,
|
| 20 |
+
96.15384615384616,
|
| 21 |
+
97.08737864077669,
|
| 22 |
+
102.04081632653062,
|
| 23 |
+
135.13513513513513,
|
| 24 |
+
149.2537313432836,
|
| 25 |
+
153.84615384615384,
|
| 26 |
+
163.93442622950818,
|
| 27 |
+
166.66666666666666,
|
| 28 |
+
188.67924528301887,
|
| 29 |
+
192.30769230769232,
|
| 30 |
+
217.3913043478261,
|
| 31 |
+
227.27272727272725,
|
| 32 |
+
227.27272727272725,
|
| 33 |
+
227.27272727272725,
|
| 34 |
+
303.03030303030306,
|
| 35 |
+
322.5806451612903,
|
| 36 |
+
333.3333333333333,
|
| 37 |
+
370.3703703703703,
|
| 38 |
+
384.61538461538464,
|
| 39 |
+
416.6666666666667,
|
| 40 |
+
416.6666666666667,
|
| 41 |
+
434.7826086956522,
|
| 42 |
+
434.7826086956522,
|
| 43 |
+
454.5454545454545,
|
| 44 |
+
454.5454545454545,
|
| 45 |
+
500.0,
|
| 46 |
+
526.3157894736842,
|
| 47 |
+
526.3157894736842,
|
| 48 |
+
555.5555555555555,
|
| 49 |
+
555.5555555555555,
|
| 50 |
+
555.5555555555555,
|
| 51 |
+
555.5555555555555,
|
| 52 |
+
555.5555555555555,
|
| 53 |
+
555.5555555555555,
|
| 54 |
+
555.5555555555555,
|
| 55 |
+
588.2352941176471,
|
| 56 |
+
588.2352941176471,
|
| 57 |
+
588.2352941176471,
|
| 58 |
+
588.2352941176471,
|
| 59 |
+
588.2352941176471,
|
| 60 |
+
666.6666666666666,
|
| 61 |
+
666.6666666666666,
|
| 62 |
+
666.6666666666666,
|
| 63 |
+
666.6666666666666,
|
| 64 |
+
714.2857142857143,
|
| 65 |
+
714.2857142857143,
|
| 66 |
+
714.2857142857143,
|
| 67 |
+
714.2857142857143,
|
| 68 |
+
714.2857142857143,
|
| 69 |
+
769.2307692307693,
|
| 70 |
+
769.2307692307693,
|
| 71 |
+
769.2307692307693,
|
| 72 |
+
833.3333333333334,
|
| 73 |
+
833.3333333333334,
|
| 74 |
+
833.3333333333334,
|
| 75 |
+
833.3333333333334,
|
| 76 |
+
909.090909090909,
|
| 77 |
+
1000.0,
|
| 78 |
+
1111.111111111111,
|
| 79 |
+
1111.111111111111,
|
| 80 |
+
1111.111111111111,
|
| 81 |
+
1111.111111111111,
|
| 82 |
+
1111.111111111111,
|
| 83 |
+
1250.0,
|
| 84 |
+
1250.0,
|
| 85 |
+
1250.0,
|
| 86 |
+
1250.0,
|
| 87 |
+
1250.0,
|
| 88 |
+
1428.5714285714287,
|
| 89 |
+
1428.5714285714287,
|
| 90 |
+
1428.5714285714287,
|
| 91 |
+
1428.5714285714287,
|
| 92 |
+
1428.5714285714287,
|
| 93 |
+
1428.5714285714287,
|
| 94 |
+
1428.5714285714287,
|
| 95 |
+
1666.6666666666667,
|
| 96 |
+
1666.6666666666667,
|
| 97 |
+
1666.6666666666667,
|
| 98 |
+
1666.6666666666667,
|
| 99 |
+
1666.6666666666667,
|
| 100 |
+
1666.6666666666667,
|
| 101 |
+
1666.6666666666667,
|
| 102 |
+
1666.6666666666667,
|
| 103 |
+
1666.6666666666667,
|
| 104 |
+
1666.6666666666667,
|
| 105 |
+
1666.6666666666667,
|
| 106 |
+
2000.0,
|
| 107 |
+
2000.0,
|
| 108 |
+
2000.0,
|
| 109 |
+
2000.0,
|
| 110 |
+
2000.0,
|
| 111 |
+
2000.0,
|
| 112 |
+
2000.0,
|
| 113 |
+
2000.0,
|
| 114 |
+
2000.0,
|
| 115 |
+
2000.0,
|
| 116 |
+
2000.0,
|
| 117 |
+
2000.0,
|
| 118 |
+
2000.0,
|
| 119 |
+
2000.0,
|
| 120 |
+
2000.0,
|
| 121 |
+
2000.0,
|
| 122 |
+
2000.0,
|
| 123 |
+
2500.0,
|
| 124 |
+
2500.0,
|
| 125 |
+
2500.0,
|
| 126 |
+
2500.0,
|
| 127 |
+
2500.0,
|
| 128 |
+
2500.0,
|
| 129 |
+
2500.0,
|
| 130 |
+
2500.0,
|
| 131 |
+
2500.0,
|
| 132 |
+
2500.0,
|
| 133 |
+
2500.0,
|
| 134 |
+
2500.0,
|
| 135 |
+
2500.0,
|
| 136 |
+
3333.3333333333335,
|
| 137 |
+
3333.3333333333335,
|
| 138 |
+
3333.3333333333335,
|
| 139 |
+
3333.3333333333335,
|
| 140 |
+
3333.3333333333335,
|
| 141 |
+
3333.3333333333335,
|
| 142 |
+
3333.3333333333335,
|
| 143 |
+
3333.3333333333335,
|
| 144 |
+
3333.3333333333335,
|
| 145 |
+
3333.3333333333335,
|
| 146 |
+
3333.3333333333335,
|
| 147 |
+
3333.3333333333335,
|
| 148 |
+
3333.3333333333335,
|
| 149 |
+
5000.0,
|
| 150 |
+
5000.0,
|
| 151 |
+
5000.0]
|
| 152 |
+
}
|
RAVE-main/annotator/lama/saicinpainting/training/losses/distance_weighting.py
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
import torchvision
|
| 5 |
+
|
| 6 |
+
from annotator.lama.saicinpainting.training.losses.perceptual import IMAGENET_STD, IMAGENET_MEAN
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def dummy_distance_weighter(real_img, pred_img, mask):
|
| 10 |
+
return mask
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def get_gauss_kernel(kernel_size, width_factor=1):
|
| 14 |
+
coords = torch.stack(torch.meshgrid(torch.arange(kernel_size),
|
| 15 |
+
torch.arange(kernel_size)),
|
| 16 |
+
dim=0).float()
|
| 17 |
+
diff = torch.exp(-((coords - kernel_size // 2) ** 2).sum(0) / kernel_size / width_factor)
|
| 18 |
+
diff /= diff.sum()
|
| 19 |
+
return diff
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class BlurMask(nn.Module):
|
| 23 |
+
def __init__(self, kernel_size=5, width_factor=1):
|
| 24 |
+
super().__init__()
|
| 25 |
+
self.filter = nn.Conv2d(1, 1, kernel_size, padding=kernel_size // 2, padding_mode='replicate', bias=False)
|
| 26 |
+
self.filter.weight.data.copy_(get_gauss_kernel(kernel_size, width_factor=width_factor))
|
| 27 |
+
|
| 28 |
+
def forward(self, real_img, pred_img, mask):
|
| 29 |
+
with torch.no_grad():
|
| 30 |
+
result = self.filter(mask) * mask
|
| 31 |
+
return result
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class EmulatedEDTMask(nn.Module):
|
| 35 |
+
def __init__(self, dilate_kernel_size=5, blur_kernel_size=5, width_factor=1):
|
| 36 |
+
super().__init__()
|
| 37 |
+
self.dilate_filter = nn.Conv2d(1, 1, dilate_kernel_size, padding=dilate_kernel_size// 2, padding_mode='replicate',
|
| 38 |
+
bias=False)
|
| 39 |
+
self.dilate_filter.weight.data.copy_(torch.ones(1, 1, dilate_kernel_size, dilate_kernel_size, dtype=torch.float))
|
| 40 |
+
self.blur_filter = nn.Conv2d(1, 1, blur_kernel_size, padding=blur_kernel_size // 2, padding_mode='replicate', bias=False)
|
| 41 |
+
self.blur_filter.weight.data.copy_(get_gauss_kernel(blur_kernel_size, width_factor=width_factor))
|
| 42 |
+
|
| 43 |
+
def forward(self, real_img, pred_img, mask):
|
| 44 |
+
with torch.no_grad():
|
| 45 |
+
known_mask = 1 - mask
|
| 46 |
+
dilated_known_mask = (self.dilate_filter(known_mask) > 1).float()
|
| 47 |
+
result = self.blur_filter(1 - dilated_known_mask) * mask
|
| 48 |
+
return result
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class PropagatePerceptualSim(nn.Module):
    """Mask-distance weighter that propagates "knownness" along perceptually
    similar pixels.

    A truncated VGG19 (max-pools replaced by avg-pools, cut after `level`
    ReLUs) extracts features of the real image; neighbouring feature cells
    with small squared distance get high similarity, and knownness flows
    along high-similarity edges for `max_iters` iterations.
    """

    def __init__(self, level=2, max_iters=10, temperature=500, erode_mask_size=3):
        super().__init__()
        vgg = torchvision.models.vgg19(pretrained=True).features
        vgg_avg_pooling = []

        # Freeze the backbone; it is only used as a fixed feature extractor.
        for weights in vgg.parameters():
            weights.requires_grad = False

        cur_level_i = 0
        for module in vgg.modules():
            if module.__class__.__name__ == 'Sequential':
                continue
            elif module.__class__.__name__ == 'MaxPool2d':
                # Avg-pool gives smoother gradients/features than max-pool.
                vgg_avg_pooling.append(nn.AvgPool2d(kernel_size=2, stride=2, padding=0))
            else:
                vgg_avg_pooling.append(module)
                if module.__class__.__name__ == 'ReLU':
                    cur_level_i += 1
                    if cur_level_i == level:
                        # Truncate the network after the `level`-th ReLU.
                        break

        self.features = nn.Sequential(*vgg_avg_pooling)

        self.max_iters = max_iters
        self.temperature = temperature
        self.do_erode = erode_mask_size > 0
        if self.do_erode:
            # Box filter used to shrink the (downscaled) mask before propagation.
            self.erode_mask = nn.Conv2d(1, 1, erode_mask_size, padding=erode_mask_size // 2, bias=False)
            self.erode_mask.weight.data.fill_(1)

    def forward(self, real_img, pred_img, mask):
        # pred_img is unused; weighting depends only on the real image and mask.
        with torch.no_grad():
            # IMAGENET_MEAN/IMAGENET_STD are module-level constants (defined
            # earlier in this file) — standard ImageNet normalization.
            real_img = (real_img - IMAGENET_MEAN.to(real_img)) / IMAGENET_STD.to(real_img)
            real_feats = self.features(real_img)

            # Similarity between vertically / horizontally adjacent feature
            # cells: exp(-||diff||^2 / temperature), in (0, 1].
            vertical_sim = torch.exp(-(real_feats[:, :, 1:] - real_feats[:, :, :-1]).pow(2).sum(1, keepdim=True)
                                     / self.temperature)
            horizontal_sim = torch.exp(-(real_feats[:, :, :, 1:] - real_feats[:, :, :, :-1]).pow(2).sum(1, keepdim=True)
                                       / self.temperature)

            mask_scaled = F.interpolate(mask, size=real_feats.shape[-2:], mode='bilinear', align_corners=False)
            if self.do_erode:
                # Keep a cell masked only when more than one masked neighbour
                # supports it (erodes thin mask parts after downscaling).
                mask_scaled = (self.erode_mask(mask_scaled) > 1).float()

            # knownness == 1 outside the hole, 0 inside; it is propagated
            # inwards, attenuated by the similarity of each crossed edge.
            cur_knowness = 1 - mask_scaled

            for iter_i in range(self.max_iters):
                # Knownness arriving from each of the four neighbours,
                # re-padded (replicate) to keep the spatial size.
                new_top_knowness = F.pad(cur_knowness[:, :, :-1] * vertical_sim, (0, 0, 1, 0), mode='replicate')
                new_bottom_knowness = F.pad(cur_knowness[:, :, 1:] * vertical_sim, (0, 0, 0, 1), mode='replicate')

                new_left_knowness = F.pad(cur_knowness[:, :, :, :-1] * horizontal_sim, (1, 0, 0, 0), mode='replicate')
                new_right_knowness = F.pad(cur_knowness[:, :, :, 1:] * horizontal_sim, (0, 1, 0, 0), mode='replicate')

                # Take the strongest incoming knownness per cell...
                new_knowness = torch.stack([new_top_knowness, new_bottom_knowness,
                                            new_left_knowness, new_right_knowness],
                                           dim=0).max(0).values

                # ...and never decrease what a cell already has (monotone update).
                cur_knowness = torch.max(cur_knowness, new_knowness)

            # Back to full resolution; weight = mask minus propagated knownness.
            cur_knowness = F.interpolate(cur_knowness, size=mask.shape[-2:], mode='bilinear')
            result = torch.min(mask, 1 - cur_knowness)

            return result
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def make_mask_distance_weighter(kind='none', **kwargs):
    """Factory for mask-distance weighters.

    kind='none' returns the pass-through ``dummy_distance_weighter`` function
    (defined earlier in this module); the other kinds instantiate the
    corresponding nn.Module with ``kwargs`` forwarded to its constructor.
    Raises ValueError for an unknown kind.
    """
    if kind == 'none':
        return dummy_distance_weighter
    if kind == 'blur':
        return BlurMask(**kwargs)
    if kind == 'edt':
        return EmulatedEDTMask(**kwargs)
    if kind == 'pps':
        return PropagatePerceptualSim(**kwargs)
    raise ValueError(f'Unknown mask distance weighter kind {kind}')
|
RAVE-main/annotator/lama/saicinpainting/training/losses/feature_matching.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List
|
| 2 |
+
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def masked_l2_loss(pred, target, mask, weight_known, weight_missing):
    """Mean squared error with per-pixel weights derived from the mask.

    Pixels where ``mask == 1`` (missing region) are weighted by
    ``weight_missing``; pixels where ``mask == 0`` (known region) by
    ``weight_known``.
    """
    weights = weight_missing * mask + weight_known * (1 - mask)
    return (F.mse_loss(pred, target, reduction='none') * weights).mean()
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def masked_l1_loss(pred, target, mask, weight_known, weight_missing):
    """Mean absolute error with per-pixel weights derived from the mask.

    Pixels where ``mask == 1`` (missing region) are weighted by
    ``weight_missing``; pixels where ``mask == 0`` (known region) by
    ``weight_known``.
    """
    weights = weight_missing * mask + weight_known * (1 - mask)
    return (F.l1_loss(pred, target, reduction='none') * weights).mean()
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def feature_matching_loss(fake_features: List[torch.Tensor], target_features: List[torch.Tensor], mask=None):
    """Average MSE between corresponding discriminator feature maps.

    With a ``mask``, the mask is bilinearly resized to each feature map and
    errors inside the masked (missing) region are zeroed before averaging.
    """
    if mask is None:
        layer_losses = [F.mse_loss(fake, real)
                        for fake, real in zip(fake_features, target_features)]
        return torch.stack(layer_losses).mean()

    total = 0
    n_layers = 0
    for fake, real in zip(fake_features, target_features):
        scaled_mask = F.interpolate(mask, size=fake.shape[-2:], mode='bilinear', align_corners=False)
        # Only penalize differences outside the hole.
        total = total + ((fake - real).pow(2) * (1 - scaled_mask)).mean()
        n_layers += 1
    return total / n_layers
|
RAVE-main/annotator/lama/saicinpainting/training/losses/perceptual.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
import torchvision
|
| 5 |
+
|
| 6 |
+
# from models.ade20k import ModelBuilder
|
| 7 |
+
from annotator.lama.saicinpainting.utils import check_and_warn_input_range
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
IMAGENET_MEAN = torch.FloatTensor([0.485, 0.456, 0.406])[None, :, None, None]
|
| 11 |
+
IMAGENET_STD = torch.FloatTensor([0.229, 0.224, 0.225])[None, :, None, None]
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class PerceptualLoss(nn.Module):
    """VGG19-based perceptual loss on ReLU activations.

    Max-pools of the pretrained VGG19 are replaced by avg-pools; an MSE is
    accumulated at every ReLU of the first 30 layers. Inputs are expected
    in the [0, 1] range (warned about via check_and_warn_input_range).
    """

    def __init__(self, normalize_inputs=True):
        # normalize_inputs: apply ImageNet mean/std normalization before VGG.
        super(PerceptualLoss, self).__init__()

        self.normalize_inputs = normalize_inputs
        self.mean_ = IMAGENET_MEAN
        self.std_ = IMAGENET_STD

        vgg = torchvision.models.vgg19(pretrained=True).features
        vgg_avg_pooling = []

        # Freeze the backbone; it is a fixed feature extractor.
        for weights in vgg.parameters():
            weights.requires_grad = False

        for module in vgg.modules():
            if module.__class__.__name__ == 'Sequential':
                continue
            elif module.__class__.__name__ == 'MaxPool2d':
                # Avg-pool for smoother gradients than max-pool.
                vgg_avg_pooling.append(nn.AvgPool2d(kernel_size=2, stride=2, padding=0))
            else:
                vgg_avg_pooling.append(module)

        self.vgg = nn.Sequential(*vgg_avg_pooling)

    def do_normalize_inputs(self, x):
        # Standard ImageNet normalization, broadcast over (B, C, H, W).
        return (x - self.mean_.to(x.device)) / self.std_.to(x.device)

    def partial_losses(self, input, target, mask=None):
        """Return the list of per-ReLU MSE losses (one tensor of shape (B,) each).

        With a mask, the mask is resized to each feature map and the error
        inside the masked region is zeroed.
        """
        check_and_warn_input_range(target, 0, 1, 'PerceptualLoss target in partial_losses')

        # we expect input and target to be in [0, 1] range
        losses = []

        if self.normalize_inputs:
            features_input = self.do_normalize_inputs(input)
            features_target = self.do_normalize_inputs(target)
        else:
            features_input = input
            features_target = target

        # Only the first 30 layers (through the relu5 group) are evaluated.
        for layer in self.vgg[:30]:

            features_input = layer(features_input)
            features_target = layer(features_target)

            if layer.__class__.__name__ == 'ReLU':
                loss = F.mse_loss(features_input, features_target, reduction='none')

                if mask is not None:
                    cur_mask = F.interpolate(mask, size=features_input.shape[-2:],
                                             mode='bilinear', align_corners=False)
                    # Ignore the error inside the hole.
                    loss = loss * (1 - cur_mask)

                # Reduce over all non-batch dimensions -> per-sample loss.
                loss = loss.mean(dim=tuple(range(1, len(loss.shape))))
                losses.append(loss)

        return losses

    def forward(self, input, target, mask=None):
        """Sum of the per-ReLU losses; returns a per-sample tensor of shape (B,)."""
        losses = self.partial_losses(input, target, mask=mask)
        return torch.stack(losses).sum(dim=0)

    def get_global_features(self, input):
        """Run the full (avg-pooled) VGG stack and return the final feature map."""
        check_and_warn_input_range(input, 0, 1, 'PerceptualLoss input in get_global_features')

        if self.normalize_inputs:
            features_input = self.do_normalize_inputs(input)
        else:
            features_input = input

        features_input = self.vgg(features_input)
        return features_input
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class ResNetPL(nn.Module):
    """Perceptual loss on a dilated-ResNet50 segmentation encoder (ADE20k).

    NOTE(review): ``ModelBuilder`` is NOT defined in this module — its import
    (`from models.ade20k import ModelBuilder`) is commented out at the top of
    the file, so instantiating ResNetPL raises NameError unless ModelBuilder
    is provided by other means. TODO: confirm whether this class is reachable
    in the annotator port.
    """

    def __init__(self, weight=1,
                 weights_path=None, arch_encoder='resnet50dilated', segmentation=True):
        # weight: scalar multiplier applied to the summed feature-map MSEs.
        super().__init__()
        self.impl = ModelBuilder.get_encoder(weights_path=weights_path,
                                             arch_encoder=arch_encoder,
                                             arch_decoder='ppm_deepsup',
                                             fc_dim=2048,
                                             segmentation=segmentation)
        # Fixed feature extractor: eval mode, no gradients.
        self.impl.eval()
        for w in self.impl.parameters():
            w.requires_grad_(False)

        self.weight = weight

    def forward(self, pred, target):
        # Inputs are ImageNet-normalized before the encoder.
        pred = (pred - IMAGENET_MEAN.to(pred)) / IMAGENET_STD.to(pred)
        target = (target - IMAGENET_MEAN.to(target)) / IMAGENET_STD.to(target)

        pred_feats = self.impl(pred, return_feature_maps=True)
        target_feats = self.impl(target, return_feature_maps=True)

        # Sum of MSEs over all returned feature maps, scaled by self.weight.
        result = torch.stack([F.mse_loss(cur_pred, cur_target)
                              for cur_pred, cur_target
                              in zip(pred_feats, target_feats)]).sum() * self.weight
        return result
|
RAVE-main/annotator/lama/saicinpainting/training/losses/segmentation.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
|
| 5 |
+
from .constants import weights as constant_weights
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class CrossEntropy2d(nn.Module):
    """2D cross-entropy for dense segmentation with an ignore label.

    Note: despite the docstring below, ``weights`` here is a string key into
    the module-level ``constant_weights`` table (from .constants), not a
    Tensor — the tensor is looked up and built in __init__.
    """

    def __init__(self, reduction="mean", ignore_label=255, weights=None, *args, **kwargs):
        """
        weight (Tensor, optional): a manual rescaling weight given to each class.
        If given, has to be a Tensor of size "nclasses"
        """
        super(CrossEntropy2d, self).__init__()
        self.reduction = reduction
        self.ignore_label = ignore_label
        self.weights = weights
        if self.weights is not None:
            # Resolve the named weight preset to a tensor on the active device.
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            self.weights = torch.FloatTensor(constant_weights[weights]).to(device)

    def forward(self, predict, target):
        """
        Args:
            predict:(n, c, h, w)
            target:(n, 1, h, w)
        """
        target = target.long()
        # Shape/consistency checks (stripped under python -O).
        assert not target.requires_grad
        assert predict.dim() == 4, "{0}".format(predict.size())
        assert target.dim() == 4, "{0}".format(target.size())
        assert predict.size(0) == target.size(0), "{0} vs {1} ".format(predict.size(0), target.size(0))
        assert target.size(1) == 1, "{0}".format(target.size(1))
        assert predict.size(2) == target.size(2), "{0} vs {1} ".format(predict.size(2), target.size(2))
        assert predict.size(3) == target.size(3), "{0} vs {1} ".format(predict.size(3), target.size(3))
        target = target.squeeze(1)
        n, c, h, w = predict.size()
        # Keep only valid pixels: label >= 0 and not the ignore label.
        target_mask = (target >= 0) * (target != self.ignore_label)
        target = target[target_mask]
        # (n, c, h, w) -> (n, h, w, c) so the boolean mask can select whole
        # class vectors; result is (num_valid_pixels, c).
        predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
        predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
        loss = F.cross_entropy(predict, target, weight=self.weights, reduction=self.reduction)
        return loss
|
RAVE-main/annotator/lama/saicinpainting/training/losses/style_loss.py
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torchvision.models as models
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class PerceptualLoss(nn.Module):
    r"""
    Perceptual loss, VGG-based
    https://arxiv.org/abs/1603.08155
    https://github.com/dxyang/StyleTransfer/blob/master/utils.py
    """

    def __init__(self, weights=[1.0, 1.0, 1.0, 1.0, 1.0]):
        # weights: one scalar per relu{1..5}_1 tap of the VGG19 extractor.
        super(PerceptualLoss, self).__init__()
        self.add_module('vgg', VGG19())
        self.criterion = torch.nn.L1Loss()
        self.weights = weights

    def __call__(self, x, y):
        # Extract features once per input, then accumulate weighted L1
        # distances over the five relu*_1 taps.
        x_vgg, y_vgg = self.vgg(x), self.vgg(y)

        taps = ('relu1_1', 'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1')
        content_loss = 0.0
        for w, tap in zip(self.weights, taps):
            content_loss += w * self.criterion(x_vgg[tap], y_vgg[tap])

        return content_loss
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class VGG19(torch.nn.Module):
    """Pretrained VGG19 split into named per-activation stages.

    Each ``relu{i}_{j}`` attribute is an nn.Sequential holding the slice of
    ``torchvision.models.vgg19().features`` that ends at that activation;
    forward() chains them and returns a dict of all intermediate activations.

    Bug fix vs. the original: layers 14-16 were appended to ``relu3_2`` a
    second time and ``relu3_3`` stayed empty (an identity), so the dict
    entries 'relu3_2'/'relu3_3' were wrong. The overall composition (and thus
    the relu*_1 taps used by PerceptualLoss above) is unchanged by the fix.
    """

    # (stage name, first layer index, one-past-last layer index) of
    # torchvision's vgg19 `features` Sequential.
    _STAGES = [
        ('relu1_1', 0, 2), ('relu1_2', 2, 4),
        ('relu2_1', 4, 7), ('relu2_2', 7, 9),
        ('relu3_1', 9, 12), ('relu3_2', 12, 14), ('relu3_3', 14, 16), ('relu3_4', 16, 18),
        ('relu4_1', 18, 21), ('relu4_2', 21, 23), ('relu4_3', 23, 25), ('relu4_4', 25, 27),
        ('relu5_1', 27, 30), ('relu5_2', 30, 32), ('relu5_3', 32, 34), ('relu5_4', 34, 36),
    ]

    def __init__(self):
        super(VGG19, self).__init__()
        features = models.vgg19(pretrained=True).features
        for name, start, stop in self._STAGES:
            stage = torch.nn.Sequential()
            # Keep the original sub-module naming (str(global layer index)).
            for x in range(start, stop):
                stage.add_module(str(x), features[x])
            setattr(self, name, stage)

        # don't need the gradients, just want the features
        for param in self.parameters():
            param.requires_grad = False

    def forward(self, x):
        """Return a dict mapping every stage name to its activation tensor."""
        out = {}
        for name, _, _ in self._STAGES:
            x = getattr(self, name)(x)
            out[name] = x
        return out
|
RAVE-main/annotator/lama/saicinpainting/training/modules/__init__.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
|
| 3 |
+
from annotator.lama.saicinpainting.training.modules.ffc import FFCResNetGenerator
|
| 4 |
+
from annotator.lama.saicinpainting.training.modules.pix2pixhd import GlobalGenerator, MultiDilatedGlobalGenerator, \
|
| 5 |
+
NLayerDiscriminator, MultidilatedNLayerDiscriminator
|
| 6 |
+
|
| 7 |
+
def make_generator(config, kind, **kwargs):
    """Instantiate a generator network by its registered kind.

    ``config`` is accepted for interface compatibility but is not used here;
    ``kwargs`` are forwarded to the chosen generator's constructor.
    """
    logging.info(f'Make generator {kind}')

    if kind == 'pix2pixhd_multidilated':
        generator_cls = MultiDilatedGlobalGenerator
    elif kind == 'pix2pixhd_global':
        generator_cls = GlobalGenerator
    elif kind == 'ffc_resnet':
        generator_cls = FFCResNetGenerator
    else:
        raise ValueError(f'Unknown generator kind {kind}')
    return generator_cls(**kwargs)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def make_discriminator(kind, **kwargs):
    """Instantiate a discriminator network by its registered kind.

    ``kwargs`` are forwarded to the chosen discriminator's constructor.
    """
    logging.info(f'Make discriminator {kind}')

    if kind == 'pix2pixhd_nlayer_multidilated':
        discriminator_cls = MultidilatedNLayerDiscriminator
    elif kind == 'pix2pixhd_nlayer':
        discriminator_cls = NLayerDiscriminator
    else:
        raise ValueError(f'Unknown discriminator kind {kind}')
    return discriminator_cls(**kwargs)
|
RAVE-main/annotator/lama/saicinpainting/utils.py
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import bisect
|
| 2 |
+
import functools
|
| 3 |
+
import logging
|
| 4 |
+
import numbers
|
| 5 |
+
import os
|
| 6 |
+
import signal
|
| 7 |
+
import sys
|
| 8 |
+
import traceback
|
| 9 |
+
import warnings
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
from pytorch_lightning import seed_everything
|
| 13 |
+
|
| 14 |
+
LOGGER = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def check_and_warn_input_range(tensor, min_value, max_value, name):
    """Warn (not raise) when any value of `tensor` lies outside [min_value, max_value]."""
    actual_min = tensor.min()
    actual_max = tensor.max()
    out_of_range = actual_min < min_value or actual_max > max_value
    if out_of_range:
        warnings.warn(f"{name} must be in {min_value}..{max_value} range, but it ranges {actual_min}..{actual_max}")
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def sum_dict_with_prefix(target, cur_dict, prefix, default=0):
    """Accumulate cur_dict into `target` in place, prefixing every key.

    Missing keys in `target` start from `default`.
    """
    for key, value in cur_dict.items():
        prefixed_key = prefix + key
        target[prefixed_key] = target.get(prefixed_key, default) + value
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def average_dicts(dict_list):
    """Average a list of dicts key-wise.

    The denominator starts at 1e-3 (not 0) so an empty list returns {} rather
    than raising ZeroDivisionError; this slightly biases the averages.
    """
    totals = {}
    denom = 1e-3
    for current in dict_list:
        # Key-wise accumulation (no prefix), missing keys start from 0.
        for key, value in current.items():
            totals[key] = totals.get(key, 0) + value
        denom += 1
    for key in list(totals):
        totals[key] /= denom
    return totals
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def add_prefix_to_keys(dct, prefix):
    """Return a shallow copy of `dct` with `prefix` prepended to every key."""
    result = {}
    for key, value in dct.items():
        result[prefix + key] = value
    return result
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def set_requires_grad(module, value):
    """Freeze (value=False) or unfreeze (value=True) all parameters of `module`."""
    for parameter in module.parameters():
        parameter.requires_grad = value
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def flatten_dict(dct):
    """Recursively flatten nested dicts, joining key parts with '_'.

    Tuple keys are joined with '_' first; each nested dict value contributes
    '<key>_<subkey>' entries.
    """
    flat = {}
    for key, value in dct.items():
        if isinstance(key, tuple):
            key = '_'.join(key)
        if isinstance(value, dict):
            flat.update({f'{key}_{sub_key}': sub_value
                         for sub_key, sub_value in flatten_dict(value).items()})
        else:
            flat[key] = value
    return flat
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class LinearRamp:
    """Linear schedule: start_value before start_iter, end_value from end_iter on,
    linear interpolation in between."""

    def __init__(self, start_value=0, end_value=1, start_iter=-1, end_iter=0):
        self.start_value = start_value
        self.end_value = end_value
        self.start_iter = start_iter
        self.end_iter = end_iter

    def __call__(self, i):
        # Clamp outside the ramp interval.
        if i < self.start_iter:
            return self.start_value
        if i >= self.end_iter:
            return self.end_value
        fraction = (i - self.start_iter) / (self.end_iter - self.start_iter)
        return self.start_value * (1 - fraction) + self.end_value * fraction
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class LadderRamp:
    """Piecewise-constant schedule: values[j] applies from start_iters[j-1]
    (inclusive) up to start_iters[j] (exclusive); values[0] applies before
    the first boundary."""

    def __init__(self, start_iters, values):
        self.start_iters = start_iters
        self.values = values
        # One more value than boundaries is required.
        assert len(values) == len(start_iters) + 1, (len(values), len(start_iters))

    def __call__(self, i):
        return self.values[bisect.bisect_right(self.start_iters, i)]
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def get_ramp(kind='ladder', **kwargs):
    """Factory for ramp schedules; kwargs go to the chosen class."""
    if kind == 'linear':
        ramp_cls = LinearRamp
    elif kind == 'ladder':
        ramp_cls = LadderRamp
    else:
        raise ValueError(f'Unexpected ramp kind: {kind}')
    return ramp_cls(**kwargs)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def print_traceback_handler(sig, frame):
    """Signal handler: log the received signal and the current Python stack trace.

    Intended for on-demand debugging of stuck processes (see
    register_debug_signal_handlers below).
    """
    LOGGER.warning(f'Received signal {sig}')
    bt = ''.join(traceback.format_stack())
    LOGGER.warning(f'Requested stack trace:\n{bt}')
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def register_debug_signal_handlers(sig=None, handler=print_traceback_handler):
    """Install `handler` for signal `sig` so a stack trace can be dumped on demand.

    NOTE(review): the default sig=None is not a valid signal number for
    signal.signal — callers must pass a real signal (e.g. signal.SIGUSR1);
    confirm intended usage.
    """
    LOGGER.warning(f'Setting signal {sig} handler {handler}')
    signal.signal(sig, handler)
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def handle_deterministic_config(config):
    """Seed all RNGs via pytorch_lightning.seed_everything when config defines 'seed'.

    Returns True if seeding was applied, False when no seed is configured.
    `config` may be any mapping convertible with dict() (e.g. an OmegaConf node).
    """
    seed = dict(config).get('seed', None)
    if seed is None:
        return False

    seed_everything(seed)
    return True
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def get_shape(t):
    """Recursively describe the shapes inside a nested tensor container.

    Tensors map to their shape tuple, dicts/lists/tuples are descended into
    (tuples come back as lists), plain numbers map to their type; anything
    else raises ValueError.
    """
    if torch.is_tensor(t):
        return tuple(t.shape)
    if isinstance(t, dict):
        return {name: get_shape(value) for name, value in t.items()}
    if isinstance(t, (list, tuple)):
        return [get_shape(item) for item in t]
    if isinstance(t, numbers.Number):
        return type(t)
    raise ValueError('unexpected type {}'.format(type(t)))
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def get_has_ddp_rank():
    """Return True when any DDP-related environment variable is set.

    PyTorch-Lightning's Trainer sets MASTER_PORT, NODE_RANK, LOCAL_RANK and
    WORLD_SIZE in DDP worker processes.
    """
    ddp_env_keys = ('MASTER_PORT', 'NODE_RANK', 'LOCAL_RANK', 'WORLD_SIZE')
    return any(os.environ.get(key) is not None for key in ddp_env_keys)
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def handle_ddp_subprocess():
    """Decorator factory for the training entry point under PyTorch-Lightning DDP.

    When running as a DDP worker (detected via TRAINING_PARENT_WORK_DIR, which
    is set by the parent in handle_ddp_parent_process), hydra overrides are
    appended to sys.argv so the worker reuses the parent's working directory.

    Fix vs. the original: the wrapper now returns the wrapped function's
    result instead of silently discarding it (backward-compatible — callers
    that ignored the return value are unaffected).
    """
    def main_decorator(main_func):
        @functools.wraps(main_func)
        def new_main(*args, **kwargs):
            # Trainer sets MASTER_PORT, NODE_RANK, LOCAL_RANK, WORLD_SIZE
            parent_cwd = os.environ.get('TRAINING_PARENT_WORK_DIR', None)
            has_parent = parent_cwd is not None
            has_rank = get_has_ddp_rank()
            # Both markers must agree: either we are a worker (both set) or
            # the top-level process (neither set).
            assert has_parent == has_rank, f'Inconsistent state: has_parent={has_parent}, has_rank={has_rank}'

            if has_parent:
                # we are in the worker
                sys.argv.extend([
                    f'hydra.run.dir={parent_cwd}',
                    # 'hydra/hydra_logging=disabled',
                    # 'hydra/job_logging=disabled'
                ])
            # do nothing if this is a top-level process
            # TRAINING_PARENT_WORK_DIR is set in handle_ddp_parent_process after hydra initialization

            return main_func(*args, **kwargs)
        return new_main
    return main_decorator
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def handle_ddp_parent_process():
    """Detect whether this process is a DDP worker; register the parent work dir otherwise.

    Returns True when TRAINING_PARENT_WORK_DIR is already set (i.e. we are a
    DDP child spawned by the parent). On the first (parent) invocation the
    current working directory is stored in the environment so that future
    workers can find it (consumed by handle_ddp_subprocess above).
    """
    parent_cwd = os.environ.get('TRAINING_PARENT_WORK_DIR', None)
    has_parent = parent_cwd is not None
    has_rank = get_has_ddp_rank()
    # Parent-dir marker and DDP rank env vars must agree.
    assert has_parent == has_rank, f'Inconsistent state: has_parent={has_parent}, has_rank={has_rank}'

    if parent_cwd is None:
        os.environ['TRAINING_PARENT_WORK_DIR'] = os.getcwd()

    return has_parent
|
RAVE-main/annotator/mlsd/LICENSE
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Apache License
|
| 2 |
+
Version 2.0, January 2004
|
| 3 |
+
http://www.apache.org/licenses/
|
| 4 |
+
|
| 5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 6 |
+
|
| 7 |
+
1. Definitions.
|
| 8 |
+
|
| 9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
| 10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
| 11 |
+
|
| 12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
| 13 |
+
the copyright owner that is granting the License.
|
| 14 |
+
|
| 15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
| 16 |
+
other entities that control, are controlled by, or are under common
|
| 17 |
+
control with that entity. For the purposes of this definition,
|
| 18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
| 19 |
+
direction or management of such entity, whether by contract or
|
| 20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
| 21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
| 22 |
+
|
| 23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
| 24 |
+
exercising permissions granted by this License.
|
| 25 |
+
|
| 26 |
+
"Source" form shall mean the preferred form for making modifications,
|
| 27 |
+
including but not limited to software source code, documentation
|
| 28 |
+
source, and configuration files.
|
| 29 |
+
|
| 30 |
+
"Object" form shall mean any form resulting from mechanical
|
| 31 |
+
transformation or translation of a Source form, including but
|
| 32 |
+
not limited to compiled object code, generated documentation,
|
| 33 |
+
and conversions to other media types.
|
| 34 |
+
|
| 35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
| 36 |
+
Object form, made available under the License, as indicated by a
|
| 37 |
+
copyright notice that is included in or attached to the work
|
| 38 |
+
(an example is provided in the Appendix below).
|
| 39 |
+
|
| 40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
| 41 |
+
form, that is based on (or derived from) the Work and for which the
|
| 42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
| 43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
| 44 |
+
of this License, Derivative Works shall not include works that remain
|
| 45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
| 46 |
+
the Work and Derivative Works thereof.
|
| 47 |
+
|
| 48 |
+
"Contribution" shall mean any work of authorship, including
|
| 49 |
+
the original version of the Work and any modifications or additions
|
| 50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
| 51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
| 52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
| 53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
| 54 |
+
means any form of electronic, verbal, or written communication sent
|
| 55 |
+
to the Licensor or its representatives, including but not limited to
|
| 56 |
+
communication on electronic mailing lists, source code control systems,
|
| 57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
| 58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
| 59 |
+
excluding communication that is conspicuously marked or otherwise
|
| 60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
| 61 |
+
|
| 62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
| 63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
| 64 |
+
subsequently incorporated within the Work.
|
| 65 |
+
|
| 66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
| 67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
| 70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
| 71 |
+
Work and such Derivative Works in Source or Object form.
|
| 72 |
+
|
| 73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
| 74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
| 75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
| 76 |
+
(except as stated in this section) patent license to make, have made,
|
| 77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
| 78 |
+
where such license applies only to those patent claims licensable
|
| 79 |
+
by such Contributor that are necessarily infringed by their
|
| 80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
| 81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
| 82 |
+
institute patent litigation against any entity (including a
|
| 83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
| 84 |
+
or a Contribution incorporated within the Work constitutes direct
|
| 85 |
+
or contributory patent infringement, then any patent licenses
|
| 86 |
+
granted to You under this License for that Work shall terminate
|
| 87 |
+
as of the date such litigation is filed.
|
| 88 |
+
|
| 89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
| 90 |
+
Work or Derivative Works thereof in any medium, with or without
|
| 91 |
+
modifications, and in Source or Object form, provided that You
|
| 92 |
+
meet the following conditions:
|
| 93 |
+
|
| 94 |
+
(a) You must give any other recipients of the Work or
|
| 95 |
+
Derivative Works a copy of this License; and
|
| 96 |
+
|
| 97 |
+
(b) You must cause any modified files to carry prominent notices
|
| 98 |
+
stating that You changed the files; and
|
| 99 |
+
|
| 100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
| 101 |
+
that You distribute, all copyright, patent, trademark, and
|
| 102 |
+
attribution notices from the Source form of the Work,
|
| 103 |
+
excluding those notices that do not pertain to any part of
|
| 104 |
+
the Derivative Works; and
|
| 105 |
+
|
| 106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
| 107 |
+
distribution, then any Derivative Works that You distribute must
|
| 108 |
+
include a readable copy of the attribution notices contained
|
| 109 |
+
within such NOTICE file, excluding those notices that do not
|
| 110 |
+
pertain to any part of the Derivative Works, in at least one
|
| 111 |
+
of the following places: within a NOTICE text file distributed
|
| 112 |
+
as part of the Derivative Works; within the Source form or
|
| 113 |
+
documentation, if provided along with the Derivative Works; or,
|
| 114 |
+
within a display generated by the Derivative Works, if and
|
| 115 |
+
wherever such third-party notices normally appear. The contents
|
| 116 |
+
of the NOTICE file are for informational purposes only and
|
| 117 |
+
do not modify the License. You may add Your own attribution
|
| 118 |
+
notices within Derivative Works that You distribute, alongside
|
| 119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
| 120 |
+
that such additional attribution notices cannot be construed
|
| 121 |
+
as modifying the License.
|
| 122 |
+
|
| 123 |
+
You may add Your own copyright statement to Your modifications and
|
| 124 |
+
may provide additional or different license terms and conditions
|
| 125 |
+
for use, reproduction, or distribution of Your modifications, or
|
| 126 |
+
for any such Derivative Works as a whole, provided Your use,
|
| 127 |
+
reproduction, and distribution of the Work otherwise complies with
|
| 128 |
+
the conditions stated in this License.
|
| 129 |
+
|
| 130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
| 131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
| 132 |
+
by You to the Licensor shall be under the terms and conditions of
|
| 133 |
+
this License, without any additional terms or conditions.
|
| 134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
| 135 |
+
the terms of any separate license agreement you may have executed
|
| 136 |
+
with Licensor regarding such Contributions.
|
| 137 |
+
|
| 138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
| 139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
| 140 |
+
except as required for reasonable and customary use in describing the
|
| 141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
| 142 |
+
|
| 143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
| 144 |
+
agreed to in writing, Licensor provides the Work (and each
|
| 145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
| 146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
| 147 |
+
implied, including, without limitation, any warranties or conditions
|
| 148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
| 149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
| 150 |
+
appropriateness of using or redistributing the Work and assume any
|
| 151 |
+
risks associated with Your exercise of permissions under this License.
|
| 152 |
+
|
| 153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
| 154 |
+
whether in tort (including negligence), contract, or otherwise,
|
| 155 |
+
unless required by applicable law (such as deliberate and grossly
|
| 156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
| 157 |
+
liable to You for damages, including any direct, indirect, special,
|
| 158 |
+
incidental, or consequential damages of any character arising as a
|
| 159 |
+
result of this License or out of the use or inability to use the
|
| 160 |
+
Work (including but not limited to damages for loss of goodwill,
|
| 161 |
+
work stoppage, computer failure or malfunction, or any and all
|
| 162 |
+
other commercial damages or losses), even if such Contributor
|
| 163 |
+
has been advised of the possibility of such damages.
|
| 164 |
+
|
| 165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
| 166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
| 167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
| 168 |
+
or other liability obligations and/or rights consistent with this
|
| 169 |
+
License. However, in accepting such obligations, You may act only
|
| 170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
| 171 |
+
of any other Contributor, and only if You agree to indemnify,
|
| 172 |
+
defend, and hold each Contributor harmless for any liability
|
| 173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
| 174 |
+
of your accepting any such warranty or additional liability.
|
| 175 |
+
|
| 176 |
+
END OF TERMS AND CONDITIONS
|
| 177 |
+
|
| 178 |
+
APPENDIX: How to apply the Apache License to your work.
|
| 179 |
+
|
| 180 |
+
To apply the Apache License to your work, attach the following
|
| 181 |
+
boilerplate notice, with the fields enclosed by brackets "{}"
|
| 182 |
+
replaced with your own identifying information. (Don't include
|
| 183 |
+
the brackets!) The text should be enclosed in the appropriate
|
| 184 |
+
comment syntax for the file format. We also recommend that a
|
| 185 |
+
file or class name and description of purpose be included on the
|
| 186 |
+
same "printed page" as the copyright notice for easier
|
| 187 |
+
identification within third-party archives.
|
| 188 |
+
|
| 189 |
+
Copyright 2021-present NAVER Corp.
|
| 190 |
+
|
| 191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
| 192 |
+
you may not use this file except in compliance with the License.
|
| 193 |
+
You may obtain a copy of the License at
|
| 194 |
+
|
| 195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
| 196 |
+
|
| 197 |
+
Unless required by applicable law or agreed to in writing, software
|
| 198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
| 199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 200 |
+
See the License for the specific language governing permissions and
|
| 201 |
+
limitations under the License.
|
RAVE-main/annotator/mlsd/__init__.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
import torch
|
| 4 |
+
import os
|
| 5 |
+
|
| 6 |
+
from einops import rearrange
|
| 7 |
+
from .models.mbv2_mlsd_tiny import MobileV2_MLSD_Tiny
|
| 8 |
+
from .models.mbv2_mlsd_large import MobileV2_MLSD_Large
|
| 9 |
+
from .utils import pred_lines
|
| 10 |
+
from modules import devices
|
| 11 |
+
from annotator.annotator_path import models_path
|
| 12 |
+
|
| 13 |
+
mlsdmodel = None
|
| 14 |
+
remote_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/mlsd_large_512_fp32.pth"
|
| 15 |
+
old_modeldir = os.path.dirname(os.path.realpath(__file__))
|
| 16 |
+
modeldir = os.path.join(models_path, "mlsd")
|
| 17 |
+
|
| 18 |
+
def unload_mlsd_model():
    """Move the cached MLSD model (if loaded) off the GPU to free VRAM.

    The module-level ``mlsdmodel`` cache is kept (not reset to ``None``) so a
    later ``apply_mlsd`` call can move it back without reloading the weights.
    """
    global mlsdmodel
    if mlsdmodel is None:
        return
    mlsdmodel = mlsdmodel.cpu()
|
| 22 |
+
|
| 23 |
+
def apply_mlsd(input_image, thr_v, thr_d):
    """Detect line segments in *input_image* with the MLSD-large model.

    The model is lazily loaded on first use: a checkpoint next to this file is
    preferred, otherwise the shared models directory is used (downloading the
    checkpoint there if necessary).

    Args:
        input_image: HxWxC image array (asserted to be 3-dimensional).
        thr_v: score threshold passed through to ``pred_lines``.
        thr_d: distance threshold passed through to ``pred_lines``.

    Returns:
        A single-channel (H, W) array with detected lines drawn in white on
        black; all zeros if detection fails (errors are deliberately swallowed
        so a bad frame never aborts the calling pipeline).
    """
    global modelpath, mlsdmodel
    if mlsdmodel is None:
        modelpath = os.path.join(modeldir, "mlsd_large_512_fp32.pth")
        old_modelpath = os.path.join(old_modeldir, "mlsd_large_512_fp32.pth")
        if os.path.exists(old_modelpath):
            # Legacy location (next to this file) wins when present.
            modelpath = old_modelpath
        elif not os.path.exists(modelpath):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(remote_model_path, model_dir=modeldir)
        mlsdmodel = MobileV2_MLSD_Large()
        mlsdmodel.load_state_dict(torch.load(modelpath), strict=True)
        mlsdmodel = mlsdmodel.to(devices.get_device_for("controlnet")).eval()

    model = mlsdmodel
    assert input_image.ndim == 3
    img = input_image
    img_output = np.zeros_like(img)
    try:
        with torch.no_grad():
            lines = pred_lines(img, model, [img.shape[0], img.shape[1]], thr_v, thr_d)
            for line in lines:
                x_start, y_start, x_end, y_end = [int(val) for val in line]
                cv2.line(img_output, (x_start, y_start), (x_end, y_end), [255, 255, 255], 1)
    except Exception:
        # Best effort: any failure yields an all-black control map rather than
        # crashing the UI/pipeline. (The previous code bound the exception to
        # an unused name.)
        pass
    return img_output[:, :, 0]
|
RAVE-main/annotator/mlsd/models/mbv2_mlsd_large.py
ADDED
|
@@ -0,0 +1,292 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
import torch.utils.model_zoo as model_zoo
|
| 6 |
+
from torch.nn import functional as F
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class BlockTypeA(nn.Module):
    """Feature-fusion block: 1x1-project two feature maps and concatenate.

    The second input ``b`` (the deeper/coarser map) is optionally bilinearly
    upsampled by 2x before concatenation so it matches ``a`` spatially.
    """

    def __init__(self, in_c1, in_c2, out_c1, out_c2, upscale = True):
        super(BlockTypeA, self).__init__()

        def project(cin, cout):
            # 1x1 conv + BN + ReLU channel projection.
            return nn.Sequential(
                nn.Conv2d(cin, cout, kernel_size=1),
                nn.BatchNorm2d(cout),
                nn.ReLU(inplace=True),
            )

        self.conv1 = project(in_c2, out_c2)
        self.conv2 = project(in_c1, out_c1)
        self.upscale = upscale

    def forward(self, a, b):
        b = self.conv1(b)
        a = self.conv2(a)
        if self.upscale:
            b = F.interpolate(b, scale_factor=2.0, mode='bilinear', align_corners=True)
        return torch.cat((a, b), dim=1)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
class BlockTypeB(nn.Module):
    """Refinement block: residual 3x3 conv, then a 3x3 conv to ``out_c``."""

    def __init__(self, in_c, out_c):
        super(BlockTypeB, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
            nn.BatchNorm2d(in_c),
            nn.ReLU(),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_c, out_c, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_c),
            nn.ReLU(),
        )

    def forward(self, x):
        # Identity shortcut around the first conv, then project.
        refined = self.conv1(x) + x
        return self.conv2(refined)
|
| 50 |
+
|
| 51 |
+
class BlockTypeC(nn.Module):
    """Head block: dilated 3x3 context conv, plain 3x3 conv, 1x1 projection."""

    def __init__(self, in_c, out_c):
        super(BlockTypeC, self).__init__()
        # Dilated conv (dilation=5, padding=5) enlarges the receptive field
        # while keeping the spatial size unchanged.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_c, in_c, kernel_size=3, padding=5, dilation=5),
            nn.BatchNorm2d(in_c),
            nn.ReLU(),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
            nn.BatchNorm2d(in_c),
            nn.ReLU(),
        )
        self.conv3 = nn.Conv2d(in_c, out_c, kernel_size=1)

    def forward(self, x):
        return self.conv3(self.conv2(self.conv1(x)))
|
| 71 |
+
|
| 72 |
+
def _make_divisible(v, divisor, min_value=None):
|
| 73 |
+
"""
|
| 74 |
+
This function is taken from the original tf repo.
|
| 75 |
+
It ensures that all layers have a channel number that is divisible by 8
|
| 76 |
+
It can be seen here:
|
| 77 |
+
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
|
| 78 |
+
:param v:
|
| 79 |
+
:param divisor:
|
| 80 |
+
:param min_value:
|
| 81 |
+
:return:
|
| 82 |
+
"""
|
| 83 |
+
if min_value is None:
|
| 84 |
+
min_value = divisor
|
| 85 |
+
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
|
| 86 |
+
# Make sure that round down does not go down by more than 10%.
|
| 87 |
+
if new_v < 0.9 * v:
|
| 88 |
+
new_v += divisor
|
| 89 |
+
return new_v
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
class ConvBNReLU(nn.Sequential):
    """Conv2d + BatchNorm2d + ReLU6 with TFLite-style "same" padding.

    For stride 2 the convolution itself is unpadded and the input is instead
    zero-padded asymmetrically (right/bottom) in :meth:`forward`, matching
    TFLite rather than PyTorch padding conventions.
    """

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
        self.channel_pad = out_planes - in_planes
        self.stride = stride

        # TFLite uses slightly different padding than PyTorch for stride 2.
        padding = 0 if stride == 2 else (kernel_size - 1) // 2

        super(ConvBNReLU, self).__init__(
            nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
            nn.BatchNorm2d(out_planes),
            nn.ReLU6(inplace=True),
        )
        self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride)

    def forward(self, x):
        if self.stride == 2:
            # Asymmetric (right/bottom) zero padding, as in TFLite.
            x = F.pad(x, (0, 1, 0, 1), "constant", 0)

        # Run every registered submodule except max_pool, which is registered
        # but intentionally skipped here.
        for layer in self:
            if not isinstance(layer, nn.MaxPool2d):
                x = layer(x)
        return x
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block (expand -> depthwise -> project)."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride

        hidden_dim = int(round(inp * expand_ratio))
        # Identity shortcut only when the block keeps resolution and width.
        self.use_res_connect = self.stride == 1 and inp == oup

        layers = []
        if expand_ratio != 1:
            # pointwise expansion
            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        # depthwise conv
        layers.append(ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim))
        # linear pointwise projection (no activation)
        layers.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
        layers.append(nn.BatchNorm2d(oup))
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
class MobileNetV2(nn.Module):
    # Truncated MobileNetV2 used as a multi-scale feature extractor: the
    # classifier head and the last two stages of the standard network are
    # omitted (see the commented-out rows below), and five intermediate
    # feature maps are returned instead of logits.
    def __init__(self, pretrained=True):
        """
        MobileNet V2 main class
        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
            Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for mobilenet
        """
        super(MobileNetV2, self).__init__()

        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        width_mult = 1.0
        round_nearest = 8

        inverted_residual_setting = [
            # t, c, n, s  (expansion, out channels, repeats, first stride)
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            #[6, 160, 3, 2],
            #[6, 320, 1, 1],
        ]

        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError("inverted_residual_setting should be non-empty "
                             "or a 4-element list, got {}".format(inverted_residual_setting))

        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        # NOTE(review): the stem takes 4 input channels rather than the usual
        # 3 — presumably the caller stacks an extra channel onto the RGB
        # image; confirm against the model's input pipeline.
        features = [ConvBNReLU(4, input_channel, stride=2)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                # Only the first block of each stage uses the stage stride.
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel

        self.features = nn.Sequential(*features)
        # Indices of the layers whose outputs are exposed as c1..c5.
        self.fpn_selected = [1, 3, 6, 10, 13]
        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)
        if pretrained:
            self._load_pretrained_model()

    def _forward_impl(self, x):
        # This exists since TorchScript doesn't support inheritance, so the superclass method
        # (this one) needs to have a name other than `forward` that can be accessed in a subclass
        fpn_features = []
        for i, f in enumerate(self.features):
            # Layers past the last selected index are never needed.
            if i > self.fpn_selected[-1]:
                break
            x = f(x)
            if i in self.fpn_selected:
                fpn_features.append(x)

        c1, c2, c3, c4, c5 = fpn_features
        return c1, c2, c3, c4, c5


    def forward(self, x):
        # Returns the five selected feature maps (c1..c5), not class logits.
        return self._forward_impl(x)

    def _load_pretrained_model(self):
        # Load ImageNet MobileNetV2 weights for whichever parameter names
        # still match this truncated variant; non-matching keys (e.g. the
        # 4-channel stem and removed stages) keep their fresh initialization.
        pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth')
        model_dict = {}
        state_dict = self.state_dict()
        for k, v in pretrain_dict.items():
            if k in state_dict:
                model_dict[k] = v
        state_dict.update(model_dict)
        self.load_state_dict(state_dict)
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
class MobileV2_MLSD_Large(nn.Module):
    """MLSD line-segment network (large variant).

    A MobileNetV2 backbone feeds a top-down decoder built from fusion
    (BlockTypeA) and refinement (BlockTypeB) pairs, followed by a
    BlockTypeC prediction head.
    """

    def __init__(self):
        super(MobileV2_MLSD_Large, self).__init__()

        self.backbone = MobileNetV2(pretrained=False)

        # Fusion (A) + refinement (B) stages, coarsest to finest scale.
        # c5 -> c4: the two maps already share a resolution, so no upsampling.
        self.block15 = BlockTypeA(in_c1=64, in_c2=96, out_c1=64, out_c2=64, upscale=False)
        self.block16 = BlockTypeB(128, 64)

        self.block17 = BlockTypeA(in_c1=32, in_c2=64, out_c1=64, out_c2=64)
        self.block18 = BlockTypeB(128, 64)

        self.block19 = BlockTypeA(in_c1=24, in_c2=64, out_c1=64, out_c2=64)
        self.block20 = BlockTypeB(128, 64)

        self.block21 = BlockTypeA(in_c1=16, in_c2=64, out_c1=64, out_c2=64)
        self.block22 = BlockTypeB(128, 64)

        # Prediction head: 64 -> 16 channels.
        self.block23 = BlockTypeC(64, 16)

    def forward(self, x):
        c1, c2, c3, c4, c5 = self.backbone(x)

        # Top-down decoder: fuse the next-finer backbone map, then refine.
        feat = self.block16(self.block15(c4, c5))
        feat = self.block18(self.block17(c3, feat))
        feat = self.block20(self.block19(c2, feat))
        feat = self.block22(self.block21(c1, feat))

        out = self.block23(feat)
        # Keep only channels 7..15 of the 16-channel head output.
        out = out[:, 7:, :, :]

        return out
|
RAVE-main/annotator/mlsd/models/mbv2_mlsd_tiny.py
ADDED
|
@@ -0,0 +1,275 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
import torch.utils.model_zoo as model_zoo
|
| 6 |
+
from torch.nn import functional as F
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class BlockTypeA(nn.Module):
    """Feature-fusion block: 1x1-project two maps, 2x-upsample the second,
    and concatenate along channels.

    Note: the ``upscale`` flag is stored but not consulted in this (tiny)
    variant — ``b`` is always bilinearly upsampled in :meth:`forward`.
    """

    def __init__(self, in_c1, in_c2, out_c1, out_c2, upscale = True):
        super(BlockTypeA, self).__init__()

        def project(cin, cout):
            # 1x1 conv + BN + ReLU channel projection.
            return nn.Sequential(
                nn.Conv2d(cin, cout, kernel_size=1),
                nn.BatchNorm2d(cout),
                nn.ReLU(inplace=True),
            )

        self.conv1 = project(in_c2, out_c2)
        self.conv2 = project(in_c1, out_c1)
        self.upscale = upscale

    def forward(self, a, b):
        b = self.conv1(b)
        a = self.conv2(a)
        b = F.interpolate(b, scale_factor=2.0, mode='bilinear', align_corners=True)
        return torch.cat((a, b), dim=1)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class BlockTypeB(nn.Module):
    """Refinement block: residual 3x3 conv, then a 3x3 conv to ``out_c``."""

    def __init__(self, in_c, out_c):
        super(BlockTypeB, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
            nn.BatchNorm2d(in_c),
            nn.ReLU(),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_c, out_c, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_c),
            nn.ReLU(),
        )

    def forward(self, x):
        # Identity shortcut around the first conv, then project.
        refined = self.conv1(x) + x
        return self.conv2(refined)
|
| 49 |
+
|
| 50 |
+
class BlockTypeC(nn.Module):
    """Head block: dilated 3x3 context conv, plain 3x3 conv, 1x1 projection."""

    def __init__(self, in_c, out_c):
        super(BlockTypeC, self).__init__()
        # Dilated conv (dilation=5, padding=5) enlarges the receptive field
        # while keeping the spatial size unchanged.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_c, in_c, kernel_size=3, padding=5, dilation=5),
            nn.BatchNorm2d(in_c),
            nn.ReLU(),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
            nn.BatchNorm2d(in_c),
            nn.ReLU(),
        )
        self.conv3 = nn.Conv2d(in_c, out_c, kernel_size=1)

    def forward(self, x):
        return self.conv3(self.conv2(self.conv1(x)))
|
| 70 |
+
|
| 71 |
+
def _make_divisible(v, divisor, min_value=None):
|
| 72 |
+
"""
|
| 73 |
+
This function is taken from the original tf repo.
|
| 74 |
+
It ensures that all layers have a channel number that is divisible by 8
|
| 75 |
+
It can be seen here:
|
| 76 |
+
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
|
| 77 |
+
:param v:
|
| 78 |
+
:param divisor:
|
| 79 |
+
:param min_value:
|
| 80 |
+
:return:
|
| 81 |
+
"""
|
| 82 |
+
if min_value is None:
|
| 83 |
+
min_value = divisor
|
| 84 |
+
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
|
| 85 |
+
# Make sure that round down does not go down by more than 10%.
|
| 86 |
+
if new_v < 0.9 * v:
|
| 87 |
+
new_v += divisor
|
| 88 |
+
return new_v
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
class ConvBNReLU(nn.Sequential):
    """Conv2d + BatchNorm2d + ReLU6 with TFLite-style "same" padding.

    For stride 2 the convolution itself is unpadded and the input is instead
    zero-padded asymmetrically (right/bottom) in :meth:`forward`, matching
    TFLite rather than PyTorch padding conventions.
    """

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
        self.channel_pad = out_planes - in_planes
        self.stride = stride

        # TFLite uses slightly different padding than PyTorch for stride 2.
        padding = 0 if stride == 2 else (kernel_size - 1) // 2

        super(ConvBNReLU, self).__init__(
            nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
            nn.BatchNorm2d(out_planes),
            nn.ReLU6(inplace=True),
        )
        self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride)

    def forward(self, x):
        if self.stride == 2:
            # Asymmetric (right/bottom) zero padding, as in TFLite.
            x = F.pad(x, (0, 1, 0, 1), "constant", 0)

        # Run every registered submodule except max_pool, which is registered
        # but intentionally skipped here.
        for layer in self:
            if not isinstance(layer, nn.MaxPool2d):
                x = layer(x)
        return x
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block (expand -> depthwise -> project)."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride

        hidden_dim = int(round(inp * expand_ratio))
        # Identity shortcut only when the block keeps resolution and width.
        self.use_res_connect = self.stride == 1 and inp == oup

        layers = []
        if expand_ratio != 1:
            # pointwise expansion
            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        # depthwise conv
        layers.append(ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim))
        # linear pointwise projection (no activation)
        layers.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
        layers.append(nn.BatchNorm2d(oup))
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
class MobileNetV2(nn.Module):
    """Truncated MobileNetV2 feature extractor used as the M-LSD tiny backbone.

    Note: the first conv takes 4 input channels (RGB + an extra constant
    channel appended by the preprocessing in mlsd/utils.py), not the usual 3.
    """

    def __init__(self, pretrained=True):
        """
        Build the truncated MobileNetV2.

        Only the first stages are built (the deeper [6,96,...]/[6,160,...]/
        [6,320,...] stages are commented out below). Features at indices
        3, 6 and 10 of ``self.features`` are exposed as c2/c3/c4.

        Args:
            pretrained (bool): kept for API compatibility; ImageNet weight
                loading is currently disabled (see the commented-out call
                at the end of __init__).
        """
        super(MobileNetV2, self).__init__()

        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        width_mult = 1.0
        round_nearest = 8

        # Each row: t (expand ratio), c (output channels),
        # n (block repeats), s (stride of first repeat).
        inverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            #[6, 96, 3, 1],
            #[6, 160, 3, 2],
            #[6, 320, 1, 1],
        ]

        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError("inverted_residual_setting should be non-empty "
                             "or a 4-element list, got {}".format(inverted_residual_setting))

        # building first layer — 4 input channels (see class docstring)
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features = [ConvBNReLU(4, input_channel, stride=2)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                # only the first block of each stage downsamples
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        self.features = nn.Sequential(*features)

        # Indices of feature maps returned by _forward_impl (c2, c3, c4).
        self.fpn_selected = [3, 6, 10]
        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

        # NOTE(review): pretrained loading is disabled; the stock ImageNet
        # checkpoint would not match the 4-channel first conv anyway — confirm
        # before re-enabling.
        #if pretrained:
        #    self._load_pretrained_model()

    def _forward_impl(self, x):
        """Run the backbone and collect the feature maps at fpn_selected indices."""
        # This exists since TorchScript doesn't support inheritance, so the superclass method
        # (this one) needs to have a name other than `forward` that can be accessed in a subclass
        fpn_features = []
        for i, f in enumerate(self.features):
            # stop once the last selected feature has been produced
            if i > self.fpn_selected[-1]:
                break
            x = f(x)
            if i in self.fpn_selected:
                fpn_features.append(x)

        c2, c3, c4 = fpn_features
        return c2, c3, c4


    def forward(self, x):
        return self._forward_impl(x)

    def _load_pretrained_model(self):
        """Load ImageNet MobileNetV2 weights, keeping only matching keys."""
        pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth')
        model_dict = {}
        state_dict = self.state_dict()
        # copy only parameters whose names exist in this truncated model
        for k, v in pretrain_dict.items():
            if k in state_dict:
                model_dict[k] = v
        state_dict.update(model_dict)
        self.load_state_dict(state_dict)
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
class MobileV2_MLSD_Tiny(nn.Module):
    """M-LSD 'tiny' line-segment detection network.

    Truncated MobileNetV2 backbone plus a small top-down decoder built from
    BlockTypeA/B/C (defined earlier in this file).
    """

    def __init__(self):
        super(MobileV2_MLSD_Tiny, self).__init__()

        self.backbone = MobileNetV2(pretrained=True)

        # Fuse backbone features c3 and c4.
        self.block12 = BlockTypeA(in_c1= 32, in_c2= 64,
                                  out_c1= 64, out_c2=64)
        self.block13 = BlockTypeB(128, 64)

        # Fuse c2 with the upsampled decoder features.
        self.block14 = BlockTypeA(in_c1 = 24, in_c2 = 64,
                                  out_c1= 32, out_c2= 32)
        self.block15 = BlockTypeB(64, 64)

        # Prediction head producing 16 output maps.
        self.block16 = BlockTypeC(64, 16)

    def forward(self, x):
        # x: [B, 4, H, W]; backbone yields three feature scales.
        c2, c3, c4 = self.backbone(x)

        x = self.block12(c3, c4)
        x = self.block13(x)
        x = self.block14(c2, x)
        x = self.block15(x)
        x = self.block16(x)
        # Keep the last 9 of 16 channels; the decode step in mlsd/utils.py
        # reads channel 0 as the center logit and 1:5 as displacements —
        # presumably the remaining kept channels are auxiliary (confirm).
        x = x[:, 7:, :, :]
        #print(x.shape)
        # Upsample 2x to half the input resolution.
        x = F.interpolate(x, scale_factor=2.0, mode='bilinear', align_corners=True)

        return x
|
RAVE-main/annotator/mlsd/utils.py
ADDED
|
@@ -0,0 +1,581 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
'''
|
| 2 |
+
modified by lihaoweicv
|
| 3 |
+
pytorch version
|
| 4 |
+
'''
|
| 5 |
+
|
| 6 |
+
'''
|
| 7 |
+
M-LSD
|
| 8 |
+
Copyright 2021-present NAVER Corp.
|
| 9 |
+
Apache License v2.0
|
| 10 |
+
'''
|
| 11 |
+
|
| 12 |
+
import os
|
| 13 |
+
import numpy as np
|
| 14 |
+
import cv2
|
| 15 |
+
import torch
|
| 16 |
+
from torch.nn import functional as F
|
| 17 |
+
from modules import devices
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def deccode_output_score_and_ptss(tpMap, topk_n=200, ksize=5):
    '''
    Decode the raw M-LSD output map into line-center candidates.

    tpMap layout (shape [1, C, H, W]):
        center:       tpMap[:, 0, :, :]   - line-center heatmap logits
        displacement: tpMap[:, 1:5, :, :] - (dx_start, dy_start, dx_end, dy_end)

    Args:
        tpMap: model output tensor, batch size must be 1.
        topk_n: number of top-scoring peaks to return (must be <= H*W).
        ksize: window size for max-pool non-maximum suppression.

    Returns:
        ptss:         [topk_n, 2] int array of (y, x) peak locations
        scores:       [topk_n] peak confidences (sigmoid of center logits)
        displacement: [H, W, 4] per-pixel endpoint displacements
    '''
    b, c, h, w = tpMap.shape
    assert b == 1, 'only support bsize==1'
    displacement = tpMap[:, 1:5, :, :][0]
    center = tpMap[:, 0, :, :]
    heat = torch.sigmoid(center)
    # Non-maximum suppression: zero out anything that is not a local maximum
    # of the heatmap within a ksize x ksize window.
    hmax = F.max_pool2d(heat, (ksize, ksize), stride=1, padding=(ksize - 1) // 2)
    keep = (hmax == heat).float()
    heat = heat * keep
    heat = heat.reshape(-1, )

    scores, indices = torch.topk(heat, topk_n, dim=-1, largest=True)
    # Convert flat indices back to (y, x). rounding_mode='floor' replaces the
    # deprecated torch.floor_divide (identical for these non-negative indices)
    # and matches the decoding used in pred_squares in this file.
    yy = torch.div(indices, w, rounding_mode='floor').unsqueeze(-1)
    xx = torch.fmod(indices, w).unsqueeze(-1)
    ptss = torch.cat((yy, xx), dim=-1)

    ptss = ptss.detach().cpu().numpy()
    scores = scores.detach().cpu().numpy()
    displacement = displacement.detach().cpu().numpy()
    displacement = displacement.transpose((1, 2, 0))
    return ptss, scores, displacement
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def pred_lines(image, model,
               input_shape=[512, 512],
               score_thr=0.10,
               dist_thr=20.0):
    """Run the M-LSD model on an image and return detected line segments.

    Args:
        image: H x W x 3 image array; resized internally to input_shape.
        model: M-LSD network, called with a [1, 4, h, w] tensor.
        input_shape: [height, width] to which the image is resized.
        score_thr: minimum center confidence for keeping a segment.
        dist_thr: minimum segment length in model-output pixels.

    Returns:
        [n, 4] float array of (x1, y1, x2, y2) segments scaled back to the
        original image coordinates; an empty [0, 4] array when nothing
        passes the thresholds (the previous code raised IndexError here).
    """
    h, w, _ = image.shape
    h_ratio, w_ratio = [h / input_shape[0], w / input_shape[1]]

    # Resize and append a constant 4th channel (the network's first conv
    # expects 4 input channels), then convert to NCHW float in [-1, 1].
    resized_image = np.concatenate([cv2.resize(image, (input_shape[1], input_shape[0]), interpolation=cv2.INTER_AREA),
                                    np.ones([input_shape[0], input_shape[1], 1])], axis=-1)

    resized_image = resized_image.transpose((2, 0, 1))
    batch_image = np.expand_dims(resized_image, axis=0).astype('float32')
    batch_image = (batch_image / 127.5) - 1.0

    batch_image = torch.from_numpy(batch_image).float().to(devices.get_device_for("controlnet"))
    outputs = model(batch_image)
    pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3)
    start = vmap[:, :, :2]
    end = vmap[:, :, 2:]
    dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1))

    segments_list = []
    for center, score in zip(pts, pts_score):
        y, x = center
        distance = dist_map[y, x]
        if score > score_thr and distance > dist_thr:
            # Reconstruct segment endpoints from the displacement field.
            disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :]
            x_start = x + disp_x_start
            y_start = y + disp_y_start
            x_end = x + disp_x_end
            y_end = y + disp_y_end
            segments_list.append([x_start, y_start, x_end, y_end])

    # Robustness fix: with no detections, np.array(segments_list) has shape
    # (0,) and the column assignments below would raise IndexError.
    if not segments_list:
        return np.zeros((0, 4), dtype='float32')

    lines = 2 * np.array(segments_list)  # model output is half resolution: 256 -> 512
    lines[:, 0] = lines[:, 0] * w_ratio
    lines[:, 1] = lines[:, 1] * h_ratio
    lines[:, 2] = lines[:, 2] * w_ratio
    lines[:, 3] = lines[:, 3] * h_ratio

    return lines
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def pred_squares(image,
                 model,
                 input_shape=[512, 512],
                 params={'score': 0.06,
                         'outside_ratio': 0.28,
                         'inside_ratio': 0.45,
                         'w_overlap': 0.0,
                         'w_degree': 1.95,
                         'w_length': 0.0,
                         'w_area': 1.86,
                         'w_center': 0.14}):
    '''
    Detect line segments and candidate squares (quadrilaterals) with M-LSD.

    shape = [height, width]

    Returns (new_segments, squares, score_array, inter_points), all scaled
    back to original-image coordinates; each falls back to an empty list
    when the corresponding stage found nothing (see the try/except blocks
    at the end).

    NOTE(review): `params` is a shared mutable default — callers must not
    mutate it.
    '''
    h, w, _ = image.shape
    original_shape = [h, w]

    # Resize, append a constant 4th channel, convert to NCHW in [-1, 1].
    # NOTE(review): cv2.resize takes (width, height); this passes
    # (input_shape[0], input_shape[1]) — harmless only while the shape is
    # square, unlike pred_lines above which swaps them. Confirm upstream.
    resized_image = np.concatenate([cv2.resize(image, (input_shape[0], input_shape[1]), interpolation=cv2.INTER_AREA),
                                    np.ones([input_shape[0], input_shape[1], 1])], axis=-1)
    resized_image = resized_image.transpose((2, 0, 1))
    batch_image = np.expand_dims(resized_image, axis=0).astype('float32')
    batch_image = (batch_image / 127.5) - 1.0

    batch_image = torch.from_numpy(batch_image).float().to(devices.get_device_for("controlnet"))
    outputs = model(batch_image)

    pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3)
    start = vmap[:, :, :2]  # (x, y)
    end = vmap[:, :, 2:]  # (x, y)
    dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1))

    # Collect junction points and raw segments above the score/length thresholds.
    junc_list = []
    segments_list = []
    for junc, score in zip(pts, pts_score):
        y, x = junc
        distance = dist_map[y, x]
        if score > params['score'] and distance > 20.0:
            junc_list.append([x, y])
            disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :]
            d_arrow = 1.0
            x_start = x + d_arrow * disp_x_start
            y_start = y + d_arrow * disp_y_start
            x_end = x + d_arrow * disp_x_end
            y_end = y + d_arrow * disp_y_end
            segments_list.append([x_start, y_start, x_end, y_end])

    segments = np.array(segments_list)

    ####### post processing for squares
    # 1. get unique lines: map each segment to Hough (distance, angle)
    # space and deduplicate segments that fall in the same quantized bin.
    point = np.array([[0, 0]])
    point = point[0]
    start = segments[:, :2]
    end = segments[:, 2:]
    diff = start - end
    # Line through the segment in implicit form: a*x + b*y = c.
    a = diff[:, 1]
    b = -diff[:, 0]
    c = a * start[:, 0] + b * start[:, 1]

    # Perpendicular distance from the origin and orientation angle.
    d = np.abs(a * point[0] + b * point[1] - c) / np.sqrt(a ** 2 + b ** 2 + 1e-10)
    theta = np.arctan2(diff[:, 0], diff[:, 1]) * 180 / np.pi
    theta[theta < 0.0] += 180
    hough = np.concatenate([d[:, None], theta[:, None]], axis=-1)

    # Quantize the Hough space (1 px distance bins, 2 degree angle bins).
    d_quant = 1
    theta_quant = 2
    hough[:, 0] //= d_quant
    hough[:, 1] //= theta_quant
    _, indices, counts = np.unique(hough, axis=0, return_index=True, return_counts=True)

    # Accumulator of votes per bin and the index of one representative
    # segment per occupied bin (-1 marks empty bins).
    acc_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='float32')
    idx_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='int32') - 1
    yx_indices = hough[indices, :].astype('int32')
    acc_map[yx_indices[:, 0], yx_indices[:, 1]] = counts
    idx_map[yx_indices[:, 0], yx_indices[:, 1]] = indices

    acc_map_np = acc_map
    # (A commented-out TensorFlow implementation of the suppression below
    # was removed; the PyTorch version is equivalent.)

    ### fast suppression using pytorch op
    # Non-maximum suppression over the accumulator, then take the top bins.
    # NOTE: h and w are re-bound here to the accumulator size, shadowing the
    # image dimensions read at the top of the function.
    acc_map = torch.from_numpy(acc_map_np).unsqueeze(0).unsqueeze(0)
    _, _, h, w = acc_map.shape
    max_acc_map = F.max_pool2d(acc_map, kernel_size=5, stride=1, padding=2)
    acc_map = acc_map * ((acc_map == max_acc_map).float())
    flatten_acc_map = acc_map.reshape([-1, ])

    scores, indices = torch.topk(flatten_acc_map, len(pts), dim=-1, largest=True)
    yy = torch.div(indices, w, rounding_mode='floor').unsqueeze(-1)
    xx = torch.fmod(indices, w).unsqueeze(-1)
    yx = torch.cat((yy, xx), dim=-1)

    yx = yx.detach().cpu().numpy()

    topk_values = scores.detach().cpu().numpy()
    indices = idx_map[yx[:, 0], yx[:, 1]]
    basis = 5 // 2  # half-window of the 5x5 NMS neighbourhood

    # Merge all segments whose Hough bins fall inside each surviving peak's
    # 5x5 neighbourhood into one representative segment (its bounding span).
    merged_segments = []
    for yx_pt, max_indice, value in zip(yx, indices, topk_values):
        y, x = yx_pt
        if max_indice == -1 or value == 0:
            continue
        segment_list = []
        for y_offset in range(-basis, basis + 1):
            for x_offset in range(-basis, basis + 1):
                indice = idx_map[y + y_offset, x + x_offset]
                cnt = int(acc_map_np[y + y_offset, x + x_offset])
                if indice != -1:
                    segment_list.append(segments[indice])
                if cnt > 1:
                    # The bin held several segments; np.unique kept only one
                    # representative, so scan for the others with the same
                    # quantized Hough coordinates.
                    check_cnt = 1
                    current_hough = hough[indice]
                    for new_indice, new_hough in enumerate(hough):
                        if (current_hough == new_hough).all() and indice != new_indice:
                            segment_list.append(segments[new_indice])
                            check_cnt += 1
                        if check_cnt == cnt:
                            break
        group_segments = np.array(segment_list).reshape([-1, 2])
        sorted_group_segments = np.sort(group_segments, axis=0)
        x_min, y_min = sorted_group_segments[0, :]
        x_max, y_max = sorted_group_segments[-1, :]

        # Orient the merged segment according to the group's dominant angle.
        deg = theta[max_indice]
        if deg >= 90:
            merged_segments.append([x_min, y_max, x_max, y_min])
        else:
            merged_segments.append([x_min, y_min, x_max, y_max])

    # 2. get intersections of every pair of merged lines (vectorized
    # solution of the two implicit line equations via Cramer's rule).
    new_segments = np.array(merged_segments)  # (x1, y1, x2, y2)
    start = new_segments[:, :2]  # (x1, y1)
    end = new_segments[:, 2:]  # (x2, y2)
    new_centers = (start + end) / 2.0
    diff = start - end
    dist_segments = np.sqrt(np.sum(diff ** 2, axis=-1))

    # ax + by = c
    a = diff[:, 1]
    b = -diff[:, 0]
    c = a * start[:, 0] + b * start[:, 1]
    pre_det = a[:, None] * b[None, :]
    det = pre_det - np.transpose(pre_det)

    pre_inter_y = a[:, None] * c[None, :]
    inter_y = (pre_inter_y - np.transpose(pre_inter_y)) / (det + 1e-10)
    pre_inter_x = c[:, None] * b[None, :]
    inter_x = (pre_inter_x - np.transpose(pre_inter_x)) / (det + 1e-10)
    inter_pts = np.concatenate([inter_x[:, :, None], inter_y[:, :, None]], axis=-1).astype('int32')

    # 3. get corner information
    # 3.1 get distance
    '''
    dist_segments:
        | dist(0), dist(1), dist(2), ...|
    dist_inter_to_segment1:
        | dist(inter,0), dist(inter,0), dist(inter,0), ... |
        | dist(inter,1), dist(inter,1), dist(inter,1), ... |
        ...
    dist_inter_to_semgnet2:
        | dist(inter,0), dist(inter,1), dist(inter,2), ... |
        | dist(inter,0), dist(inter,1), dist(inter,2), ... |
        ...
    '''

    dist_inter_to_segment1_start = np.sqrt(
        np.sum(((inter_pts - start[:, None, :]) ** 2), axis=-1, keepdims=True))  # [n_batch, n_batch, 1]
    dist_inter_to_segment1_end = np.sqrt(
        np.sum(((inter_pts - end[:, None, :]) ** 2), axis=-1, keepdims=True))  # [n_batch, n_batch, 1]
    dist_inter_to_segment2_start = np.sqrt(
        np.sum(((inter_pts - start[None, :, :]) ** 2), axis=-1, keepdims=True))  # [n_batch, n_batch, 1]
    dist_inter_to_segment2_end = np.sqrt(
        np.sum(((inter_pts - end[None, :, :]) ** 2), axis=-1, keepdims=True))  # [n_batch, n_batch, 1]

    # sort ascending: [.., 0] is the nearer endpoint, [.., 1] the farther.
    dist_inter_to_segment1 = np.sort(
        np.concatenate([dist_inter_to_segment1_start, dist_inter_to_segment1_end], axis=-1),
        axis=-1)  # [n_batch, n_batch, 2]
    dist_inter_to_segment2 = np.sort(
        np.concatenate([dist_inter_to_segment2_start, dist_inter_to_segment2_end], axis=-1),
        axis=-1)  # [n_batch, n_batch, 2]

    # 3.2 get degree of each intersection towards the two segment centers.
    inter_to_start = new_centers[:, None, :] - inter_pts
    deg_inter_to_start = np.arctan2(inter_to_start[:, :, 1], inter_to_start[:, :, 0]) * 180 / np.pi
    deg_inter_to_start[deg_inter_to_start < 0.0] += 360
    inter_to_end = new_centers[None, :, :] - inter_pts
    deg_inter_to_end = np.arctan2(inter_to_end[:, :, 1], inter_to_end[:, :, 0]) * 180 / np.pi
    deg_inter_to_end[deg_inter_to_end < 0.0] += 360

    '''
    B -- G
    |    |
    C -- R
    B : blue / G: green / C: cyan / R: red

    0 -- 1
    |    |
    3 -- 2
    '''
    # rename variables
    deg1_map, deg2_map = deg_inter_to_start, deg_inter_to_end
    # sort deg ascending
    deg_sort = np.sort(np.concatenate([deg1_map[:, :, None], deg2_map[:, :, None]], axis=-1), axis=-1)

    deg_diff_map = np.abs(deg1_map - deg2_map)
    # we only consider the smallest degree of intersect
    deg_diff_map[deg_diff_map > 180] = 360 - deg_diff_map[deg_diff_map > 180]

    # define available degree range for a near-right-angle corner
    deg_range = [60, 120]

    # Classify each valid intersection as one of the four square corners.
    corner_dict = {corner_info: [] for corner_info in range(4)}
    inter_points = []
    for i in range(inter_pts.shape[0]):
        for j in range(i + 1, inter_pts.shape[1]):
            # i, j > line index, always i < j
            x, y = inter_pts[i, j, :]
            deg1, deg2 = deg_sort[i, j, :]
            deg_diff = deg_diff_map[i, j]

            check_degree = deg_diff > deg_range[0] and deg_diff < deg_range[1]

            outside_ratio = params['outside_ratio']  # over ratio >>> drop it!
            inside_ratio = params['inside_ratio']  # over ratio >>> drop it!
            # Keep the intersection only when it lies close enough to both
            # segments (either just outside an endpoint or inside the span).
            check_distance = ((dist_inter_to_segment1[i, j, 1] >= dist_segments[i] and \
                               dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * outside_ratio) or \
                              (dist_inter_to_segment1[i, j, 1] <= dist_segments[i] and \
                               dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * inside_ratio)) and \
                             ((dist_inter_to_segment2[i, j, 1] >= dist_segments[j] and \
                               dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * outside_ratio) or \
                              (dist_inter_to_segment2[i, j, 1] <= dist_segments[j] and \
                               dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * inside_ratio))

            if check_degree and check_distance:
                corner_info = None

                if (deg1 >= 0 and deg1 <= 45 and deg2 >= 45 and deg2 <= 120) or \
                        (deg2 >= 315 and deg1 >= 45 and deg1 <= 120):
                    corner_info, color_info = 0, 'blue'
                elif (deg1 >= 45 and deg1 <= 125 and deg2 >= 125 and deg2 <= 225):
                    corner_info, color_info = 1, 'green'
                elif (deg1 >= 125 and deg1 <= 225 and deg2 >= 225 and deg2 <= 315):
                    corner_info, color_info = 2, 'black'
                elif (deg1 >= 0 and deg1 <= 45 and deg2 >= 225 and deg2 <= 315) or \
                        (deg2 >= 315 and deg1 >= 225 and deg1 <= 315):
                    corner_info, color_info = 3, 'cyan'
                else:
                    corner_info, color_info = 4, 'red'  # we don't use it
                    continue

                # corner entry: [x, y, line_index_i, line_index_j]
                corner_dict[corner_info].append([x, y, i, j])
                inter_points.append([x, y])

    # Assemble candidate squares by chaining corners 0 -> 1 -> 2 -> 3 -> 0
    # through shared line indices.
    square_list = []
    connect_list = []
    segments_list = []
    for corner0 in corner_dict[0]:
        for corner1 in corner_dict[1]:
            connect01 = False
            for corner0_line in corner0[2:]:
                if corner0_line in corner1[2:]:
                    connect01 = True
                    break
            if connect01:
                for corner2 in corner_dict[2]:
                    connect12 = False
                    for corner1_line in corner1[2:]:
                        if corner1_line in corner2[2:]:
                            connect12 = True
                            break
                    if connect12:
                        for corner3 in corner_dict[3]:
                            connect23 = False
                            for corner2_line in corner2[2:]:
                                if corner2_line in corner3[2:]:
                                    connect23 = True
                                    break
                            if connect23:
                                for corner3_line in corner3[2:]:
                                    if corner3_line in corner0[2:]:
                                        # SQUARE!!!
                                        '''
                                        0 -- 1
                                        |    |
                                        3 -- 2
                                        square_list:
                                            order: 0 > 1 > 2 > 3
                                            | x0, y0, x1, y1, x2, y2, x3, y3 |
                                            | x0, y0, x1, y1, x2, y2, x3, y3 |
                                            ...
                                        connect_list:
                                            order: 01 > 12 > 23 > 30
                                            | line_idx01, line_idx12, line_idx23, line_idx30 |
                                            | line_idx01, line_idx12, line_idx23, line_idx30 |
                                            ...
                                        segments_list:
                                            order: 0 > 1 > 2 > 3
                                            | line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j |
                                            | line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j |
                                            ...
                                        '''
                                        square_list.append(corner0[:2] + corner1[:2] + corner2[:2] + corner3[:2])
                                        connect_list.append([corner0_line, corner1_line, corner2_line, corner3_line])
                                        segments_list.append(corner0[2:] + corner1[2:] + corner2[2:] + corner3[2:])

    def check_outside_inside(segments_info, connect_idx):
        # return 'outside or inside', min distance, cover_param, peri_param
        if connect_idx == segments_info[0]:
            check_dist_mat = dist_inter_to_segment1
        else:
            check_dist_mat = dist_inter_to_segment2

        i, j = segments_info
        min_dist, max_dist = check_dist_mat[i, j, :]
        connect_dist = dist_segments[connect_idx]
        if max_dist > connect_dist:
            return 'outside', min_dist, 0, 1
        else:
            return 'inside', min_dist, -1, -1

    top_square = None

    # Score every candidate square; any failure (e.g. no squares found)
    # leaves square_list/score_array unscored and is handled at the end.
    try:
        map_size = input_shape[0] / 2
        squares = np.array(square_list).reshape([-1, 4, 2])
        score_array = []
        connect_array = np.array(connect_list)
        segments_array = np.array(segments_list).reshape([-1, 4, 2])

        # get degree of corners:
        squares_rollup = np.roll(squares, 1, axis=1)
        squares_rolldown = np.roll(squares, -1, axis=1)
        vec1 = squares_rollup - squares
        normalized_vec1 = vec1 / (np.linalg.norm(vec1, axis=-1, keepdims=True) + 1e-10)
        vec2 = squares_rolldown - squares
        normalized_vec2 = vec2 / (np.linalg.norm(vec2, axis=-1, keepdims=True) + 1e-10)
        inner_products = np.sum(normalized_vec1 * normalized_vec2, axis=-1)  # [n_squares, 4]
        squares_degree = np.arccos(inner_products) * 180 / np.pi  # [n_squares, 4]

        # get square score
        overlap_scores = []
        degree_scores = []
        length_scores = []

        for connects, segments, square, degree in zip(connect_array, segments_array, squares, squares_degree):
            '''
            0 -- 1
            |    |
            3 -- 2

            # segments: [4, 2]
            # connects: [4]
            '''

            ###################################### OVERLAP SCORES
            cover = 0
            perimeter = 0
            # check 0 > 1 > 2 > 3
            square_length = []

            for start_idx in range(4):
                end_idx = (start_idx + 1) % 4

                connect_idx = connects[start_idx]  # segment idx of segment01
                start_segments = segments[start_idx]
                end_segments = segments[end_idx]

                start_point = square[start_idx]
                end_point = square[end_idx]

                # check whether outside or inside
                start_position, start_min, start_cover_param, start_peri_param = check_outside_inside(start_segments,
                                                                                                      connect_idx)
                end_position, end_min, end_cover_param, end_peri_param = check_outside_inside(end_segments, connect_idx)

                cover += dist_segments[connect_idx] + start_cover_param * start_min + end_cover_param * end_min
                perimeter += dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min

                square_length.append(
                    dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min)

            overlap_scores.append(cover / perimeter)
            ######################################
            ###################################### DEGREE SCORES
            '''
            deg0 vs deg2
            deg1 vs deg3
            '''
            # Ratio of opposite corner angles (1.0 = perfectly symmetric).
            deg0, deg1, deg2, deg3 = degree
            deg_ratio1 = deg0 / deg2
            if deg_ratio1 > 1.0:
                deg_ratio1 = 1 / deg_ratio1
            deg_ratio2 = deg1 / deg3
            if deg_ratio2 > 1.0:
                deg_ratio2 = 1 / deg_ratio2
            degree_scores.append((deg_ratio1 + deg_ratio2) / 2)
            ######################################
            ###################################### LENGTH SCORES
            '''
            len0 vs len2
            len1 vs len3
            '''
            # Ratio of opposite side lengths (1.0 = parallelogram-like).
            len0, len1, len2, len3 = square_length
            len_ratio1 = len0 / len2 if len2 > len0 else len2 / len0
            len_ratio2 = len1 / len3 if len3 > len1 else len3 / len1
            length_scores.append((len_ratio1 + len_ratio2) / 2)

            ######################################

        overlap_scores = np.array(overlap_scores)
        overlap_scores /= np.max(overlap_scores)

        degree_scores = np.array(degree_scores)
        # degree_scores /= np.max(degree_scores)

        length_scores = np.array(length_scores)

        ###################################### AREA SCORES
        # Shoelace formula, normalized by the output map area.
        area_scores = np.reshape(squares, [-1, 4, 2])
        area_x = area_scores[:, :, 0]
        area_y = area_scores[:, :, 1]
        correction = area_x[:, -1] * area_y[:, 0] - area_y[:, -1] * area_x[:, 0]
        area_scores = np.sum(area_x[:, :-1] * area_y[:, 1:], axis=-1) - np.sum(area_y[:, :-1] * area_x[:, 1:], axis=-1)
        area_scores = 0.5 * np.abs(area_scores + correction)
        area_scores /= (map_size * map_size)  # np.max(area_scores)
        ######################################

        ###################################### CENTER SCORES
        centers = np.array([[256 // 2, 256 // 2]], dtype='float32')  # [1, 2]
        # squares: [n, 4, 2]
        square_centers = np.mean(squares, axis=1)  # [n, 2]
        # NOTE(review): np.sum here has no axis, so center2center is a single
        # scalar over all squares rather than a per-square distance; this
        # matches the upstream M-LSD code but looks unintended — confirm
        # before changing, as it alters ranking.
        center2center = np.sqrt(np.sum((centers - square_centers) ** 2))
        center_scores = center2center / (map_size / np.sqrt(2.0))

        '''
        score_w = [overlap, degree, area, center, length]
        '''
        # NOTE(review): score_w is unused; the weights actually applied come
        # from `params` below.
        score_w = [0.0, 1.0, 10.0, 0.5, 1.0]
        score_array = params['w_overlap'] * overlap_scores \
                      + params['w_degree'] * degree_scores \
                      + params['w_area'] * area_scores \
                      - params['w_center'] * center_scores \
                      + params['w_length'] * length_scores

        best_square = []

        # Sort squares by descending score.
        sorted_idx = np.argsort(score_array)[::-1]
        score_array = score_array[sorted_idx]
        squares = squares[sorted_idx]

    except Exception as e:
        # Best-effort: scoring fails when no squares were found; fall through
        # to the empty-result handling below.
        pass

    '''return list
    merged_lines, squares, scores
    '''

    # Scale everything from the half-resolution output map back to the
    # original image size; each stage may legitimately be empty, hence the
    # (intentionally broad) excepts.
    try:
        new_segments[:, 0] = new_segments[:, 0] * 2 / input_shape[1] * original_shape[1]
        new_segments[:, 1] = new_segments[:, 1] * 2 / input_shape[0] * original_shape[0]
        new_segments[:, 2] = new_segments[:, 2] * 2 / input_shape[1] * original_shape[1]
        new_segments[:, 3] = new_segments[:, 3] * 2 / input_shape[0] * original_shape[0]
    except:
        new_segments = []

    try:
        squares[:, :, 0] = squares[:, :, 0] * 2 / input_shape[1] * original_shape[1]
        squares[:, :, 1] = squares[:, :, 1] * 2 / input_shape[0] * original_shape[0]
    except:
        squares = []
        score_array = []

    try:
        inter_points = np.array(inter_points)
        inter_points[:, 0] = inter_points[:, 0] * 2 / input_shape[1] * original_shape[1]
        inter_points[:, 1] = inter_points[:, 1] * 2 / input_shape[0] * original_shape[0]
    except:
        inter_points = []

    return new_segments, squares, score_array, inter_points
|
RAVE-main/annotator/mmpkg/mmseg/apis/__init__.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .inference import inference_segmentor, init_segmentor, show_result_pyplot
|
| 2 |
+
from .test import multi_gpu_test, single_gpu_test
|
| 3 |
+
from .train import get_root_logger, set_random_seed, train_segmentor
|
| 4 |
+
|
| 5 |
+
__all__ = [
|
| 6 |
+
'get_root_logger', 'set_random_seed', 'train_segmentor', 'init_segmentor',
|
| 7 |
+
'inference_segmentor', 'multi_gpu_test', 'single_gpu_test',
|
| 8 |
+
'show_result_pyplot'
|
| 9 |
+
]
|
RAVE-main/annotator/mmpkg/mmseg/apis/inference.py
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import matplotlib.pyplot as plt
|
| 2 |
+
import annotator.mmpkg.mmcv as mmcv
|
| 3 |
+
import torch
|
| 4 |
+
from annotator.mmpkg.mmcv.parallel import collate, scatter
|
| 5 |
+
from annotator.mmpkg.mmcv.runner import load_checkpoint
|
| 6 |
+
|
| 7 |
+
from annotator.mmpkg.mmseg.datasets.pipelines import Compose
|
| 8 |
+
from annotator.mmpkg.mmseg.models import build_segmentor
|
| 9 |
+
from modules import devices
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def init_segmentor(config, checkpoint=None, device=None):
    """Initialize a segmentor from config file.

    Args:
        config (str or :obj:`mmcv.Config`): Config file path or the config
            object.
        checkpoint (str, optional): Checkpoint path. If left as None, the model
            will not load any weights.
        device (str or torch.device, optional): Device to place the model on.
            If None, the device is resolved at call time via
            ``devices.get_device_for("controlnet")``. (The previous signature
            evaluated that lookup once at import time, freezing a possibly
            stale device choice.)

    Returns:
        nn.Module: The constructed segmentor, in eval mode.

    Raises:
        TypeError: If ``config`` is neither a path string nor an
            ``mmcv.Config`` object.
    """
    if device is None:
        # Resolve lazily so the preferred device is chosen when the model is
        # built, not frozen at module import.
        device = devices.get_device_for("controlnet")
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif not isinstance(config, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        'but got {}'.format(type(config)))
    # Inference-only: drop pretrained-backbone download and training config.
    config.model.pretrained = None
    config.model.train_cfg = None
    model = build_segmentor(config.model, test_cfg=config.get('test_cfg'))
    if checkpoint is not None:
        # Load on CPU first; the model is moved to `device` below.
        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
        model.CLASSES = checkpoint['meta']['CLASSES']
        model.PALETTE = checkpoint['meta']['PALETTE']
    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()
    return model
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
class LoadImage:
    """Minimal pipeline step that loads the input image into ``results``."""

    def __call__(self, results):
        """Load the image referenced by ``results['img']``.

        Args:
            results (dict): Must contain ``'img'`` as either a file path
                (str) or an already-loaded image accepted by
                ``mmcv.imread``.

        Returns:
            dict: The same dict with ``'img'`` replaced by the loaded
            array and filename/shape metadata filled in.
        """
        source = results['img']
        if isinstance(source, str):
            # A path was supplied; record it as both current and original name.
            results['filename'] = source
            results['ori_filename'] = source
        else:
            # In-memory image: there is no filename to record.
            results['filename'] = None
            results['ori_filename'] = None
        loaded = mmcv.imread(source)
        results['img'] = loaded
        results['img_shape'] = loaded.shape
        results['ori_shape'] = loaded.shape
        return results
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def inference_segmentor(model, img):
    """Inference image(s) with the segmentor.

    Args:
        model (nn.Module): The loaded segmentor (must carry ``model.cfg``,
            as set by ``init_segmentor``).
        img (str/ndarray): Either an image file path or a loaded image.

    Returns:
        (list[Tensor]): The segmentation result.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline; replace the dataset's loader step with
    # LoadImage so in-memory arrays are accepted as well as paths
    test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)
    data = collate([data], samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        # scatter to specified GPU (also unwraps DataContainers)
        data = scatter(data, [device])[0]
    else:
        # non-CUDA path: move the batch tensor manually and unwrap the
        # DataContainer metadata that scatter() would otherwise handle
        data['img'][0] = data['img'][0].to(devices.get_device_for("controlnet"))
        data['img_metas'] = [i.data[0] for i in data['img_metas']]

    # forward the model
    with torch.no_grad():
        result = model(return_loss=False, rescale=True, **data)
    return result
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def show_result_pyplot(model,
                       img,
                       result,
                       palette=None,
                       fig_size=(15, 10),
                       opacity=0.5,
                       title='',
                       block=True):
    """Paint the segmentation result onto the image and return it.

    Despite the historical name, this variant never opens a pyplot window:
    the painted image is returned as an RGB array instead of being shown.

    Args:
        model (nn.Module): The loaded segmentor.
        img (str or np.ndarray): Image filename or loaded image.
        result (list): The segmentation result.
        palette (list[list[int]] | None): The palette of segmentation
            map. If None is given, random palette will be generated.
            Default: None
        fig_size (tuple): Unused; kept for API compatibility.
        opacity (float): Opacity of painted segmentation map.
            Default 0.5. Must be in (0, 1] range.
        title (str): Unused; kept for API compatibility.
        block (bool): Unused; kept for API compatibility.

    Returns:
        np.ndarray: The image with the segmentation overlaid, converted
        from BGR to RGB.
    """
    if hasattr(model, 'module'):
        # Unwrap DataParallel-style wrappers to reach show_result().
        model = model.module
    img = model.show_result(
        img, result, palette=palette, show=False, opacity=opacity)
    return mmcv.bgr2rgb(img)
|
RAVE-main/annotator/mmpkg/mmseg/apis/test.py
ADDED
|
@@ -0,0 +1,238 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path as osp
|
| 2 |
+
import pickle
|
| 3 |
+
import shutil
|
| 4 |
+
import tempfile
|
| 5 |
+
|
| 6 |
+
import annotator.mmpkg.mmcv as mmcv
|
| 7 |
+
import numpy as np
|
| 8 |
+
import torch
|
| 9 |
+
import torch.distributed as dist
|
| 10 |
+
from annotator.mmpkg.mmcv.image import tensor2imgs
|
| 11 |
+
from annotator.mmpkg.mmcv.runner import get_dist_info
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def np2tmp(array, temp_file_name=None):
    """Write *array* to a local ``.npy`` file and return its path.

    Args:
        array (ndarray): Ndarray to save.
        temp_file_name (str, optional): Numpy file name. When None, a unique
            temporary ``.npy`` name is generated with
            ``tempfile.NamedTemporaryFile``. Default: None.

    Returns:
        str: The numpy file name the array was saved under.
    """
    target = temp_file_name
    if target is None:
        # delete=False keeps the file on disk after the handle goes away,
        # so the caller can load it back later.
        target = tempfile.NamedTemporaryFile(suffix='.npy', delete=False).name
    np.save(target, array)
    return target
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def single_gpu_test(model,
                    data_loader,
                    show=False,
                    out_dir=None,
                    efficient_test=False,
                    opacity=0.5):
    """Test with single GPU.

    Args:
        model (nn.Module): Model to be tested (expected to be wrapped, since
            visualization calls ``model.module.show_result``).
        data_loader (utils.data.Dataloader): Pytorch data loader.
        show (bool): Whether show results during inference. Default: False.
        out_dir (str, optional): If specified, the results will be dumped into
            the directory to save output results.
        efficient_test (bool): Whether save the results as local numpy files to
            save CPU memory during evaluation. Default: False.
        opacity(float): Opacity of painted segmentation map.
            Default 0.5.
            Must be in (0, 1] range.
    Returns:
        list: The prediction results (or ``.npy`` file paths when
        ``efficient_test`` is set).
    """

    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, **data)

        if show or out_dir:
            # De-normalize the batched tensor back to displayable images.
            img_tensor = data['img'][0]
            img_metas = data['img_metas'][0].data[0]
            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
            assert len(imgs) == len(img_metas)

            for img, img_meta in zip(imgs, img_metas):
                # Crop away the padding, then resize to the original shape.
                h, w, _ = img_meta['img_shape']
                img_show = img[:h, :w, :]

                ori_h, ori_w = img_meta['ori_shape'][:-1]
                img_show = mmcv.imresize(img_show, (ori_w, ori_h))

                if out_dir:
                    out_file = osp.join(out_dir, img_meta['ori_filename'])
                else:
                    out_file = None

                model.module.show_result(
                    img_show,
                    result,
                    palette=dataset.PALETTE,
                    show=show,
                    out_file=out_file,
                    opacity=opacity)

        if isinstance(result, list):
            # Batched result: optionally swap each entry for an on-disk .npy
            # path to keep memory bounded.
            if efficient_test:
                result = [np2tmp(_) for _ in result]
            results.extend(result)
        else:
            if efficient_test:
                result = np2tmp(result)
            results.append(result)

        # One progress tick per sample in this batch.
        batch_size = len(result)
        for _ in range(batch_size):
            prog_bar.update()
    return results
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def multi_gpu_test(model,
                   data_loader,
                   tmpdir=None,
                   gpu_collect=False,
                   efficient_test=False):
    """Test model with multiple gpus.

    This method tests model with multiple gpus and collects the results
    under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
    it encodes results to gpu tensors and use gpu communication for results
    collection. On cpu mode it saves the results on different gpus to 'tmpdir'
    and collects them by the rank 0 worker.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (utils.data.Dataloader): Pytorch data loader.
        tmpdir (str): Path of directory to save the temporary results from
            different gpus under cpu mode.
        gpu_collect (bool): Option to use either gpu or cpu to collect results.
        efficient_test (bool): Whether save the results as local numpy files to
            save CPU memory during evaluation. Default: False.

    Returns:
        list: The prediction results (rank 0 only under cpu collection;
        other ranks receive None from collect_results_cpu).
    """

    model.eval()
    results = []
    dataset = data_loader.dataset
    rank, world_size = get_dist_info()
    if rank == 0:
        # Only rank 0 reports progress to avoid interleaved output.
        prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)

        if isinstance(result, list):
            # Optionally park each prediction on disk to bound memory use.
            if efficient_test:
                result = [np2tmp(_) for _ in result]
            results.extend(result)
        else:
            if efficient_test:
                result = np2tmp(result)
            results.append(result)

        if rank == 0:
            # Advance by the number of samples processed across all ranks.
            batch_size = data['img'][0].size(0)
            for _ in range(batch_size * world_size):
                prog_bar.update()

    # collect results from all ranks
    if gpu_collect:
        results = collect_results_gpu(results, len(dataset))
    else:
        results = collect_results_cpu(results, len(dataset), tmpdir)
    return results
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def collect_results_cpu(result_part, size, tmpdir=None):
    """Collect results with CPU.

    Each rank dumps its partial results to a shared ``tmpdir``; rank 0 then
    loads all parts, interleaves them back into dataset order, and returns
    the combined list. Non-zero ranks return None.
    """
    rank, world_size = get_dist_info()
    # create a tmp dir if it is not specified
    if tmpdir is None:
        MAX_LEN = 512
        # 32 is whitespace
        # Broadcast the rank-0 tmpdir path to all ranks by packing it into a
        # fixed-size uint8 tensor (padded with spaces, stripped after decode).
        dir_tensor = torch.full((MAX_LEN, ),
                                32,
                                dtype=torch.uint8,
                                device='cuda')
        if rank == 0:
            tmpdir = tempfile.mkdtemp()
            tmpdir = torch.tensor(
                bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        dist.broadcast(dir_tensor, 0)
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    # dump the part result to the dir
    mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
    # Wait until every rank has written its part before rank 0 reads.
    dist.barrier()
    # collect all parts
    if rank != 0:
        return None
    else:
        # load results of all parts from tmp dir
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
            part_list.append(mmcv.load(part_file))
        # sort the results: zip interleaves the per-rank parts back into the
        # round-robin order the distributed sampler handed samples out in
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # the dataloader may pad some samples
        ordered_results = ordered_results[:size]
        # remove tmp dir
        shutil.rmtree(tmpdir)
        return ordered_results
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
def collect_results_gpu(result_part, size):
    """Collect results with GPU.

    Pickles each rank's partial results into a CUDA uint8 tensor, all-gathers
    the (padded) tensors, and on rank 0 unpickles and re-interleaves them into
    dataset order. Non-zero ranks return None (implicitly).
    """
    rank, world_size = get_dist_info()
    # dump result part to tensor with pickle
    part_tensor = torch.tensor(
        bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
    # gather all result part tensor shape
    shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
    shape_list = [shape_tensor.clone() for _ in range(world_size)]
    dist.all_gather(shape_list, shape_tensor)
    # padding result part tensor to max length
    # (all_gather requires equal-sized tensors across ranks)
    shape_max = torch.tensor(shape_list).max()
    part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
    part_send[:shape_tensor[0]] = part_tensor
    part_recv_list = [
        part_tensor.new_zeros(shape_max) for _ in range(world_size)
    ]
    # gather all result part
    dist.all_gather(part_recv_list, part_send)

    if rank == 0:
        part_list = []
        for recv, shape in zip(part_recv_list, shape_list):
            # Trim the padding before unpickling each rank's payload.
            part_list.append(
                pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
        # sort the results: zip interleaves per-rank parts back into the
        # round-robin order of the distributed sampler
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # the dataloader may pad some samples
        ordered_results = ordered_results[:size]
        return ordered_results
|
RAVE-main/annotator/mmpkg/mmseg/apis/train.py
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
import warnings
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
from annotator.mmpkg.mmcv.parallel import MMDataParallel, MMDistributedDataParallel
|
| 7 |
+
from annotator.mmpkg.mmcv.runner import build_optimizer, build_runner
|
| 8 |
+
|
| 9 |
+
from annotator.mmpkg.mmseg.core import DistEvalHook, EvalHook
|
| 10 |
+
from annotator.mmpkg.mmseg.datasets import build_dataloader, build_dataset
|
| 11 |
+
from annotator.mmpkg.mmseg.utils import get_root_logger
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def set_random_seed(seed, deterministic=False):
    """Seed every RNG the training loop may draw from.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    # Seed Python, NumPy and (all) torch CUDA generators with the same value.
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)
    if deterministic:
        # Trade cuDNN autotuning speed for bitwise-reproducible kernels.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def train_segmentor(model,
                    dataset,
                    cfg,
                    distributed=False,
                    validate=False,
                    timestamp=None,
                    meta=None):
    """Launch segmentor training.

    Args:
        model (nn.Module): The segmentor to train.
        dataset (Dataset or list[Dataset]): Training dataset(s); a single
            dataset is wrapped into a one-element list.
        cfg (mmcv.Config): Full training config (data, optimizer, runner,
            hooks, resume/load paths, workflow).
        distributed (bool): Whether to train with DistributedDataParallel.
        validate (bool): Whether to register an evaluation hook.
        timestamp (str, optional): Shared timestamp for log file naming.
        meta (dict, optional): Extra metadata passed to the runner.
    """
    logger = get_root_logger(cfg.log_level)

    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.samples_per_gpu,
            cfg.data.workers_per_gpu,
            # cfg.gpus will be ignored if distributed
            len(cfg.gpu_ids),
            dist=distributed,
            seed=cfg.seed,
            drop_last=True) for ds in dataset
    ]

    # put model on gpus
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        # Sets the `find_unused_parameters` parameter in
        # torch.nn.parallel.DistributedDataParallel
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)
    else:
        model = MMDataParallel(
            model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)

    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)

    if cfg.get('runner') is None:
        # Legacy configs predate the `runner` section; synthesize one.
        cfg.runner = {'type': 'IterBasedRunner', 'max_iters': cfg.total_iters}
        warnings.warn(
            'config is now expected to have a `runner` section, '
            'please set `runner` in your config.', UserWarning)

    runner = build_runner(
        cfg.runner,
        default_args=dict(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=cfg.work_dir,
            logger=logger,
            meta=meta))

    # register hooks
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config,
                                   cfg.get('momentum_config', None))

    # an ugly walkaround to make the .log and .log.json filenames the same
    runner.timestamp = timestamp

    # register eval hooks
    if validate:
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(
            val_dataset,
            samples_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False)
        eval_cfg = cfg.get('evaluation', {})
        # Iteration-based runners evaluate by iteration, not by epoch.
        eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
        eval_hook = DistEvalHook if distributed else EvalHook
        runner.register_hook(eval_hook(val_dataloader, **eval_cfg), priority='LOW')

    # resume_from takes precedence over load_from
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow)
|
RAVE-main/annotator/mmpkg/mmseg/core/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .evaluation import * # noqa: F401, F403
|
| 2 |
+
from .seg import * # noqa: F401, F403
|
| 3 |
+
from .utils import * # noqa: F401, F403
|
RAVE-main/annotator/mmpkg/mmseg/core/evaluation/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .class_names import get_classes, get_palette
|
| 2 |
+
from .eval_hooks import DistEvalHook, EvalHook
|
| 3 |
+
from .metrics import eval_metrics, mean_dice, mean_fscore, mean_iou
|
| 4 |
+
|
| 5 |
+
__all__ = [
|
| 6 |
+
'EvalHook', 'DistEvalHook', 'mean_dice', 'mean_iou', 'mean_fscore',
|
| 7 |
+
'eval_metrics', 'get_classes', 'get_palette'
|
| 8 |
+
]
|
RAVE-main/annotator/mmpkg/mmseg/core/evaluation/class_names.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import annotator.mmpkg.mmcv as mmcv
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def cityscapes_classes():
    """Cityscapes class names for external use."""
    # The 19 evaluation classes of the Cityscapes benchmark, in label order.
    names = [
        'road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
        'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
        'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
        'bicycle'
    ]
    return names
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def ade_classes():
    """ADE20K class names for external use.

    Returns:
        list[str]: The 150 SceneParse150 category names, in label order.
    """
    # NOTE(review): 'bed ' carries a trailing space in the upstream label
    # set; kept verbatim so lookups against checkpoint metadata still match.
    return [
        'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ',
        'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth',
        'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car',
        'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug',
        'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe',
        'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column',
        'signboard', 'chest of drawers', 'counter', 'sand', 'sink',
        'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path',
        'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door',
        'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table',
        'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove',
        'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar',
        'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower',
        'chandelier', 'awning', 'streetlight', 'booth', 'television receiver',
        'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister',
        'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van',
        'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything',
        'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent',
        'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank',
        'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake',
        'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce',
        'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen',
        'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass',
        'clock', 'flag'
    ]
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def voc_classes():
    """Pascal VOC class names for external use."""
    # Background plus the 20 Pascal VOC object categories, in label order.
    names = [
        'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
        'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
        'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
        'tvmonitor'
    ]
    return names
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def cityscapes_palette():
    """Cityscapes palette for external use."""
    # One RGB triple per class, aligned index-for-index with
    # cityscapes_classes().
    colors = [
        [128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
        [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
        [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
        [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100],
        [0, 0, 230], [119, 11, 32]
    ]
    return colors
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def ade_palette():
    """ADE20K palette for external use.

    Returns:
        list[list[int]]: One RGB triple per class, aligned index-for-index
        with ade_classes().
    """
    return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
            [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
            [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
            [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
            [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
            [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
            [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
            [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
            [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
            [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
            [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
            [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
            [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
            [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
            [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
            [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
            [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
            [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
            [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
            [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
            [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
            [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
            [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
            [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
            [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
            [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
            [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
            [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
            [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
            [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
            [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
            [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
            [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
            [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
            [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
            [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
            [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
            [102, 255, 0], [92, 0, 255]]
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def voc_palette():
    """Pascal VOC palette for external use."""
    # One RGB triple per class, aligned index-for-index with voc_classes().
    colors = [
        [0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
        [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
        [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128],
        [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0],
        [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]
    ]
    return colors
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
# Maps each canonical dataset name to the aliases accepted by
# get_classes() / get_palette().
dataset_aliases = {
    'cityscapes': ['cityscapes'],
    'ade': ['ade', 'ade20k'],
    'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug']
}
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def get_classes(dataset):
    """Get class names of a dataset.

    Args:
        dataset (str): Canonical dataset name or one of its aliases
            (see ``dataset_aliases``).

    Returns:
        list[str]: Class names of the resolved dataset.

    Raises:
        ValueError: If the name matches no known dataset.
        TypeError: If ``dataset`` is not a string.
    """
    alias2name = {}
    for name, aliases in dataset_aliases.items():
        for alias in aliases:
            alias2name[alias] = name

    # Explicit dispatch table instead of eval() on a constructed function
    # name -- same result, no dynamic code execution.
    class_loaders = {
        'cityscapes': cityscapes_classes,
        'ade': ade_classes,
        'voc': voc_classes,
    }
    if mmcv.is_str(dataset):
        if dataset in alias2name:
            labels = class_loaders[alias2name[dataset]]()
        else:
            raise ValueError(f'Unrecognized dataset: {dataset}')
    else:
        raise TypeError(f'dataset must a str, but got {type(dataset)}')
    return labels
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def get_palette(dataset):
    """Get class palette (RGB) of a dataset.

    Args:
        dataset (str): Dataset name or any of its registered aliases
            (see ``dataset_aliases``).

    Returns:
        list[list[int]]: One ``[R, G, B]`` triplet per class.

    Raises:
        ValueError: If ``dataset`` matches no known alias.
        TypeError: If ``dataset`` is not a string.
    """
    # Invert dataset_aliases: alias -> canonical name.
    alias2name = {
        alias: name
        for name, aliases in dataset_aliases.items()
        for alias in aliases
    }

    if mmcv.is_str(dataset):
        if dataset in alias2name:
            # Resolve the '<name>_palette' helper via a module-global
            # lookup instead of eval(): same behavior for registered
            # datasets, but no arbitrary-code-execution surface.
            labels = globals()[alias2name[dataset] + '_palette']()
        else:
            raise ValueError(f'Unrecognized dataset: {dataset}')
    else:
        raise TypeError(f'dataset must a str, but got {type(dataset)}')
    return labels
|
RAVE-main/annotator/mmpkg/mmseg/core/evaluation/eval_hooks.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path as osp
|
| 2 |
+
|
| 3 |
+
from annotator.mmpkg.mmcv.runner import DistEvalHook as _DistEvalHook
|
| 4 |
+
from annotator.mmpkg.mmcv.runner import EvalHook as _EvalHook
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class EvalHook(_EvalHook):
    """Single GPU EvalHook, with efficient test support.

    Args:
        by_epoch (bool): Determine perform evaluation by epoch or by iteration.
            If set to True, it will perform by epoch. Otherwise, by iteration.
            Default: False.
        efficient_test (bool): Whether save the results as local numpy files to
            save CPU memory during evaluation. Default: False.
    Returns:
        list: The prediction results.
    """

    # Metrics for which a larger value means a better checkpoint.
    greater_keys = ['mIoU', 'mAcc', 'aAcc']

    def __init__(self, *args, by_epoch=False, efficient_test=False, **kwargs):
        super().__init__(*args, by_epoch=by_epoch, **kwargs)
        self.efficient_test = efficient_test

    def after_train_iter(self, runner):
        """After train iteration hook.

        Override default ``single_gpu_test``.
        """
        if self.by_epoch or not self.every_n_iters(runner, self.interval):
            return
        # Imported lazily to avoid a circular import with mmseg.apis.
        from annotator.mmpkg.mmseg.apis import single_gpu_test
        runner.log_buffer.clear()
        results = single_gpu_test(
            runner.model,
            self.dataloader,
            show=False,
            efficient_test=self.efficient_test)
        self.evaluate(runner, results)

    def after_train_epoch(self, runner):
        """After train epoch hook.

        Override default ``single_gpu_test``.
        """
        if not self.by_epoch or not self.every_n_epochs(runner, self.interval):
            return
        from annotator.mmpkg.mmseg.apis import single_gpu_test
        runner.log_buffer.clear()
        # Bug fix: forward efficient_test here as well, so the flag is
        # honored in by_epoch mode (it was previously only applied in the
        # per-iteration path above).
        results = single_gpu_test(
            runner.model,
            self.dataloader,
            show=False,
            efficient_test=self.efficient_test)
        self.evaluate(runner, results)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
class DistEvalHook(_DistEvalHook):
    """Distributed EvalHook, with efficient test support.

    Args:
        by_epoch (bool): Determine perform evaluation by epoch or by iteration.
            If set to True, it will perform by epoch. Otherwise, by iteration.
            Default: False.
        efficient_test (bool): Whether save the results as local numpy files to
            save CPU memory during evaluation. Default: False.
    Returns:
        list: The prediction results.
    """

    # Metrics for which a larger value means a better checkpoint.
    greater_keys = ['mIoU', 'mAcc', 'aAcc']

    def __init__(self, *args, by_epoch=False, efficient_test=False, **kwargs):
        super().__init__(*args, by_epoch=by_epoch, **kwargs)
        self.efficient_test = efficient_test

    def after_train_iter(self, runner):
        """After train iteration hook.

        Override default ``multi_gpu_test``.
        """
        if self.by_epoch or not self.every_n_iters(runner, self.interval):
            return
        # Imported lazily to avoid a circular import with mmseg.apis.
        from annotator.mmpkg.mmseg.apis import multi_gpu_test
        runner.log_buffer.clear()
        results = multi_gpu_test(
            runner.model,
            self.dataloader,
            tmpdir=osp.join(runner.work_dir, '.eval_hook'),
            gpu_collect=self.gpu_collect,
            efficient_test=self.efficient_test)
        # Only rank 0 collects results and runs the evaluation.
        if runner.rank == 0:
            print('\n')
            self.evaluate(runner, results)

    def after_train_epoch(self, runner):
        """After train epoch hook.

        Override default ``multi_gpu_test``.
        """
        if not self.by_epoch or not self.every_n_epochs(runner, self.interval):
            return
        from annotator.mmpkg.mmseg.apis import multi_gpu_test
        runner.log_buffer.clear()
        # Bug fix: forward efficient_test here as well, so the flag is
        # honored in by_epoch mode (it was previously only applied in the
        # per-iteration path above).
        results = multi_gpu_test(
            runner.model,
            self.dataloader,
            tmpdir=osp.join(runner.work_dir, '.eval_hook'),
            gpu_collect=self.gpu_collect,
            efficient_test=self.efficient_test)
        if runner.rank == 0:
            print('\n')
            self.evaluate(runner, results)
|
RAVE-main/annotator/mmpkg/mmseg/core/evaluation/metrics.py
ADDED
|
@@ -0,0 +1,326 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections import OrderedDict
|
| 2 |
+
|
| 3 |
+
import annotator.mmpkg.mmcv as mmcv
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def f_score(precision, recall, beta=1):
    """Compute the F-score from precision and recall.

    Args:
        precision (float | torch.Tensor): The precision value.
        recall (float | torch.Tensor): The recall value.
        beta (int): Determines the weight of recall in the combined score.
            Default: 1.

    Returns:
        float | torch.Tensor: The f-score value.
    """
    beta_sq = beta**2
    return (1 + beta_sq) * precision * recall / (beta_sq * precision + recall)
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def intersect_and_union(pred_label,
                        label,
                        num_classes,
                        ignore_index,
                        label_map=dict(),
                        reduce_zero_label=False):
    """Calculate intersection and Union.

    Args:
        pred_label (ndarray | str): Prediction segmentation map
            or predict result filename (.npy).
        label (ndarray | str): Ground truth segmentation map
            or label filename.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        label_map (dict): Mapping old labels to new labels. The parameter will
            work only when label is str. Default: dict().
        reduce_zero_label (bool): Whether ignore zero label. The parameter will
            work only when label is str. Default: False.

    Returns:
        torch.Tensor: The intersection of prediction and ground truth
            histogram on all classes.
        torch.Tensor: The union of prediction and ground truth histogram on
            all classes.
        torch.Tensor: The prediction histogram on all classes.
        torch.Tensor: The ground truth histogram on all classes.
    """
    # Accept either in-memory arrays or on-disk files for both inputs.
    if isinstance(pred_label, str):
        pred_label = torch.from_numpy(np.load(pred_label))
    else:
        pred_label = torch.from_numpy(pred_label)

    if isinstance(label, str):
        label = torch.from_numpy(
            mmcv.imread(label, flag='unchanged', backend='pillow'))
    else:
        label = torch.from_numpy(label)

    # Remap ground-truth ids as configured.
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    if reduce_zero_label:
        # Shift every label down by one so original class 0 becomes the
        # ignore value 255 (254 appears when 255 itself was shifted).
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    # Drop all pixels marked as ignore before building histograms.
    keep = label != ignore_index
    pred_label = pred_label[keep]
    label = label[keep]

    def _class_hist(values):
        # Per-class pixel counts over [0, num_classes).
        return torch.histc(
            values.float(), bins=num_classes, min=0, max=num_classes - 1)

    area_intersect = _class_hist(pred_label[pred_label == label])
    area_pred_label = _class_hist(pred_label)
    area_label = _class_hist(label)
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def total_intersect_and_union(results,
                              gt_seg_maps,
                              num_classes,
                              ignore_index,
                              label_map=dict(),
                              reduce_zero_label=False):
    """Calculate Total Intersection and Union over a list of images.

    Args:
        results (list[ndarray] | list[str]): List of prediction segmentation
            maps or list of prediction result filenames.
        gt_seg_maps (list[ndarray] | list[str]): list of ground truth
            segmentation maps or list of label filenames.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether ignore zero label. Default: False.

    Returns:
        ndarray: The intersection of prediction and ground truth histogram
            on all classes.
        ndarray: The union of prediction and ground truth histogram on all
            classes.
        ndarray: The prediction histogram on all classes.
        ndarray: The ground truth histogram on all classes.
    """
    assert len(gt_seg_maps) == len(results)
    # Four running totals: intersect, union, prediction area, label area.
    # float64 avoids precision loss when summing many images.
    totals = [
        torch.zeros((num_classes, ), dtype=torch.float64) for _ in range(4)
    ]
    for result, gt_seg_map in zip(results, gt_seg_maps):
        per_image = intersect_and_union(result, gt_seg_map, num_classes,
                                        ignore_index, label_map,
                                        reduce_zero_label)
        for total, area in zip(totals, per_image):
            total += area
    return totals[0], totals[1], totals[2], totals[3]
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def mean_iou(results,
             gt_seg_maps,
             num_classes,
             ignore_index,
             nan_to_num=None,
             label_map=dict(),
             reduce_zero_label=False):
    """Calculate Mean Intersection and Union (mIoU).

    Thin wrapper over :func:`eval_metrics` with ``metrics=['mIoU']``.

    Args:
        results (list[ndarray] | list[str]): List of prediction segmentation
            maps or list of prediction result filenames.
        gt_seg_maps (list[ndarray] | list[str]): list of ground truth
            segmentation maps or list of label filenames.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        nan_to_num (int, optional): If specified, NaN values will be replaced
            by the numbers defined by the user. Default: None.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether ignore zero label. Default: False.

    Returns:
        dict[str, float | ndarray]:
            <aAcc> float: Overall accuracy on all images.
            <Acc> ndarray: Per category accuracy, shape (num_classes, ).
            <IoU> ndarray: Per category IoU, shape (num_classes, ).
    """
    return eval_metrics(
        results=results,
        gt_seg_maps=gt_seg_maps,
        num_classes=num_classes,
        ignore_index=ignore_index,
        metrics=['mIoU'],
        nan_to_num=nan_to_num,
        label_map=label_map,
        reduce_zero_label=reduce_zero_label)
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def mean_dice(results,
              gt_seg_maps,
              num_classes,
              ignore_index,
              nan_to_num=None,
              label_map=dict(),
              reduce_zero_label=False):
    """Calculate Mean Dice (mDice).

    Thin wrapper over :func:`eval_metrics` with ``metrics=['mDice']``.

    Args:
        results (list[ndarray] | list[str]): List of prediction segmentation
            maps or list of prediction result filenames.
        gt_seg_maps (list[ndarray] | list[str]): list of ground truth
            segmentation maps or list of label filenames.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        nan_to_num (int, optional): If specified, NaN values will be replaced
            by the numbers defined by the user. Default: None.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether ignore zero label. Default: False.

    Returns:
        dict[str, float | ndarray]: Default metrics.
            <aAcc> float: Overall accuracy on all images.
            <Acc> ndarray: Per category accuracy, shape (num_classes, ).
            <Dice> ndarray: Per category dice, shape (num_classes, ).
    """
    return eval_metrics(
        results=results,
        gt_seg_maps=gt_seg_maps,
        num_classes=num_classes,
        ignore_index=ignore_index,
        metrics=['mDice'],
        nan_to_num=nan_to_num,
        label_map=label_map,
        reduce_zero_label=reduce_zero_label)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def mean_fscore(results,
                gt_seg_maps,
                num_classes,
                ignore_index,
                nan_to_num=None,
                label_map=dict(),
                reduce_zero_label=False,
                beta=1):
    """Calculate Mean F-Score (mFscore).

    Thin wrapper over :func:`eval_metrics` with ``metrics=['mFscore']``.

    Args:
        results (list[ndarray] | list[str]): List of prediction segmentation
            maps or list of prediction result filenames.
        gt_seg_maps (list[ndarray] | list[str]): list of ground truth
            segmentation maps or list of label filenames.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        nan_to_num (int, optional): If specified, NaN values will be replaced
            by the numbers defined by the user. Default: None.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether ignore zero label. Default: False.
        beta (int): Determines the weight of recall in the combined score.
            Default: 1.

    Returns:
        dict[str, float | ndarray]: Default metrics.
            <aAcc> float: Overall accuracy on all images.
            <Fscore> ndarray: Per category f-score, shape (num_classes, ).
            <Precision> ndarray: Per category precision, shape (num_classes, ).
            <Recall> ndarray: Per category recall, shape (num_classes, ).
    """
    return eval_metrics(
        results=results,
        gt_seg_maps=gt_seg_maps,
        num_classes=num_classes,
        ignore_index=ignore_index,
        metrics=['mFscore'],
        nan_to_num=nan_to_num,
        label_map=label_map,
        reduce_zero_label=reduce_zero_label,
        beta=beta)
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
def eval_metrics(results,
                 gt_seg_maps,
                 num_classes,
                 ignore_index,
                 metrics=['mIoU'],
                 nan_to_num=None,
                 label_map=dict(),
                 reduce_zero_label=False,
                 beta=1):
    """Calculate evaluation metrics.

    Args:
        results (list[ndarray] | list[str]): List of prediction segmentation
            maps or list of prediction result filenames.
        gt_seg_maps (list[ndarray] | list[str]): list of ground truth
            segmentation maps or list of label filenames.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        metrics (list[str] | str): Metrics to be evaluated, 'mIoU', 'mDice'
            and 'mFscore'.
        nan_to_num (int, optional): If specified, NaN values will be replaced
            by the numbers defined by the user. Default: None.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether ignore zero label. Default: False.
        beta (int): Weight of recall in the F-score. Default: 1.
    Returns:
        dict[str, float | ndarray]: 'aAcc' plus per-class arrays for each
            requested metric, shape (num_classes, ).
    """
    if isinstance(metrics, str):
        metrics = [metrics]
    allowed_metrics = ['mIoU', 'mDice', 'mFscore']
    if not set(metrics).issubset(set(allowed_metrics)):
        raise KeyError('metrics {} is not supported'.format(metrics))

    (total_area_intersect, total_area_union, total_area_pred_label,
     total_area_label) = total_intersect_and_union(results, gt_seg_maps,
                                                   num_classes, ignore_index,
                                                   label_map,
                                                   reduce_zero_label)

    # Overall pixel accuracy is always reported.
    ret_metrics = OrderedDict(
        {'aAcc': total_area_intersect.sum() / total_area_label.sum()})
    for metric in metrics:
        if metric == 'mIoU':
            ret_metrics['IoU'] = total_area_intersect / total_area_union
            ret_metrics['Acc'] = total_area_intersect / total_area_label
        elif metric == 'mDice':
            ret_metrics['Dice'] = 2 * total_area_intersect / (
                total_area_pred_label + total_area_label)
            ret_metrics['Acc'] = total_area_intersect / total_area_label
        elif metric == 'mFscore':
            precision = total_area_intersect / total_area_pred_label
            recall = total_area_intersect / total_area_label
            ret_metrics['Fscore'] = torch.tensor(
                [f_score(p, r, beta) for p, r in zip(precision, recall)])
            ret_metrics['Precision'] = precision
            ret_metrics['Recall'] = recall

    # Hand back plain numpy arrays, optionally sanitizing NaNs (which
    # appear for classes absent from both prediction and ground truth).
    ret_metrics = {
        name: value.numpy()
        for name, value in ret_metrics.items()
    }
    if nan_to_num is not None:
        ret_metrics = OrderedDict({
            name: np.nan_to_num(value, nan=nan_to_num)
            for name, value in ret_metrics.items()
        })
    return ret_metrics
|
RAVE-main/annotator/mmpkg/mmseg/core/seg/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .builder import build_pixel_sampler
|
| 2 |
+
from .sampler import BasePixelSampler, OHEMPixelSampler
|
| 3 |
+
|
| 4 |
+
__all__ = ['build_pixel_sampler', 'BasePixelSampler', 'OHEMPixelSampler']
|
RAVE-main/annotator/mmpkg/mmseg/core/seg/builder.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from annotator.mmpkg.mmcv.utils import Registry, build_from_cfg
|
| 2 |
+
|
| 3 |
+
# Global registry of pixel sampler classes (populated via
# @PIXEL_SAMPLERS.register_module()).
PIXEL_SAMPLERS = Registry('pixel sampler')


def build_pixel_sampler(cfg, **default_args):
    """Build pixel sampler for segmentation map.

    Args:
        cfg (dict): Config dict whose ``type`` names a registered sampler.
        **default_args: Fallback constructor arguments merged into ``cfg``.

    Returns:
        The instantiated pixel sampler.
    """
    sampler = build_from_cfg(cfg, PIXEL_SAMPLERS, default_args)
    return sampler
|
RAVE-main/annotator/mmpkg/mmseg/core/seg/sampler/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .base_pixel_sampler import BasePixelSampler
|
| 2 |
+
from .ohem_pixel_sampler import OHEMPixelSampler
|
| 3 |
+
|
| 4 |
+
__all__ = ['BasePixelSampler', 'OHEMPixelSampler']
|
RAVE-main/annotator/mmpkg/mmseg/core/seg/sampler/base_pixel_sampler.py
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from abc import ABCMeta, abstractmethod
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class BasePixelSampler(metaclass=ABCMeta):
    """Base class of pixel sampler.

    Concrete samplers must implement :meth:`sample`.
    """

    def __init__(self, **kwargs):
        # Accepts and ignores arbitrary kwargs so config-driven
        # construction can pass extra fields without failing.
        pass

    @abstractmethod
    def sample(self, seg_logit, seg_label):
        """Placeholder for sample function."""
|
RAVE-main/annotator/mmpkg/mmseg/core/seg/sampler/ohem_pixel_sampler.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn.functional as F
|
| 3 |
+
|
| 4 |
+
from ..builder import PIXEL_SAMPLERS
|
| 5 |
+
from .base_pixel_sampler import BasePixelSampler
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@PIXEL_SAMPLERS.register_module()
class OHEMPixelSampler(BasePixelSampler):
    """Online Hard Example Mining Sampler for segmentation.

    Args:
        context (nn.Module): The context of sampler, subclass of
            :obj:`BaseDecodeHead`.
        thresh (float, optional): The threshold for hard example selection.
            Below which, are prediction with low confidence. If not
            specified, the hard examples will be pixels of top ``min_kept``
            loss. Default: None.
        min_kept (int, optional): The minimum number of predictions to keep.
            Default: 100000.
    """

    def __init__(self, context, thresh=None, min_kept=100000):
        super(OHEMPixelSampler, self).__init__()
        # context provides ignore_index and loss_decode used by sample().
        self.context = context
        assert min_kept > 1
        self.thresh = thresh
        self.min_kept = min_kept

    def sample(self, seg_logit, seg_label):
        """Sample pixels that have high loss or with low prediction confidence.

        Args:
            seg_logit (torch.Tensor): segmentation logits, shape (N, C, H, W)
            seg_label (torch.Tensor): segmentation label, shape (N, 1, H, W)

        Returns:
            torch.Tensor: segmentation weight, shape (N, H, W)
        """
        # Selection only — no gradients should flow through the weighting.
        with torch.no_grad():
            assert seg_logit.shape[2:] == seg_label.shape[2:]
            assert seg_label.shape[1] == 1
            seg_label = seg_label.squeeze(1).long()
            # Keep at least min_kept pixels per image in the batch.
            batch_kept = self.min_kept * seg_label.size(0)
            valid_mask = seg_label != self.context.ignore_index
            # Weights start at 0; selected (hard) pixels are set to 1 below.
            seg_weight = seg_logit.new_zeros(size=seg_label.size())
            valid_seg_weight = seg_weight[valid_mask]
            if self.thresh is not None:
                # Confidence-based mining: mark pixels whose predicted
                # probability for the ground-truth class is below threshold.
                seg_prob = F.softmax(seg_logit, dim=1)

                # Temporarily map ignore labels to class 0 so gather() has
                # a valid index; those pixels are excluded via valid_mask.
                tmp_seg_label = seg_label.clone().unsqueeze(1)
                tmp_seg_label[tmp_seg_label == self.context.ignore_index] = 0
                seg_prob = seg_prob.gather(1, tmp_seg_label).squeeze(1)
                sort_prob, sort_indices = seg_prob[valid_mask].sort()

                if sort_prob.numel() > 0:
                    # Lower the threshold if needed so that at least
                    # batch_kept pixels are selected.
                    min_threshold = sort_prob[min(batch_kept,
                                                  sort_prob.numel() - 1)]
                else:
                    min_threshold = 0.0
                threshold = max(min_threshold, self.thresh)
                valid_seg_weight[seg_prob[valid_mask] < threshold] = 1.
            else:
                # Loss-based mining: keep the batch_kept highest-loss pixels.
                losses = self.context.loss_decode(
                    seg_logit,
                    seg_label,
                    weight=None,
                    ignore_index=self.context.ignore_index,
                    reduction_override='none')
                # faster than topk according to https://github.com/pytorch/pytorch/issues/22812 # noqa
                _, sort_indices = losses[valid_mask].sort(descending=True)
                valid_seg_weight[sort_indices[:batch_kept]] = 1.

            # Scatter the selection back into the full-size weight map.
            seg_weight[valid_mask] = valid_seg_weight

        return seg_weight
|
RAVE-main/annotator/mmpkg/mmseg/core/utils/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .misc import add_prefix
|
| 2 |
+
|
| 3 |
+
__all__ = ['add_prefix']
|
RAVE-main/annotator/mmpkg/mmseg/core/utils/misc.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def add_prefix(inputs, prefix):
    """Add prefix for dict.

    Args:
        inputs (dict): The input dict with str keys.
        prefix (str): The prefix to add.

    Returns:
        dict: A new dict whose keys are ``'{prefix}.{name}'``.
    """
    return {f'{prefix}.{name}': value for name, value in inputs.items()}
|
RAVE-main/annotator/mmpkg/mmseg/datasets/__init__.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .ade import ADE20KDataset
|
| 2 |
+
from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
|
| 3 |
+
from .chase_db1 import ChaseDB1Dataset
|
| 4 |
+
from .cityscapes import CityscapesDataset
|
| 5 |
+
from .custom import CustomDataset
|
| 6 |
+
from .dataset_wrappers import ConcatDataset, RepeatDataset
|
| 7 |
+
from .drive import DRIVEDataset
|
| 8 |
+
from .hrf import HRFDataset
|
| 9 |
+
from .pascal_context import PascalContextDataset, PascalContextDataset59
|
| 10 |
+
from .stare import STAREDataset
|
| 11 |
+
from .voc import PascalVOCDataset
|
| 12 |
+
|
| 13 |
+
__all__ = [
|
| 14 |
+
'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',
|
| 15 |
+
'DATASETS', 'build_dataset', 'PIPELINES', 'CityscapesDataset',
|
| 16 |
+
'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset',
|
| 17 |
+
'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset',
|
| 18 |
+
'STAREDataset'
|
| 19 |
+
]
|
RAVE-main/annotator/mmpkg/mmseg/datasets/cityscapes.py
ADDED
|
@@ -0,0 +1,217 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path as osp
|
| 2 |
+
import tempfile
|
| 3 |
+
|
| 4 |
+
import annotator.mmpkg.mmcv as mmcv
|
| 5 |
+
import numpy as np
|
| 6 |
+
from annotator.mmpkg.mmcv.utils import print_log
|
| 7 |
+
from PIL import Image
|
| 8 |
+
|
| 9 |
+
from .builder import DATASETS
|
| 10 |
+
from .custom import CustomDataset
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@DATASETS.register_module()
class CityscapesDataset(CustomDataset):
    """Cityscapes dataset.

    The ``img_suffix`` is fixed to '_leftImg8bit.png' and ``seg_map_suffix`` is
    fixed to '_gtFine_labelTrainIds.png' for Cityscapes dataset.
    """

    CLASSES = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
               'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
               'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
               'bicycle')

    PALETTE = [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
               [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
               [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
               [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100],
               [0, 80, 100], [0, 0, 230], [119, 11, 32]]

    def __init__(self, **kwargs):
        super(CityscapesDataset, self).__init__(
            img_suffix='_leftImg8bit.png',
            seg_map_suffix='_gtFine_labelTrainIds.png',
            **kwargs)

    @staticmethod
    def _convert_to_label_id(result):
        """Convert trainId to id for cityscapes."""
        if isinstance(result, str):
            result = np.load(result)
        import cityscapesscripts.helpers.labels as CSLabels
        # Rewrite through a copy so a pixel already mapped to a label id is
        # never re-matched by a later trainId comparison against `result`.
        result_copy = result.copy()
        for trainId, label in CSLabels.trainId2label.items():
            result_copy[result == trainId] = label.id

        return result_copy

    def results2img(self, results, imgfile_prefix, to_label_id):
        """Write the segmentation results to images.

        Args:
            results (list[list | tuple | ndarray]): Testing results of the
                dataset.
            imgfile_prefix (str): The filename prefix of the png files.
                If the prefix is "somepath/xxx",
                the png files will be named "somepath/xxx.png".
            to_label_id (bool): whether convert output to label_id for
                submission

        Returns:
            list[str: str]: result txt files which contains corresponding
            semantic segmentation images.
        """
        mmcv.mkdir_or_exist(imgfile_prefix)
        import cityscapesscripts.helpers.labels as CSLabels
        # The id -> color palette is identical for every image; build it once
        # here instead of rebuilding it inside the per-image loop.
        palette = np.zeros((len(CSLabels.id2label), 3), dtype=np.uint8)
        for label_id, label in CSLabels.id2label.items():
            palette[label_id] = label.color
        result_files = []
        prog_bar = mmcv.ProgressBar(len(self))
        for idx in range(len(self)):
            result = results[idx]
            if to_label_id:
                result = self._convert_to_label_id(result)
            filename = self.img_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]

            png_filename = osp.join(imgfile_prefix, f'{basename}.png')

            # Save as a paletted ('P' mode) PNG so the label image carries the
            # official cityscapes colors.
            output = Image.fromarray(result.astype(np.uint8)).convert('P')
            output.putpalette(palette)
            output.save(png_filename)
            result_files.append(png_filename)
            prog_bar.update()

        return result_files

    def format_results(self, results, imgfile_prefix=None, to_label_id=True):
        """Format the results into dir (standard format for Cityscapes
        evaluation).

        Args:
            results (list): Testing results of the dataset.
            imgfile_prefix (str | None): The prefix of images files. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If not specified, a temp file will be created.
                Default: None.
            to_label_id (bool): whether convert output to label_id for
                submission. Default: True

        Returns:
            tuple: (result_files, tmp_dir), result_files is a list containing
                the image paths, tmp_dir is the temporal directory created
                for saving json/png files when img_prefix is not specified.
        """

        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: '
            f'{len(results)} != {len(self)}')

        if imgfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            imgfile_prefix = tmp_dir.name
        else:
            tmp_dir = None
        result_files = self.results2img(results, imgfile_prefix, to_label_id)

        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric='mIoU',
                 logger=None,
                 imgfile_prefix=None,
                 efficient_test=False):
        """Evaluation in Cityscapes/default protocol.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.
            imgfile_prefix (str | None): The prefix of output image file,
                for cityscapes evaluation only. It includes the file path and
                the prefix of filename, e.g., "a/b/prefix".
                If results are evaluated with cityscapes protocol, it would be
                the prefix of output png files. The output files would be
                png images under folder "a/b/prefix/xxx.png", where "xxx" is
                the image name of cityscapes. If not specified, a temp file
                will be created for evaluation.
                Default: None.

        Returns:
            dict[str, float]: Cityscapes/default metrics.
        """

        eval_results = dict()
        # Copy so we can remove 'cityscapes' without mutating the caller's
        # metric list.
        metrics = metric.copy() if isinstance(metric, list) else [metric]
        if 'cityscapes' in metrics:
            eval_results.update(
                self._evaluate_cityscapes(results, logger, imgfile_prefix))
            metrics.remove('cityscapes')
        if len(metrics) > 0:
            eval_results.update(
                super(CityscapesDataset,
                      self).evaluate(results, metrics, logger, efficient_test))

        return eval_results

    def _evaluate_cityscapes(self, results, logger, imgfile_prefix):
        """Evaluation in Cityscapes protocol.

        Args:
            results (list): Testing results of the dataset.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            imgfile_prefix (str | None): The prefix of output image file

        Returns:
            dict[str: float]: Cityscapes evaluation results.
        """
        try:
            import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as CSEval  # noqa
        except ImportError:
            raise ImportError('Please run "pip install cityscapesscripts" to '
                              'install cityscapesscripts first.')
        msg = 'Evaluating in Cityscapes style'
        if logger is None:
            msg = '\n' + msg
        print_log(msg, logger=logger)

        result_files, tmp_dir = self.format_results(results, imgfile_prefix)

        if tmp_dir is None:
            result_dir = imgfile_prefix
        else:
            result_dir = tmp_dir.name

        eval_results = dict()
        print_log(f'Evaluating results under {result_dir} ...', logger=logger)

        CSEval.args.evalInstLevelScore = True
        CSEval.args.predictionPath = osp.abspath(result_dir)
        CSEval.args.evalPixelAccuracy = True
        CSEval.args.JSONOutput = False

        seg_map_list = []
        pred_list = []

        # when evaluating with official cityscapesscripts,
        # **_gtFine_labelIds.png is used
        for seg_map in mmcv.scandir(
                self.ann_dir, 'gtFine_labelIds.png', recursive=True):
            seg_map_list.append(osp.join(self.ann_dir, seg_map))
            pred_list.append(CSEval.getPrediction(CSEval.args, seg_map))

        eval_results.update(
            CSEval.evaluateImgLists(pred_list, seg_map_list, CSEval.args))

        if tmp_dir is not None:
            tmp_dir.cleanup()

        return eval_results
|
RAVE-main/annotator/mmpkg/mmseg/datasets/custom.py
ADDED
|
@@ -0,0 +1,403 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import os.path as osp
|
| 3 |
+
from collections import OrderedDict
|
| 4 |
+
from functools import reduce
|
| 5 |
+
|
| 6 |
+
import annotator.mmpkg.mmcv as mmcv
|
| 7 |
+
import numpy as np
|
| 8 |
+
from annotator.mmpkg.mmcv.utils import print_log
|
| 9 |
+
from torch.utils.data import Dataset
|
| 10 |
+
|
| 11 |
+
from annotator.mmpkg.mmseg.core import eval_metrics
|
| 12 |
+
from annotator.mmpkg.mmseg.utils import get_root_logger
|
| 13 |
+
from .builder import DATASETS
|
| 14 |
+
from .pipelines import Compose
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@DATASETS.register_module()
class CustomDataset(Dataset):
    """Custom dataset for semantic segmentation. An example of file structure
    is as followed.

    .. code-block:: none

        ├── data
        │   ├── my_dataset
        │   │   ├── img_dir
        │   │   │   ├── train
        │   │   │   │   ├── xxx{img_suffix}
        │   │   │   │   ├── yyy{img_suffix}
        │   │   │   │   ├── zzz{img_suffix}
        │   │   │   ├── val
        │   │   ├── ann_dir
        │   │   │   ├── train
        │   │   │   │   ├── xxx{seg_map_suffix}
        │   │   │   │   ├── yyy{seg_map_suffix}
        │   │   │   │   ├── zzz{seg_map_suffix}
        │   │   │   ├── val

    The img/gt_semantic_seg pair of CustomDataset should be of the same
    except suffix. A valid img/gt_semantic_seg filename pair should be like
    ``xxx{img_suffix}`` and ``xxx{seg_map_suffix}`` (extension is also included
    in the suffix). If split is given, then ``xxx`` is specified in txt file.
    Otherwise, all files in ``img_dir/``and ``ann_dir`` will be loaded.
    Please refer to ``docs/tutorials/new_dataset.md`` for more details.


    Args:
        pipeline (list[dict]): Processing pipeline
        img_dir (str): Path to image directory
        img_suffix (str): Suffix of images. Default: '.jpg'
        ann_dir (str, optional): Path to annotation directory. Default: None
        seg_map_suffix (str): Suffix of segmentation maps. Default: '.png'
        split (str, optional): Split txt file. If split is specified, only
            file with suffix in the splits will be loaded. Otherwise, all
            images in img_dir/ann_dir will be loaded. Default: None
        data_root (str, optional): Data root for img_dir/ann_dir. Default:
            None.
        test_mode (bool): If test_mode=True, gt wouldn't be loaded.
        ignore_index (int): The label index to be ignored. Default: 255
        reduce_zero_label (bool): Whether to mark label zero as ignored.
            Default: False
        classes (str | Sequence[str], optional): Specify classes to load.
            If is None, ``cls.CLASSES`` will be used. Default: None.
        palette (Sequence[Sequence[int]]] | np.ndarray | None):
            The palette of segmentation map. If None is given, and
            self.PALETTE is None, random palette will be generated.
            Default: None
    """

    CLASSES = None

    PALETTE = None

    def __init__(self,
                 pipeline,
                 img_dir,
                 img_suffix='.jpg',
                 ann_dir=None,
                 seg_map_suffix='.png',
                 split=None,
                 data_root=None,
                 test_mode=False,
                 ignore_index=255,
                 reduce_zero_label=False,
                 classes=None,
                 palette=None):
        self.pipeline = Compose(pipeline)
        self.img_dir = img_dir
        self.img_suffix = img_suffix
        self.ann_dir = ann_dir
        self.seg_map_suffix = seg_map_suffix
        self.split = split
        self.data_root = data_root
        self.test_mode = test_mode
        self.ignore_index = ignore_index
        self.reduce_zero_label = reduce_zero_label
        self.label_map = None
        self.CLASSES, self.PALETTE = self.get_classes_and_palette(
            classes, palette)

        # join paths if data_root is specified
        if self.data_root is not None:
            if not osp.isabs(self.img_dir):
                self.img_dir = osp.join(self.data_root, self.img_dir)
            if not (self.ann_dir is None or osp.isabs(self.ann_dir)):
                self.ann_dir = osp.join(self.data_root, self.ann_dir)
            if not (self.split is None or osp.isabs(self.split)):
                self.split = osp.join(self.data_root, self.split)

        # load annotations
        self.img_infos = self.load_annotations(self.img_dir, self.img_suffix,
                                               self.ann_dir,
                                               self.seg_map_suffix, self.split)

    def __len__(self):
        """Total number of samples of data."""
        return len(self.img_infos)

    def load_annotations(self, img_dir, img_suffix, ann_dir, seg_map_suffix,
                         split):
        """Load annotation from directory.

        Args:
            img_dir (str): Path to image directory
            img_suffix (str): Suffix of images.
            ann_dir (str|None): Path to annotation directory.
            seg_map_suffix (str|None): Suffix of segmentation maps.
            split (str|None): Split txt file. If split is specified, only file
                with suffix in the splits will be loaded. Otherwise, all images
                in img_dir/ann_dir will be loaded. Default: None

        Returns:
            list[dict]: All image info of dataset.
        """

        img_infos = []
        if split is not None:
            # Each non-empty line of the split file is one sample stem.
            with open(split) as f:
                for line in f:
                    img_name = line.strip()
                    img_info = dict(filename=img_name + img_suffix)
                    if ann_dir is not None:
                        seg_map = img_name + seg_map_suffix
                        img_info['ann'] = dict(seg_map=seg_map)
                    img_infos.append(img_info)
        else:
            for img in mmcv.scandir(img_dir, img_suffix, recursive=True):
                img_info = dict(filename=img)
                if ann_dir is not None:
                    seg_map = img.replace(img_suffix, seg_map_suffix)
                    img_info['ann'] = dict(seg_map=seg_map)
                img_infos.append(img_info)

        print_log(f'Loaded {len(img_infos)} images', logger=get_root_logger())
        return img_infos

    def get_ann_info(self, idx):
        """Get annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """

        return self.img_infos[idx]['ann']

    def pre_pipeline(self, results):
        """Prepare results dict for pipeline."""
        results['seg_fields'] = []
        results['img_prefix'] = self.img_dir
        results['seg_prefix'] = self.ann_dir
        if self.custom_classes:
            results['label_map'] = self.label_map

    def __getitem__(self, idx):
        """Get training/test data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training/test data (with annotation if `test_mode` is set
                False).
        """

        if self.test_mode:
            return self.prepare_test_img(idx)
        else:
            return self.prepare_train_img(idx)

    def prepare_train_img(self, idx):
        """Get training data and annotations after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training data and annotation after pipeline with new keys
                introduced by pipeline.
        """

        img_info = self.img_infos[idx]
        ann_info = self.get_ann_info(idx)
        results = dict(img_info=img_info, ann_info=ann_info)
        self.pre_pipeline(results)
        return self.pipeline(results)

    def prepare_test_img(self, idx):
        """Get testing data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Testing data after pipeline with new keys introduced by
                pipeline.
        """

        img_info = self.img_infos[idx]
        results = dict(img_info=img_info)
        self.pre_pipeline(results)
        return self.pipeline(results)

    def format_results(self, results, **kwargs):
        """Place holder to format result to dataset specific output."""

    def get_gt_seg_maps(self, efficient_test=False):
        """Get ground truth segmentation maps for evaluation."""
        gt_seg_maps = []
        for img_info in self.img_infos:
            seg_map = osp.join(self.ann_dir, img_info['ann']['seg_map'])
            if efficient_test:
                # Defer reading: return paths instead of decoded arrays.
                gt_seg_map = seg_map
            else:
                gt_seg_map = mmcv.imread(
                    seg_map, flag='unchanged', backend='pillow')
            gt_seg_maps.append(gt_seg_map)
        return gt_seg_maps

    def get_classes_and_palette(self, classes=None, palette=None):
        """Get class names of current dataset.

        Args:
            classes (Sequence[str] | str | None): If classes is None, use
                default CLASSES defined by builtin dataset. If classes is a
                string, take it as a file name. The file contains the name of
                classes where each line contains one class name. If classes is
                a tuple or list, override the CLASSES defined by the dataset.
            palette (Sequence[Sequence[int]]] | np.ndarray | None):
                The palette of segmentation map. If None is given, random
                palette will be generated. Default: None
        """
        if classes is None:
            self.custom_classes = False
            return self.CLASSES, self.PALETTE

        self.custom_classes = True
        if isinstance(classes, str):
            # take it as a file path
            class_names = mmcv.list_from_file(classes)
        elif isinstance(classes, (tuple, list)):
            class_names = classes
        else:
            raise ValueError(f'Unsupported type {type(classes)} of classes.')

        if self.CLASSES:
            if not set(class_names).issubset(self.CLASSES):
                raise ValueError('classes is not a subset of CLASSES.')

            # dictionary, its keys are the old label ids and its values
            # are the new label ids.
            # used for changing pixel labels in load_annotations.
            self.label_map = {}
            for i, c in enumerate(self.CLASSES):
                if c not in class_names:
                    self.label_map[i] = -1
                else:
                    # Index into class_names (the normalized list), not the
                    # raw `classes` argument: when `classes` is a file path
                    # string, `classes.index(c)` would do a substring search
                    # and yield meaningless label ids.
                    self.label_map[i] = class_names.index(c)

        palette = self.get_palette_for_custom_classes(class_names, palette)

        return class_names, palette

    def get_palette_for_custom_classes(self, class_names, palette=None):

        if self.label_map is not None:
            # return subset of palette
            palette = []
            for old_id, new_id in sorted(
                    self.label_map.items(), key=lambda x: x[1]):
                if new_id != -1:
                    palette.append(self.PALETTE[old_id])
            palette = type(self.PALETTE)(palette)

        elif palette is None:
            if self.PALETTE is None:
                palette = np.random.randint(0, 255, size=(len(class_names), 3))
            else:
                palette = self.PALETTE

        return palette

    def evaluate(self,
                 results,
                 metric='mIoU',
                 logger=None,
                 efficient_test=False,
                 **kwargs):
        """Evaluate the dataset.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. 'mIoU',
                'mDice' and 'mFscore' are supported.
            logger (logging.Logger | None | str): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str, float]: Default metrics.
        """

        if isinstance(metric, str):
            metric = [metric]
        allowed_metrics = ['mIoU', 'mDice', 'mFscore']
        if not set(metric).issubset(set(allowed_metrics)):
            raise KeyError('metric {} is not supported'.format(metric))
        eval_results = {}
        gt_seg_maps = self.get_gt_seg_maps(efficient_test)
        if self.CLASSES is None:
            # No declared classes: infer the count from the labels that
            # actually occur in the ground truth.
            num_classes = len(
                reduce(np.union1d, [np.unique(_) for _ in gt_seg_maps]))
        else:
            num_classes = len(self.CLASSES)
        ret_metrics = eval_metrics(
            results,
            gt_seg_maps,
            num_classes,
            self.ignore_index,
            metric,
            label_map=self.label_map,
            reduce_zero_label=self.reduce_zero_label)

        if self.CLASSES is None:
            class_names = tuple(range(num_classes))
        else:
            class_names = self.CLASSES

        # summary table
        ret_metrics_summary = OrderedDict({
            ret_metric: np.round(np.nanmean(ret_metric_value) * 100, 2)
            for ret_metric, ret_metric_value in ret_metrics.items()
        })

        # each class table
        ret_metrics.pop('aAcc', None)
        ret_metrics_class = OrderedDict({
            ret_metric: np.round(ret_metric_value * 100, 2)
            for ret_metric, ret_metric_value in ret_metrics.items()
        })
        ret_metrics_class.update({'Class': class_names})
        ret_metrics_class.move_to_end('Class', last=False)

        try:
            from prettytable import PrettyTable
            # for logger
            class_table_data = PrettyTable()
            for key, val in ret_metrics_class.items():
                class_table_data.add_column(key, val)

            summary_table_data = PrettyTable()
            for key, val in ret_metrics_summary.items():
                if key == 'aAcc':
                    summary_table_data.add_column(key, [val])
                else:
                    summary_table_data.add_column('m' + key, [val])

            print_log('per class results:', logger)
            print_log('\n' + class_table_data.get_string(), logger=logger)
            print_log('Summary:', logger)
            print_log('\n' + summary_table_data.get_string(), logger=logger)
        except ImportError:  # prettytable is not installed
            pass

        # each metric dict
        for key, value in ret_metrics_summary.items():
            if key == 'aAcc':
                eval_results[key] = value / 100.0
            else:
                eval_results['m' + key] = value / 100.0

        ret_metrics_class.pop('Class', None)
        for key, value in ret_metrics_class.items():
            eval_results.update({
                key + '.' + str(name): value[idx] / 100.0
                for idx, name in enumerate(class_names)
            })

        # In efficient_test mode the results are temporary files on disk;
        # remove them once metrics have been computed.
        if mmcv.is_list_of(results, str):
            for file_name in results:
                os.remove(file_name)
        return eval_results
|
RAVE-main/annotator/mmpkg/mmseg/datasets/dataset_wrappers.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
|
| 2 |
+
|
| 3 |
+
from .builder import DATASETS
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
@DATASETS.register_module()
class ConcatDataset(_ConcatDataset):
    """A wrapper of concatenated dataset.

    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
    concat the group flag for image aspect ratio.

    Args:
        datasets (list[:obj:`Dataset`]): A list of datasets.
    """

    def __init__(self, datasets):
        super(ConcatDataset, self).__init__(datasets)
        # Mirror the class/palette metadata of the first dataset so the
        # wrapper can be used wherever a single dataset is expected.
        first = datasets[0]
        self.CLASSES = first.CLASSES
        self.PALETTE = first.PALETTE
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
@DATASETS.register_module()
class RepeatDataset(object):
    """A wrapper of repeated dataset.

    The length of repeated dataset will be `times` larger than the original
    dataset. This is useful when the data loading time is long but the dataset
    is small. Using RepeatDataset can reduce the data loading time between
    epochs.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        # Expose the wrapped dataset's metadata on the wrapper itself.
        self.CLASSES = dataset.CLASSES
        self.PALETTE = dataset.PALETTE
        self._ori_len = len(self.dataset)

    def __getitem__(self, idx):
        """Get item from original dataset."""
        # Wrap the virtual index back into the original dataset's range.
        wrapped_idx = idx % self._ori_len
        return self.dataset[wrapped_idx]

    def __len__(self):
        """The length is multiplied by ``times``"""
        return self._ori_len * self.times
|
RAVE-main/annotator/mmpkg/mmseg/datasets/drive.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path as osp
|
| 2 |
+
|
| 3 |
+
from .builder import DATASETS
|
| 4 |
+
from .custom import CustomDataset
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@DATASETS.register_module()
class DRIVEDataset(CustomDataset):
    """DRIVE dataset.

    In segmentation map annotation for DRIVE, 0 stands for background, which is
    included in 2 categories. ``reduce_zero_label`` is fixed to False. The
    ``img_suffix`` is fixed to '.png' and ``seg_map_suffix`` is fixed to
    '_manual1.png'.
    """

    CLASSES = ('background', 'vessel')

    PALETTE = [[120, 120, 120], [6, 230, 230]]

    def __init__(self, **kwargs):
        # Pin the DRIVE-specific file conventions before delegating the rest
        # of the configuration to CustomDataset.
        super(DRIVEDataset, self).__init__(
            img_suffix='.png',
            seg_map_suffix='_manual1.png',
            reduce_zero_label=False,
            **kwargs)
        # Fail fast if the configured image directory does not exist.
        assert osp.exists(self.img_dir)
|
RAVE-main/annotator/mmpkg/mmseg/datasets/pascal_context.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path as osp
|
| 2 |
+
|
| 3 |
+
from .builder import DATASETS
|
| 4 |
+
from .custom import CustomDataset
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@DATASETS.register_module()
class PascalContextDataset(CustomDataset):
    """PascalContext dataset (60 classes, background included).

    In segmentation map annotation for PascalContext, 0 stands for background,
    which is included in 60 categories. ``reduce_zero_label`` is fixed to
    False. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is
    fixed to '.png'.

    Args:
        split (str): Split txt file for PascalContext. Required (unlike the
            base class, where it is optional).
    """

    # 60 class names; index 0 is 'background'.
    CLASSES = ('background', 'aeroplane', 'bag', 'bed', 'bedclothes', 'bench',
               'bicycle', 'bird', 'boat', 'book', 'bottle', 'building', 'bus',
               'cabinet', 'car', 'cat', 'ceiling', 'chair', 'cloth',
               'computer', 'cow', 'cup', 'curtain', 'dog', 'door', 'fence',
               'floor', 'flower', 'food', 'grass', 'ground', 'horse',
               'keyboard', 'light', 'motorbike', 'mountain', 'mouse', 'person',
               'plate', 'platform', 'pottedplant', 'road', 'rock', 'sheep',
               'shelves', 'sidewalk', 'sign', 'sky', 'snow', 'sofa', 'table',
               'track', 'train', 'tree', 'truck', 'tvmonitor', 'wall', 'water',
               'window', 'wood')

    # One [R, G, B] color per class, aligned with CLASSES by index.
    PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
               [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
               [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
               [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
               [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
               [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
               [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
               [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
               [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
               [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
               [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
               [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
               [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
               [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
               [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255]]

    def __init__(self, split, **kwargs):
        super(PascalContextDataset, self).__init__(
            img_suffix='.jpg',
            seg_map_suffix='.png',
            split=split,
            reduce_zero_label=False,
            **kwargs)
        # PascalContext always loads through a split file, so both the image
        # directory and the split must be present.
        assert osp.exists(self.img_dir) and self.split is not None
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@DATASETS.register_module()
|
| 58 |
+
class PascalContextDataset59(CustomDataset):
|
| 59 |
+
"""PascalContext dataset.
|
| 60 |
+
|
| 61 |
+
In segmentation map annotation for PascalContext, 0 stands for background,
|
| 62 |
+
which is included in 60 categories. ``reduce_zero_label`` is fixed to
|
| 63 |
+
False. The ``img_suffix`` is fixed to '.jpg' and ``seg_map_suffix`` is
|
| 64 |
+
fixed to '.png'.
|
| 65 |
+
|
| 66 |
+
Args:
|
| 67 |
+
split (str): Split txt file for PascalContext.
|
| 68 |
+
"""
|
| 69 |
+
|
| 70 |
+
CLASSES = ('aeroplane', 'bag', 'bed', 'bedclothes', 'bench', 'bicycle',
|
| 71 |
+
'bird', 'boat', 'book', 'bottle', 'building', 'bus', 'cabinet',
|
| 72 |
+
'car', 'cat', 'ceiling', 'chair', 'cloth', 'computer', 'cow',
|
| 73 |
+
'cup', 'curtain', 'dog', 'door', 'fence', 'floor', 'flower',
|
| 74 |
+
'food', 'grass', 'ground', 'horse', 'keyboard', 'light',
|
| 75 |
+
'motorbike', 'mountain', 'mouse', 'person', 'plate', 'platform',
|
| 76 |
+
'pottedplant', 'road', 'rock', 'sheep', 'shelves', 'sidewalk',
|
| 77 |
+
'sign', 'sky', 'snow', 'sofa', 'table', 'track', 'train',
|
| 78 |
+
'tree', 'truck', 'tvmonitor', 'wall', 'water', 'window', 'wood')
|
| 79 |
+
|
| 80 |
+
PALETTE = [[180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3],
|
| 81 |
+
[120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230],
|
| 82 |
+
[4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61],
|
| 83 |
+
[120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140],
|
| 84 |
+
[204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200],
|
| 85 |
+
[61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71],
|
| 86 |
+
[255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92],
|
| 87 |
+
[112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6],
|
| 88 |
+
[10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8],
|
| 89 |
+
[102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8],
|
| 90 |
+
[0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255],
|
| 91 |
+
[235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140],
|
| 92 |
+
[250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0],
|
| 93 |
+
[255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0],
|
| 94 |
+
[0, 235, 255], [0, 173, 255], [31, 0, 255]]
|
| 95 |
+
|
| 96 |
+
def __init__(self, split, **kwargs):
|
| 97 |
+
super(PascalContextDataset59, self).__init__(
|
| 98 |
+
img_suffix='.jpg',
|
| 99 |
+
seg_map_suffix='.png',
|
| 100 |
+
split=split,
|
| 101 |
+
reduce_zero_label=True,
|
| 102 |
+
**kwargs)
|
| 103 |
+
assert osp.exists(self.img_dir) and self.split is not None
|
RAVE-main/annotator/mmpkg/mmseg/datasets/pipelines/__init__.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .compose import Compose
|
| 2 |
+
from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor,
|
| 3 |
+
Transpose, to_tensor)
|
| 4 |
+
from .loading import LoadAnnotations, LoadImageFromFile
|
| 5 |
+
from .test_time_aug import MultiScaleFlipAug
|
| 6 |
+
from .transforms import (CLAHE, AdjustGamma, Normalize, Pad,
|
| 7 |
+
PhotoMetricDistortion, RandomCrop, RandomFlip,
|
| 8 |
+
RandomRotate, Rerange, Resize, RGB2Gray, SegRescale)
|
| 9 |
+
|
| 10 |
+
__all__ = [
|
| 11 |
+
'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
|
| 12 |
+
'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',
|
| 13 |
+
'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',
|
| 14 |
+
'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate',
|
| 15 |
+
'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray'
|
| 16 |
+
]
|
RAVE-main/annotator/mmpkg/mmseg/datasets/pipelines/compose.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
|
| 3 |
+
from annotator.mmpkg.mmcv.utils import build_from_cfg
|
| 4 |
+
|
| 5 |
+
from ..builder import PIPELINES
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@PIPELINES.register_module()
|
| 9 |
+
class Compose(object):
|
| 10 |
+
"""Compose multiple transforms sequentially.
|
| 11 |
+
|
| 12 |
+
Args:
|
| 13 |
+
transforms (Sequence[dict | callable]): Sequence of transform object or
|
| 14 |
+
config dict to be composed.
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
def __init__(self, transforms):
|
| 18 |
+
assert isinstance(transforms, collections.abc.Sequence)
|
| 19 |
+
self.transforms = []
|
| 20 |
+
for transform in transforms:
|
| 21 |
+
if isinstance(transform, dict):
|
| 22 |
+
transform = build_from_cfg(transform, PIPELINES)
|
| 23 |
+
self.transforms.append(transform)
|
| 24 |
+
elif callable(transform):
|
| 25 |
+
self.transforms.append(transform)
|
| 26 |
+
else:
|
| 27 |
+
raise TypeError('transform must be callable or a dict')
|
| 28 |
+
|
| 29 |
+
def __call__(self, data):
|
| 30 |
+
"""Call function to apply transforms sequentially.
|
| 31 |
+
|
| 32 |
+
Args:
|
| 33 |
+
data (dict): A result dict contains the data to transform.
|
| 34 |
+
|
| 35 |
+
Returns:
|
| 36 |
+
dict: Transformed data.
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
for t in self.transforms:
|
| 40 |
+
data = t(data)
|
| 41 |
+
if data is None:
|
| 42 |
+
return None
|
| 43 |
+
return data
|
| 44 |
+
|
| 45 |
+
def __repr__(self):
|
| 46 |
+
format_string = self.__class__.__name__ + '('
|
| 47 |
+
for t in self.transforms:
|
| 48 |
+
format_string += '\n'
|
| 49 |
+
format_string += f' {t}'
|
| 50 |
+
format_string += '\n)'
|
| 51 |
+
return format_string
|
RAVE-main/annotator/mmpkg/mmseg/datasets/pipelines/formating.py
ADDED
|
@@ -0,0 +1,288 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections.abc import Sequence
|
| 2 |
+
|
| 3 |
+
import annotator.mmpkg.mmcv as mmcv
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
from annotator.mmpkg.mmcv.parallel import DataContainer as DC
|
| 7 |
+
|
| 8 |
+
from ..builder import PIPELINES
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def to_tensor(data):
|
| 12 |
+
"""Convert objects of various python types to :obj:`torch.Tensor`.
|
| 13 |
+
|
| 14 |
+
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
|
| 15 |
+
:class:`Sequence`, :class:`int` and :class:`float`.
|
| 16 |
+
|
| 17 |
+
Args:
|
| 18 |
+
data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
|
| 19 |
+
be converted.
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
if isinstance(data, torch.Tensor):
|
| 23 |
+
return data
|
| 24 |
+
elif isinstance(data, np.ndarray):
|
| 25 |
+
return torch.from_numpy(data)
|
| 26 |
+
elif isinstance(data, Sequence) and not mmcv.is_str(data):
|
| 27 |
+
return torch.tensor(data)
|
| 28 |
+
elif isinstance(data, int):
|
| 29 |
+
return torch.LongTensor([data])
|
| 30 |
+
elif isinstance(data, float):
|
| 31 |
+
return torch.FloatTensor([data])
|
| 32 |
+
else:
|
| 33 |
+
raise TypeError(f'type {type(data)} cannot be converted to tensor.')
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@PIPELINES.register_module()
|
| 37 |
+
class ToTensor(object):
|
| 38 |
+
"""Convert some results to :obj:`torch.Tensor` by given keys.
|
| 39 |
+
|
| 40 |
+
Args:
|
| 41 |
+
keys (Sequence[str]): Keys that need to be converted to Tensor.
|
| 42 |
+
"""
|
| 43 |
+
|
| 44 |
+
def __init__(self, keys):
|
| 45 |
+
self.keys = keys
|
| 46 |
+
|
| 47 |
+
def __call__(self, results):
|
| 48 |
+
"""Call function to convert data in results to :obj:`torch.Tensor`.
|
| 49 |
+
|
| 50 |
+
Args:
|
| 51 |
+
results (dict): Result dict contains the data to convert.
|
| 52 |
+
|
| 53 |
+
Returns:
|
| 54 |
+
dict: The result dict contains the data converted
|
| 55 |
+
to :obj:`torch.Tensor`.
|
| 56 |
+
"""
|
| 57 |
+
|
| 58 |
+
for key in self.keys:
|
| 59 |
+
results[key] = to_tensor(results[key])
|
| 60 |
+
return results
|
| 61 |
+
|
| 62 |
+
def __repr__(self):
|
| 63 |
+
return self.__class__.__name__ + f'(keys={self.keys})'
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
@PIPELINES.register_module()
|
| 67 |
+
class ImageToTensor(object):
|
| 68 |
+
"""Convert image to :obj:`torch.Tensor` by given keys.
|
| 69 |
+
|
| 70 |
+
The dimension order of input image is (H, W, C). The pipeline will convert
|
| 71 |
+
it to (C, H, W). If only 2 dimension (H, W) is given, the output would be
|
| 72 |
+
(1, H, W).
|
| 73 |
+
|
| 74 |
+
Args:
|
| 75 |
+
keys (Sequence[str]): Key of images to be converted to Tensor.
|
| 76 |
+
"""
|
| 77 |
+
|
| 78 |
+
def __init__(self, keys):
|
| 79 |
+
self.keys = keys
|
| 80 |
+
|
| 81 |
+
def __call__(self, results):
|
| 82 |
+
"""Call function to convert image in results to :obj:`torch.Tensor` and
|
| 83 |
+
transpose the channel order.
|
| 84 |
+
|
| 85 |
+
Args:
|
| 86 |
+
results (dict): Result dict contains the image data to convert.
|
| 87 |
+
|
| 88 |
+
Returns:
|
| 89 |
+
dict: The result dict contains the image converted
|
| 90 |
+
to :obj:`torch.Tensor` and transposed to (C, H, W) order.
|
| 91 |
+
"""
|
| 92 |
+
|
| 93 |
+
for key in self.keys:
|
| 94 |
+
img = results[key]
|
| 95 |
+
if len(img.shape) < 3:
|
| 96 |
+
img = np.expand_dims(img, -1)
|
| 97 |
+
results[key] = to_tensor(img.transpose(2, 0, 1))
|
| 98 |
+
return results
|
| 99 |
+
|
| 100 |
+
def __repr__(self):
|
| 101 |
+
return self.__class__.__name__ + f'(keys={self.keys})'
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
@PIPELINES.register_module()
|
| 105 |
+
class Transpose(object):
|
| 106 |
+
"""Transpose some results by given keys.
|
| 107 |
+
|
| 108 |
+
Args:
|
| 109 |
+
keys (Sequence[str]): Keys of results to be transposed.
|
| 110 |
+
order (Sequence[int]): Order of transpose.
|
| 111 |
+
"""
|
| 112 |
+
|
| 113 |
+
def __init__(self, keys, order):
|
| 114 |
+
self.keys = keys
|
| 115 |
+
self.order = order
|
| 116 |
+
|
| 117 |
+
def __call__(self, results):
|
| 118 |
+
"""Call function to convert image in results to :obj:`torch.Tensor` and
|
| 119 |
+
transpose the channel order.
|
| 120 |
+
|
| 121 |
+
Args:
|
| 122 |
+
results (dict): Result dict contains the image data to convert.
|
| 123 |
+
|
| 124 |
+
Returns:
|
| 125 |
+
dict: The result dict contains the image converted
|
| 126 |
+
to :obj:`torch.Tensor` and transposed to (C, H, W) order.
|
| 127 |
+
"""
|
| 128 |
+
|
| 129 |
+
for key in self.keys:
|
| 130 |
+
results[key] = results[key].transpose(self.order)
|
| 131 |
+
return results
|
| 132 |
+
|
| 133 |
+
def __repr__(self):
|
| 134 |
+
return self.__class__.__name__ + \
|
| 135 |
+
f'(keys={self.keys}, order={self.order})'
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
@PIPELINES.register_module()
|
| 139 |
+
class ToDataContainer(object):
|
| 140 |
+
"""Convert results to :obj:`mmcv.DataContainer` by given fields.
|
| 141 |
+
|
| 142 |
+
Args:
|
| 143 |
+
fields (Sequence[dict]): Each field is a dict like
|
| 144 |
+
``dict(key='xxx', **kwargs)``. The ``key`` in result will
|
| 145 |
+
be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
|
| 146 |
+
Default: ``(dict(key='img', stack=True),
|
| 147 |
+
dict(key='gt_semantic_seg'))``.
|
| 148 |
+
"""
|
| 149 |
+
|
| 150 |
+
def __init__(self,
|
| 151 |
+
fields=(dict(key='img',
|
| 152 |
+
stack=True), dict(key='gt_semantic_seg'))):
|
| 153 |
+
self.fields = fields
|
| 154 |
+
|
| 155 |
+
def __call__(self, results):
|
| 156 |
+
"""Call function to convert data in results to
|
| 157 |
+
:obj:`mmcv.DataContainer`.
|
| 158 |
+
|
| 159 |
+
Args:
|
| 160 |
+
results (dict): Result dict contains the data to convert.
|
| 161 |
+
|
| 162 |
+
Returns:
|
| 163 |
+
dict: The result dict contains the data converted to
|
| 164 |
+
:obj:`mmcv.DataContainer`.
|
| 165 |
+
"""
|
| 166 |
+
|
| 167 |
+
for field in self.fields:
|
| 168 |
+
field = field.copy()
|
| 169 |
+
key = field.pop('key')
|
| 170 |
+
results[key] = DC(results[key], **field)
|
| 171 |
+
return results
|
| 172 |
+
|
| 173 |
+
def __repr__(self):
|
| 174 |
+
return self.__class__.__name__ + f'(fields={self.fields})'
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
@PIPELINES.register_module()
|
| 178 |
+
class DefaultFormatBundle(object):
|
| 179 |
+
"""Default formatting bundle.
|
| 180 |
+
|
| 181 |
+
It simplifies the pipeline of formatting common fields, including "img"
|
| 182 |
+
and "gt_semantic_seg". These fields are formatted as follows.
|
| 183 |
+
|
| 184 |
+
- img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
|
| 185 |
+
- gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor,
|
| 186 |
+
(3)to DataContainer (stack=True)
|
| 187 |
+
"""
|
| 188 |
+
|
| 189 |
+
def __call__(self, results):
|
| 190 |
+
"""Call function to transform and format common fields in results.
|
| 191 |
+
|
| 192 |
+
Args:
|
| 193 |
+
results (dict): Result dict contains the data to convert.
|
| 194 |
+
|
| 195 |
+
Returns:
|
| 196 |
+
dict: The result dict contains the data that is formatted with
|
| 197 |
+
default bundle.
|
| 198 |
+
"""
|
| 199 |
+
|
| 200 |
+
if 'img' in results:
|
| 201 |
+
img = results['img']
|
| 202 |
+
if len(img.shape) < 3:
|
| 203 |
+
img = np.expand_dims(img, -1)
|
| 204 |
+
img = np.ascontiguousarray(img.transpose(2, 0, 1))
|
| 205 |
+
results['img'] = DC(to_tensor(img), stack=True)
|
| 206 |
+
if 'gt_semantic_seg' in results:
|
| 207 |
+
# convert to long
|
| 208 |
+
results['gt_semantic_seg'] = DC(
|
| 209 |
+
to_tensor(results['gt_semantic_seg'][None,
|
| 210 |
+
...].astype(np.int64)),
|
| 211 |
+
stack=True)
|
| 212 |
+
return results
|
| 213 |
+
|
| 214 |
+
def __repr__(self):
|
| 215 |
+
return self.__class__.__name__
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
@PIPELINES.register_module()
|
| 219 |
+
class Collect(object):
|
| 220 |
+
"""Collect data from the loader relevant to the specific task.
|
| 221 |
+
|
| 222 |
+
This is usually the last stage of the data loader pipeline. Typically keys
|
| 223 |
+
is set to some subset of "img", "gt_semantic_seg".
|
| 224 |
+
|
| 225 |
+
The "img_meta" item is always populated. The contents of the "img_meta"
|
| 226 |
+
dictionary depends on "meta_keys". By default this includes:
|
| 227 |
+
|
| 228 |
+
- "img_shape": shape of the image input to the network as a tuple
|
| 229 |
+
(h, w, c). Note that images may be zero padded on the bottom/right
|
| 230 |
+
if the batch tensor is larger than this shape.
|
| 231 |
+
|
| 232 |
+
- "scale_factor": a float indicating the preprocessing scale
|
| 233 |
+
|
| 234 |
+
- "flip": a boolean indicating if image flip transform was used
|
| 235 |
+
|
| 236 |
+
- "filename": path to the image file
|
| 237 |
+
|
| 238 |
+
- "ori_shape": original shape of the image as a tuple (h, w, c)
|
| 239 |
+
|
| 240 |
+
- "pad_shape": image shape after padding
|
| 241 |
+
|
| 242 |
+
- "img_norm_cfg": a dict of normalization information:
|
| 243 |
+
- mean - per channel mean subtraction
|
| 244 |
+
- std - per channel std divisor
|
| 245 |
+
- to_rgb - bool indicating if bgr was converted to rgb
|
| 246 |
+
|
| 247 |
+
Args:
|
| 248 |
+
keys (Sequence[str]): Keys of results to be collected in ``data``.
|
| 249 |
+
meta_keys (Sequence[str], optional): Meta keys to be converted to
|
| 250 |
+
``mmcv.DataContainer`` and collected in ``data[img_metas]``.
|
| 251 |
+
Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
|
| 252 |
+
'pad_shape', 'scale_factor', 'flip', 'flip_direction',
|
| 253 |
+
'img_norm_cfg')``
|
| 254 |
+
"""
|
| 255 |
+
|
| 256 |
+
def __init__(self,
|
| 257 |
+
keys,
|
| 258 |
+
meta_keys=('filename', 'ori_filename', 'ori_shape',
|
| 259 |
+
'img_shape', 'pad_shape', 'scale_factor', 'flip',
|
| 260 |
+
'flip_direction', 'img_norm_cfg')):
|
| 261 |
+
self.keys = keys
|
| 262 |
+
self.meta_keys = meta_keys
|
| 263 |
+
|
| 264 |
+
def __call__(self, results):
|
| 265 |
+
"""Call function to collect keys in results. The keys in ``meta_keys``
|
| 266 |
+
will be converted to :obj:mmcv.DataContainer.
|
| 267 |
+
|
| 268 |
+
Args:
|
| 269 |
+
results (dict): Result dict contains the data to collect.
|
| 270 |
+
|
| 271 |
+
Returns:
|
| 272 |
+
dict: The result dict contains the following keys
|
| 273 |
+
- keys in``self.keys``
|
| 274 |
+
- ``img_metas``
|
| 275 |
+
"""
|
| 276 |
+
|
| 277 |
+
data = {}
|
| 278 |
+
img_meta = {}
|
| 279 |
+
for key in self.meta_keys:
|
| 280 |
+
img_meta[key] = results[key]
|
| 281 |
+
data['img_metas'] = DC(img_meta, cpu_only=True)
|
| 282 |
+
for key in self.keys:
|
| 283 |
+
data[key] = results[key]
|
| 284 |
+
return data
|
| 285 |
+
|
| 286 |
+
def __repr__(self):
|
| 287 |
+
return self.__class__.__name__ + \
|
| 288 |
+
f'(keys={self.keys}, meta_keys={self.meta_keys})'
|
RAVE-main/annotator/mmpkg/mmseg/datasets/pipelines/loading.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os.path as osp
|
| 2 |
+
|
| 3 |
+
import annotator.mmpkg.mmcv as mmcv
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
from ..builder import PIPELINES
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@PIPELINES.register_module()
|
| 10 |
+
class LoadImageFromFile(object):
|
| 11 |
+
"""Load an image from file.
|
| 12 |
+
|
| 13 |
+
Required keys are "img_prefix" and "img_info" (a dict that must contain the
|
| 14 |
+
key "filename"). Added or updated keys are "filename", "img", "img_shape",
|
| 15 |
+
"ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
|
| 16 |
+
"scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
|
| 17 |
+
|
| 18 |
+
Args:
|
| 19 |
+
to_float32 (bool): Whether to convert the loaded image to a float32
|
| 20 |
+
numpy array. If set to False, the loaded image is an uint8 array.
|
| 21 |
+
Defaults to False.
|
| 22 |
+
color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
|
| 23 |
+
Defaults to 'color'.
|
| 24 |
+
file_client_args (dict): Arguments to instantiate a FileClient.
|
| 25 |
+
See :class:`mmcv.fileio.FileClient` for details.
|
| 26 |
+
Defaults to ``dict(backend='disk')``.
|
| 27 |
+
imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
|
| 28 |
+
'cv2'
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
def __init__(self,
|
| 32 |
+
to_float32=False,
|
| 33 |
+
color_type='color',
|
| 34 |
+
file_client_args=dict(backend='disk'),
|
| 35 |
+
imdecode_backend='cv2'):
|
| 36 |
+
self.to_float32 = to_float32
|
| 37 |
+
self.color_type = color_type
|
| 38 |
+
self.file_client_args = file_client_args.copy()
|
| 39 |
+
self.file_client = None
|
| 40 |
+
self.imdecode_backend = imdecode_backend
|
| 41 |
+
|
| 42 |
+
def __call__(self, results):
|
| 43 |
+
"""Call functions to load image and get image meta information.
|
| 44 |
+
|
| 45 |
+
Args:
|
| 46 |
+
results (dict): Result dict from :obj:`mmseg.CustomDataset`.
|
| 47 |
+
|
| 48 |
+
Returns:
|
| 49 |
+
dict: The dict contains loaded image and meta information.
|
| 50 |
+
"""
|
| 51 |
+
|
| 52 |
+
if self.file_client is None:
|
| 53 |
+
self.file_client = mmcv.FileClient(**self.file_client_args)
|
| 54 |
+
|
| 55 |
+
if results.get('img_prefix') is not None:
|
| 56 |
+
filename = osp.join(results['img_prefix'],
|
| 57 |
+
results['img_info']['filename'])
|
| 58 |
+
else:
|
| 59 |
+
filename = results['img_info']['filename']
|
| 60 |
+
img_bytes = self.file_client.get(filename)
|
| 61 |
+
img = mmcv.imfrombytes(
|
| 62 |
+
img_bytes, flag=self.color_type, backend=self.imdecode_backend)
|
| 63 |
+
if self.to_float32:
|
| 64 |
+
img = img.astype(np.float32)
|
| 65 |
+
|
| 66 |
+
results['filename'] = filename
|
| 67 |
+
results['ori_filename'] = results['img_info']['filename']
|
| 68 |
+
results['img'] = img
|
| 69 |
+
results['img_shape'] = img.shape
|
| 70 |
+
results['ori_shape'] = img.shape
|
| 71 |
+
# Set initial values for default meta_keys
|
| 72 |
+
results['pad_shape'] = img.shape
|
| 73 |
+
results['scale_factor'] = 1.0
|
| 74 |
+
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
|
| 75 |
+
results['img_norm_cfg'] = dict(
|
| 76 |
+
mean=np.zeros(num_channels, dtype=np.float32),
|
| 77 |
+
std=np.ones(num_channels, dtype=np.float32),
|
| 78 |
+
to_rgb=False)
|
| 79 |
+
return results
|
| 80 |
+
|
| 81 |
+
def __repr__(self):
|
| 82 |
+
repr_str = self.__class__.__name__
|
| 83 |
+
repr_str += f'(to_float32={self.to_float32},'
|
| 84 |
+
repr_str += f"color_type='{self.color_type}',"
|
| 85 |
+
repr_str += f"imdecode_backend='{self.imdecode_backend}')"
|
| 86 |
+
return repr_str
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
@PIPELINES.register_module()
|
| 90 |
+
class LoadAnnotations(object):
|
| 91 |
+
"""Load annotations for semantic segmentation.
|
| 92 |
+
|
| 93 |
+
Args:
|
| 94 |
+
reduce_zero_label (bool): Whether reduce all label value by 1.
|
| 95 |
+
Usually used for datasets where 0 is background label.
|
| 96 |
+
Default: False.
|
| 97 |
+
file_client_args (dict): Arguments to instantiate a FileClient.
|
| 98 |
+
See :class:`mmcv.fileio.FileClient` for details.
|
| 99 |
+
Defaults to ``dict(backend='disk')``.
|
| 100 |
+
imdecode_backend (str): Backend for :func:`mmcv.imdecode`. Default:
|
| 101 |
+
'pillow'
|
| 102 |
+
"""
|
| 103 |
+
|
| 104 |
+
def __init__(self,
|
| 105 |
+
reduce_zero_label=False,
|
| 106 |
+
file_client_args=dict(backend='disk'),
|
| 107 |
+
imdecode_backend='pillow'):
|
| 108 |
+
self.reduce_zero_label = reduce_zero_label
|
| 109 |
+
self.file_client_args = file_client_args.copy()
|
| 110 |
+
self.file_client = None
|
| 111 |
+
self.imdecode_backend = imdecode_backend
|
| 112 |
+
|
| 113 |
+
def __call__(self, results):
|
| 114 |
+
"""Call function to load multiple types annotations.
|
| 115 |
+
|
| 116 |
+
Args:
|
| 117 |
+
results (dict): Result dict from :obj:`mmseg.CustomDataset`.
|
| 118 |
+
|
| 119 |
+
Returns:
|
| 120 |
+
dict: The dict contains loaded semantic segmentation annotations.
|
| 121 |
+
"""
|
| 122 |
+
|
| 123 |
+
if self.file_client is None:
|
| 124 |
+
self.file_client = mmcv.FileClient(**self.file_client_args)
|
| 125 |
+
|
| 126 |
+
if results.get('seg_prefix', None) is not None:
|
| 127 |
+
filename = osp.join(results['seg_prefix'],
|
| 128 |
+
results['ann_info']['seg_map'])
|
| 129 |
+
else:
|
| 130 |
+
filename = results['ann_info']['seg_map']
|
| 131 |
+
img_bytes = self.file_client.get(filename)
|
| 132 |
+
gt_semantic_seg = mmcv.imfrombytes(
|
| 133 |
+
img_bytes, flag='unchanged',
|
| 134 |
+
backend=self.imdecode_backend).squeeze().astype(np.uint8)
|
| 135 |
+
# modify if custom classes
|
| 136 |
+
if results.get('label_map', None) is not None:
|
| 137 |
+
for old_id, new_id in results['label_map'].items():
|
| 138 |
+
gt_semantic_seg[gt_semantic_seg == old_id] = new_id
|
| 139 |
+
# reduce zero_label
|
| 140 |
+
if self.reduce_zero_label:
|
| 141 |
+
# avoid using underflow conversion
|
| 142 |
+
gt_semantic_seg[gt_semantic_seg == 0] = 255
|
| 143 |
+
gt_semantic_seg = gt_semantic_seg - 1
|
| 144 |
+
gt_semantic_seg[gt_semantic_seg == 254] = 255
|
| 145 |
+
results['gt_semantic_seg'] = gt_semantic_seg
|
| 146 |
+
results['seg_fields'].append('gt_semantic_seg')
|
| 147 |
+
return results
|
| 148 |
+
|
| 149 |
+
def __repr__(self):
|
| 150 |
+
repr_str = self.__class__.__name__
|
| 151 |
+
repr_str += f'(reduce_zero_label={self.reduce_zero_label},'
|
| 152 |
+
repr_str += f"imdecode_backend='{self.imdecode_backend}')"
|
| 153 |
+
return repr_str
|
RAVE-main/annotator/mmpkg/mmseg/datasets/pipelines/test_time_aug.py
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
|
| 3 |
+
import annotator.mmpkg.mmcv as mmcv
|
| 4 |
+
|
| 5 |
+
from ..builder import PIPELINES
|
| 6 |
+
from .compose import Compose
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@PIPELINES.register_module()
|
| 10 |
+
class MultiScaleFlipAug(object):
|
| 11 |
+
"""Test-time augmentation with multiple scales and flipping.
|
| 12 |
+
|
| 13 |
+
An example configuration is as followed:
|
| 14 |
+
|
| 15 |
+
.. code-block::
|
| 16 |
+
|
| 17 |
+
img_scale=(2048, 1024),
|
| 18 |
+
img_ratios=[0.5, 1.0],
|
| 19 |
+
flip=True,
|
| 20 |
+
transforms=[
|
| 21 |
+
dict(type='Resize', keep_ratio=True),
|
| 22 |
+
dict(type='RandomFlip'),
|
| 23 |
+
dict(type='Normalize', **img_norm_cfg),
|
| 24 |
+
dict(type='Pad', size_divisor=32),
|
| 25 |
+
dict(type='ImageToTensor', keys=['img']),
|
| 26 |
+
dict(type='Collect', keys=['img']),
|
| 27 |
+
]
|
| 28 |
+
|
| 29 |
+
After MultiScaleFLipAug with above configuration, the results are wrapped
|
| 30 |
+
into lists of the same length as followed:
|
| 31 |
+
|
| 32 |
+
.. code-block::
|
| 33 |
+
|
| 34 |
+
dict(
|
| 35 |
+
img=[...],
|
| 36 |
+
img_shape=[...],
|
| 37 |
+
scale=[(1024, 512), (1024, 512), (2048, 1024), (2048, 1024)]
|
| 38 |
+
flip=[False, True, False, True]
|
| 39 |
+
...
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
Args:
|
| 43 |
+
transforms (list[dict]): Transforms to apply in each augmentation.
|
| 44 |
+
img_scale (None | tuple | list[tuple]): Images scales for resizing.
|
| 45 |
+
img_ratios (float | list[float]): Image ratios for resizing
|
| 46 |
+
flip (bool): Whether apply flip augmentation. Default: False.
|
| 47 |
+
flip_direction (str | list[str]): Flip augmentation directions,
|
| 48 |
+
options are "horizontal" and "vertical". If flip_direction is list,
|
| 49 |
+
multiple flip augmentations will be applied.
|
| 50 |
+
It has no effect when flip == False. Default: "horizontal".
|
| 51 |
+
"""
|
| 52 |
+
|
| 53 |
+
def __init__(self,
|
| 54 |
+
transforms,
|
| 55 |
+
img_scale,
|
| 56 |
+
img_ratios=None,
|
| 57 |
+
flip=False,
|
| 58 |
+
flip_direction='horizontal'):
|
| 59 |
+
self.transforms = Compose(transforms)
|
| 60 |
+
if img_ratios is not None:
|
| 61 |
+
img_ratios = img_ratios if isinstance(img_ratios,
|
| 62 |
+
list) else [img_ratios]
|
| 63 |
+
assert mmcv.is_list_of(img_ratios, float)
|
| 64 |
+
if img_scale is None:
|
| 65 |
+
# mode 1: given img_scale=None and a range of image ratio
|
| 66 |
+
self.img_scale = None
|
| 67 |
+
assert mmcv.is_list_of(img_ratios, float)
|
| 68 |
+
elif isinstance(img_scale, tuple) and mmcv.is_list_of(
|
| 69 |
+
img_ratios, float):
|
| 70 |
+
assert len(img_scale) == 2
|
| 71 |
+
# mode 2: given a scale and a range of image ratio
|
| 72 |
+
self.img_scale = [(int(img_scale[0] * ratio),
|
| 73 |
+
int(img_scale[1] * ratio))
|
| 74 |
+
for ratio in img_ratios]
|
| 75 |
+
else:
|
| 76 |
+
# mode 3: given multiple scales
|
| 77 |
+
self.img_scale = img_scale if isinstance(img_scale,
|
| 78 |
+
list) else [img_scale]
|
| 79 |
+
assert mmcv.is_list_of(self.img_scale, tuple) or self.img_scale is None
|
| 80 |
+
self.flip = flip
|
| 81 |
+
self.img_ratios = img_ratios
|
| 82 |
+
self.flip_direction = flip_direction if isinstance(
|
| 83 |
+
flip_direction, list) else [flip_direction]
|
| 84 |
+
assert mmcv.is_list_of(self.flip_direction, str)
|
| 85 |
+
if not self.flip and self.flip_direction != ['horizontal']:
|
| 86 |
+
warnings.warn(
|
| 87 |
+
'flip_direction has no effect when flip is set to False')
|
| 88 |
+
if (self.flip
|
| 89 |
+
and not any([t['type'] == 'RandomFlip' for t in transforms])):
|
| 90 |
+
warnings.warn(
|
| 91 |
+
'flip has no effect when RandomFlip is not in transforms')
|
| 92 |
+
|
| 93 |
+
def __call__(self, results):
|
| 94 |
+
"""Call function to apply test time augment transforms on results.
|
| 95 |
+
|
| 96 |
+
Args:
|
| 97 |
+
results (dict): Result dict contains the data to transform.
|
| 98 |
+
|
| 99 |
+
Returns:
|
| 100 |
+
dict[str: list]: The augmented data, where each value is wrapped
|
| 101 |
+
into a list.
|
| 102 |
+
"""
|
| 103 |
+
|
| 104 |
+
aug_data = []
|
| 105 |
+
if self.img_scale is None and mmcv.is_list_of(self.img_ratios, float):
|
| 106 |
+
h, w = results['img'].shape[:2]
|
| 107 |
+
img_scale = [(int(w * ratio), int(h * ratio))
|
| 108 |
+
for ratio in self.img_ratios]
|
| 109 |
+
else:
|
| 110 |
+
img_scale = self.img_scale
|
| 111 |
+
flip_aug = [False, True] if self.flip else [False]
|
| 112 |
+
for scale in img_scale:
|
| 113 |
+
for flip in flip_aug:
|
| 114 |
+
for direction in self.flip_direction:
|
| 115 |
+
_results = results.copy()
|
| 116 |
+
_results['scale'] = scale
|
| 117 |
+
_results['flip'] = flip
|
| 118 |
+
_results['flip_direction'] = direction
|
| 119 |
+
data = self.transforms(_results)
|
| 120 |
+
aug_data.append(data)
|
| 121 |
+
# list of dict to dict of list
|
| 122 |
+
aug_data_dict = {key: [] for key in aug_data[0]}
|
| 123 |
+
for data in aug_data:
|
| 124 |
+
for key, val in data.items():
|
| 125 |
+
aug_data_dict[key].append(val)
|
| 126 |
+
return aug_data_dict
|
| 127 |
+
|
| 128 |
+
def __repr__(self):
|
| 129 |
+
repr_str = self.__class__.__name__
|
| 130 |
+
repr_str += f'(transforms={self.transforms}, '
|
| 131 |
+
repr_str += f'img_scale={self.img_scale}, flip={self.flip})'
|
| 132 |
+
repr_str += f'flip_direction={self.flip_direction}'
|
| 133 |
+
return repr_str
|
RAVE-main/annotator/mmpkg/mmseg/datasets/pipelines/transforms.py
ADDED
|
@@ -0,0 +1,889 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import annotator.mmpkg.mmcv as mmcv
|
| 2 |
+
import numpy as np
|
| 3 |
+
from annotator.mmpkg.mmcv.utils import deprecated_api_warning, is_tuple_of
|
| 4 |
+
from numpy import random
|
| 5 |
+
|
| 6 |
+
from ..builder import PIPELINES
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@PIPELINES.register_module()
class Resize(object):
    """Resize images & seg.

    This transform resizes the input image to some scale. If the input dict
    contains the key "scale", then the scale in the input dict is used,
    otherwise the specified scale in the init method is used.

    ``img_scale`` can be None, a tuple (single-scale) or a list of tuple
    (multi-scale). There are 4 multiscale modes:

    - ``ratio_range is not None``:
    1. When img_scale is None, img_scale is the shape of image in results
        (img_scale = results['img'].shape[:2]) and the image is resized based
        on the original size. (mode 1)
    2. When img_scale is a tuple (single-scale), randomly sample a ratio from
        the ratio range and multiply it with the image scale. (mode 2)

    - ``ratio_range is None and multiscale_mode == "range"``: randomly sample a
    scale from the a range. (mode 3)

    - ``ratio_range is None and multiscale_mode == "value"``: randomly sample a
    scale from multiple scales. (mode 4)

    Args:
        img_scale (tuple or list[tuple]): Images scales for resizing.
        multiscale_mode (str): Either "range" or "value".
        ratio_range (tuple[float]): (min_ratio, max_ratio)
        keep_ratio (bool): Whether to keep the aspect ratio when resizing the
            image.
    """

    def __init__(self,
                 img_scale=None,
                 multiscale_mode='range',
                 ratio_range=None,
                 keep_ratio=True):
        if img_scale is None:
            self.img_scale = None
        else:
            # Normalize to a list of (w, h) tuples so downstream code can
            # treat single- and multi-scale uniformly.
            if isinstance(img_scale, list):
                self.img_scale = img_scale
            else:
                self.img_scale = [img_scale]
            assert mmcv.is_list_of(self.img_scale, tuple)

        if ratio_range is not None:
            # mode 1: given img_scale=None and a range of image ratio
            # mode 2: given a scale and a range of image ratio
            assert self.img_scale is None or len(self.img_scale) == 1
        else:
            # mode 3 and 4: given multiple scales or a range of scales
            assert multiscale_mode in ['value', 'range']

        self.multiscale_mode = multiscale_mode
        self.ratio_range = ratio_range
        self.keep_ratio = keep_ratio

    @staticmethod
    def random_select(img_scales):
        """Randomly select an img_scale from given candidates.

        Args:
            img_scales (list[tuple]): Images scales for selection.

        Returns:
            (tuple, int): Returns a tuple ``(img_scale, scale_dix)``,
                where ``img_scale`` is the selected image scale and
                ``scale_idx`` is the selected index in the given candidates.
        """

        assert mmcv.is_list_of(img_scales, tuple)
        scale_idx = np.random.randint(len(img_scales))
        img_scale = img_scales[scale_idx]
        return img_scale, scale_idx

    @staticmethod
    def random_sample(img_scales):
        """Randomly sample an img_scale when ``multiscale_mode=='range'``.

        Args:
            img_scales (list[tuple]): Images scale range for sampling.
                There must be two tuples in img_scales, which specify the lower
                and upper bound of image scales.

        Returns:
            (tuple, None): Returns a tuple ``(img_scale, None)``, where
                ``img_scale`` is sampled scale and None is just a placeholder
                to be consistent with :func:`random_select`.
        """

        assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
        # Sample the long and short edges independently, each uniformly
        # between the corresponding bounds of the two candidate scales.
        img_scale_long = [max(s) for s in img_scales]
        img_scale_short = [min(s) for s in img_scales]
        long_edge = np.random.randint(
            min(img_scale_long),
            max(img_scale_long) + 1)
        short_edge = np.random.randint(
            min(img_scale_short),
            max(img_scale_short) + 1)
        img_scale = (long_edge, short_edge)
        return img_scale, None

    @staticmethod
    def random_sample_ratio(img_scale, ratio_range):
        """Randomly sample an img_scale when ``ratio_range`` is specified.

        A ratio will be randomly sampled from the range specified by
        ``ratio_range``. Then it would be multiplied with ``img_scale`` to
        generate sampled scale.

        Args:
            img_scale (tuple): Images scale base to multiply with ratio.
            ratio_range (tuple[float]): The minimum and maximum ratio to scale
                the ``img_scale``.

        Returns:
            (tuple, None): Returns a tuple ``(scale, None)``, where
                ``scale`` is sampled ratio multiplied with ``img_scale`` and
                None is just a placeholder to be consistent with
                :func:`random_select`.
        """

        assert isinstance(img_scale, tuple) and len(img_scale) == 2
        min_ratio, max_ratio = ratio_range
        assert min_ratio <= max_ratio
        # Uniform sample in [min_ratio, max_ratio).
        ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
        scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
        return scale, None

    def _random_scale(self, results):
        """Randomly sample an img_scale according to ``ratio_range`` and
        ``multiscale_mode``.

        If ``ratio_range`` is specified, a ratio will be sampled and be
        multiplied with ``img_scale``.
        If multiple scales are specified by ``img_scale``, a scale will be
        sampled according to ``multiscale_mode``.
        Otherwise, single scale will be used.

        Args:
            results (dict): Result dict from :obj:`dataset`.

        Returns:
            dict: Two new keys 'scale` and 'scale_idx` are added into
                ``results``, which would be used by subsequent pipelines.
        """

        if self.ratio_range is not None:
            if self.img_scale is None:
                # mode 1: base the ratio on the original image size,
                # passed as (w, h) to match the tuple convention used above.
                h, w = results['img'].shape[:2]
                scale, scale_idx = self.random_sample_ratio((w, h),
                                                            self.ratio_range)
            else:
                # mode 2
                scale, scale_idx = self.random_sample_ratio(
                    self.img_scale[0], self.ratio_range)
        elif len(self.img_scale) == 1:
            # single scale: no sampling needed
            scale, scale_idx = self.img_scale[0], 0
        elif self.multiscale_mode == 'range':
            # mode 3
            scale, scale_idx = self.random_sample(self.img_scale)
        elif self.multiscale_mode == 'value':
            # mode 4
            scale, scale_idx = self.random_select(self.img_scale)
        else:
            raise NotImplementedError

        results['scale'] = scale
        results['scale_idx'] = scale_idx

    def _resize_img(self, results):
        """Resize images with ``results['scale']``."""
        if self.keep_ratio:
            img, scale_factor = mmcv.imrescale(
                results['img'], results['scale'], return_scale=True)
            # the w_scale and h_scale has minor difference
            # a real fix should be done in the mmcv.imrescale in the future
            new_h, new_w = img.shape[:2]
            h, w = results['img'].shape[:2]
            w_scale = new_w / w
            h_scale = new_h / h
        else:
            img, w_scale, h_scale = mmcv.imresize(
                results['img'], results['scale'], return_scale=True)
        # (w, h, w, h) ordering mirrors bbox (x1, y1, x2, y2) scaling.
        scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
                                dtype=np.float32)
        results['img'] = img
        results['img_shape'] = img.shape
        results['pad_shape'] = img.shape  # in case that there is no padding
        results['scale_factor'] = scale_factor
        results['keep_ratio'] = self.keep_ratio

    def _resize_seg(self, results):
        """Resize semantic segmentation map with ``results['scale']``."""
        for key in results.get('seg_fields', []):
            # nearest interpolation keeps label values intact
            if self.keep_ratio:
                gt_seg = mmcv.imrescale(
                    results[key], results['scale'], interpolation='nearest')
            else:
                gt_seg = mmcv.imresize(
                    results[key], results['scale'], interpolation='nearest')
            results[key] = gt_seg

    def __call__(self, results):
        """Call function to resize images, bounding boxes, masks, semantic
        segmentation map.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor',
                'keep_ratio' keys are added into result dict.
        """

        if 'scale' not in results:
            self._random_scale(results)
        self._resize_img(results)
        self._resize_seg(results)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(img_scale={self.img_scale}, '
                     f'multiscale_mode={self.multiscale_mode}, '
                     f'ratio_range={self.ratio_range}, '
                     f'keep_ratio={self.keep_ratio})')
        return repr_str
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
@PIPELINES.register_module()
class RandomFlip(object):
    """Flip the image & seg.

    If the input dict contains the key "flip", then the flag will be used,
    otherwise it will be randomly decided by a ratio specified in the init
    method.

    Args:
        prob (float, optional): The flipping probability. ``None`` means
            never flip. Default: None.
        direction(str, optional): The flipping direction. Options are
            'horizontal' and 'vertical'. Default: 'horizontal'.
    """

    @deprecated_api_warning({'flip_ratio': 'prob'}, cls_name='RandomFlip')
    def __init__(self, prob=None, direction='horizontal'):
        self.prob = prob
        self.direction = direction
        if prob is not None:
            assert prob >= 0 and prob <= 1
        assert direction in ['horizontal', 'vertical']

    def __call__(self, results):
        """Call function to flip bounding boxes, masks, semantic segmentation
        maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Flipped results, 'flip', 'flip_direction' keys are added into
                result dict.
        """

        if 'flip' not in results:
            # Guard against ``prob is None`` (the documented default):
            # comparing a float against None raises TypeError in Python 3,
            # so treat a missing probability as "never flip".
            flip = self.prob is not None and np.random.rand() < self.prob
            results['flip'] = flip
        if 'flip_direction' not in results:
            results['flip_direction'] = self.direction
        if results['flip']:
            # flip image
            results['img'] = mmcv.imflip(
                results['img'], direction=results['flip_direction'])

            # flip segs
            for key in results.get('seg_fields', []):
                # use copy() to make numpy stride positive
                results[key] = mmcv.imflip(
                    results[key], direction=results['flip_direction']).copy()
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(prob={self.prob})'
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
@PIPELINES.register_module()
class Pad(object):
    """Pad the image & mask.

    There are two padding modes: (1) pad to a fixed size and (2) pad to the
    minimum size that is divisible by some number.
    Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor",

    Args:
        size (tuple, optional): Fixed padding size.
        size_divisor (int, optional): The divisor of padded size.
        pad_val (float, optional): Padding value. Default: 0.
        seg_pad_val (float, optional): Padding value of segmentation map.
            Default: 255.
    """

    def __init__(self,
                 size=None,
                 size_divisor=None,
                 pad_val=0,
                 seg_pad_val=255):
        self.size = size
        self.size_divisor = size_divisor
        self.pad_val = pad_val
        self.seg_pad_val = seg_pad_val
        # Exactly one of ``size`` / ``size_divisor`` must be provided.
        assert size is not None or size_divisor is not None
        assert size is None or size_divisor is None

    def _pad_img(self, results):
        """Pad the image to ``self.size`` or to a multiple of
        ``self.size_divisor``."""
        img = results['img']
        if self.size is not None:
            padded = mmcv.impad(img, shape=self.size, pad_val=self.pad_val)
        elif self.size_divisor is not None:
            padded = mmcv.impad_to_multiple(
                img, self.size_divisor, pad_val=self.pad_val)
        results['img'] = padded
        results['pad_shape'] = padded.shape
        results['pad_fixed_size'] = self.size
        results['pad_size_divisor'] = self.size_divisor

    def _pad_seg(self, results):
        """Pad every segmentation map to match ``results['pad_shape']``."""
        target_shape = results['pad_shape'][:2]
        for key in results.get('seg_fields', []):
            results[key] = mmcv.impad(
                results[key], shape=target_shape, pad_val=self.seg_pad_val)

    def __call__(self, results):
        """Call function to pad images, masks, semantic segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Updated result dict.
        """
        self._pad_img(results)
        self._pad_seg(results)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}(size={self.size}, '
                f'size_divisor={self.size_divisor}, pad_val={self.pad_val})')
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
@PIPELINES.register_module()
class Normalize(object):
    """Normalize the image.

    Added key is "img_norm_cfg".

    Args:
        mean (sequence): Mean values of 3 channels.
        std (sequence): Std values of 3 channels.
        to_rgb (bool): Whether to convert the image from BGR to RGB,
            default is true.
    """

    def __init__(self, mean, std, to_rgb=True):
        self.mean = np.array(mean, dtype=np.float32)
        self.std = np.array(std, dtype=np.float32)
        self.to_rgb = to_rgb

    def __call__(self, results):
        """Normalize ``results['img']`` and record the normalization config.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Normalized results; the 'img_norm_cfg' key is added into
                the result dict.
        """
        normalized = mmcv.imnormalize(
            results['img'], self.mean, self.std, self.to_rgb)
        results['img'] = normalized
        results['img_norm_cfg'] = dict(
            mean=self.mean, std=self.std, to_rgb=self.to_rgb)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}(mean={self.mean}, '
                f'std={self.std}, to_rgb={self.to_rgb})')
|
| 403 |
+
|
| 404 |
+
|
| 405 |
+
@PIPELINES.register_module()
class Rerange(object):
    """Rerange the image pixel value.

    Args:
        min_value (float or int): Minimum value of the reranged image.
            Default: 0.
        max_value (float or int): Maximum value of the reranged image.
            Default: 255.
    """

    def __init__(self, min_value=0, max_value=255):
        assert isinstance(min_value, (float, int))
        assert isinstance(max_value, (float, int))
        assert min_value < max_value
        self.min_value = min_value
        self.max_value = max_value

    def __call__(self, results):
        """Linearly map the image's value range onto
        ``[min_value, max_value]``.

        Args:
            results (dict): Result dict from loading pipeline.
        Returns:
            dict: Reranged results.
        """
        img = results['img']
        lo = np.min(img)
        hi = np.max(img)
        # A constant image cannot be reranged (division by zero).
        assert lo < hi
        # normalize to [0, 1], then stretch to the target range
        unit = (img - lo) / (hi - lo)
        results['img'] = unit * (self.max_value - self.min_value) + self.min_value
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(min_value={self.min_value}, max_value={self.max_value})')
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
@PIPELINES.register_module()
class CLAHE(object):
    """Use CLAHE method to process the image.

    See `ZUIDERVELD,K. Contrast Limited Adaptive Histogram Equalization[J].
    Graphics Gems, 1994:474-485.` for more information.

    Args:
        clip_limit (float): Threshold for contrast limiting. Default: 40.0.
        tile_grid_size (tuple[int]): Size of grid for histogram equalization.
            Input image will be divided into equally sized rectangular tiles.
            It defines the number of tiles in row and column. Default: (8, 8).
    """

    def __init__(self, clip_limit=40.0, tile_grid_size=(8, 8)):
        assert isinstance(clip_limit, (float, int))
        self.clip_limit = clip_limit
        assert is_tuple_of(tile_grid_size, int)
        assert len(tile_grid_size) == 2
        self.tile_grid_size = tile_grid_size

    def __call__(self, results):
        """Apply CLAHE to each channel of ``results['img']`` in place.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Processed results.
        """
        num_channels = results['img'].shape[2]
        for channel in range(num_channels):
            # mmcv.clahe operates on uint8 single-channel planes
            plane = np.array(results['img'][:, :, channel], dtype=np.uint8)
            results['img'][:, :, channel] = mmcv.clahe(
                plane, self.clip_limit, self.tile_grid_size)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}(clip_limit={self.clip_limit}, '
                f'tile_grid_size={self.tile_grid_size})')
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
@PIPELINES.register_module()
class RandomCrop(object):
    """Random crop the image & seg.

    Args:
        crop_size (tuple): Expected size after cropping, (h, w).
        cat_max_ratio (float): The maximum ratio that single category could
            occupy.
    """

    def __init__(self, crop_size, cat_max_ratio=1., ignore_index=255):
        assert crop_size[0] > 0 and crop_size[1] > 0
        self.crop_size = crop_size
        self.cat_max_ratio = cat_max_ratio
        self.ignore_index = ignore_index

    def get_crop_bbox(self, img):
        """Draw a random crop window (y1, y2, x1, x2) within ``img``."""
        max_off_h = max(img.shape[0] - self.crop_size[0], 0)
        max_off_w = max(img.shape[1] - self.crop_size[1], 0)
        y1 = np.random.randint(0, max_off_h + 1)
        x1 = np.random.randint(0, max_off_w + 1)
        return y1, y1 + self.crop_size[0], x1, x1 + self.crop_size[1]

    def crop(self, img, crop_bbox):
        """Slice ``img`` down to ``crop_bbox``."""
        y1, y2, x1, x2 = crop_bbox
        return img[y1:y2, x1:x2, ...]

    def __call__(self, results):
        """Call function to randomly crop images, semantic segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Randomly cropped results, 'img_shape' key in result dict is
                updated according to crop size.
        """
        img = results['img']
        crop_bbox = self.get_crop_bbox(img)
        if self.cat_max_ratio < 1.:
            # Re-draw the window up to 10 times until no single category
            # dominates the cropped segmentation map.
            for _ in range(10):
                seg_patch = self.crop(results['gt_semantic_seg'], crop_bbox)
                labels, counts = np.unique(seg_patch, return_counts=True)
                counts = counts[labels != self.ignore_index]
                dominant = np.max(counts) / np.sum(counts) if len(counts) else 1.
                if len(counts) > 1 and dominant < self.cat_max_ratio:
                    break
                crop_bbox = self.get_crop_bbox(img)

        # crop the image
        cropped = self.crop(img, crop_bbox)
        results['img'] = cropped
        results['img_shape'] = cropped.shape

        # crop semantic seg
        for key in results.get('seg_fields', []):
            results[key] = self.crop(results[key], crop_bbox)

        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(crop_size={self.crop_size})'
|
| 567 |
+
|
| 568 |
+
|
| 569 |
+
@PIPELINES.register_module()
class RandomRotate(object):
    """Rotate the image & seg.

    Args:
        prob (float): The rotation probability.
        degree (float, tuple[float]): Range of degrees to select from. If
            degree is a number instead of tuple like (min, max),
            the range of degree will be (``-degree``, ``+degree``)
        pad_val (float, optional): Padding value of image. Default: 0.
        seg_pad_val (float, optional): Padding value of segmentation map.
            Default: 255.
        center (tuple[float], optional): Center point (w, h) of the rotation in
            the source image. If not specified, the center of the image will be
            used. Default: None.
        auto_bound (bool): Whether to adjust the image size to cover the whole
            rotated image. Default: False
    """

    def __init__(self,
                 prob,
                 degree,
                 pad_val=0,
                 seg_pad_val=255,
                 center=None,
                 auto_bound=False):
        self.prob = prob
        assert prob >= 0 and prob <= 1
        if isinstance(degree, (float, int)):
            assert degree > 0, f'degree {degree} should be positive'
            self.degree = (-degree, degree)
        else:
            self.degree = degree
        assert len(self.degree) == 2, (
            f'degree {self.degree} should be a tuple of (min, max)')
        # NOTE(review): the attribute name 'pal_val' (sic) is kept as-is for
        # compatibility with existing code that may read it; it holds the
        # image border padding value.
        self.pal_val = pad_val
        self.seg_pad_val = seg_pad_val
        self.center = center
        self.auto_bound = auto_bound

    def __call__(self, results):
        """Call function to rotate image, semantic segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Rotated results.
        """
        do_rotate = np.random.rand() < self.prob
        # Draw the angle unconditionally so the RNG stream stays identical
        # whether or not the rotation is applied.
        angle = np.random.uniform(min(*self.degree), max(*self.degree))
        if do_rotate:
            # rotate image
            results['img'] = mmcv.imrotate(
                results['img'],
                angle=angle,
                border_value=self.pal_val,
                center=self.center,
                auto_bound=self.auto_bound)

            # rotate segs; nearest interpolation keeps label values intact
            for key in results.get('seg_fields', []):
                results[key] = mmcv.imrotate(
                    results[key],
                    angle=angle,
                    border_value=self.seg_pad_val,
                    center=self.center,
                    auto_bound=self.auto_bound,
                    interpolation='nearest')
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}(prob={self.prob}, '
                f'degree={self.degree}, '
                f'pad_val={self.pal_val}, '
                f'seg_pad_val={self.seg_pad_val}, '
                f'center={self.center}, '
                f'auto_bound={self.auto_bound})')
|
| 650 |
+
|
| 651 |
+
|
| 652 |
+
@PIPELINES.register_module()
class RGB2Gray(object):
    """Convert an RGB image to a (replicated) grayscale image.

    The grayscale value is the weighted mean of the input channels using
    ``weights``; that single channel is then tiled out to ``out_channels``
    channels, or back to the original channel count when ``out_channels``
    is None.

    Args:
        out_channels (int): Expected number of output channels after
            transforming. Default: None (keep the input channel count).
        weights (tuple[float]): Per-channel weights for the weighted mean.
            Default: (0.299, 0.587, 0.114).
    """

    def __init__(self, out_channels=None, weights=(0.299, 0.587, 0.114)):
        assert out_channels is None or out_channels > 0
        self.out_channels = out_channels
        assert isinstance(weights, tuple)
        for weight in weights:
            assert isinstance(weight, (float, int))
        self.weights = weights

    def __call__(self, results):
        """Convert ``results['img']`` to grayscale.

        Args:
            results (dict): Result dict from the loading pipeline.

        Returns:
            dict: Result dict with the grayscale image and an updated
                ``img_shape``.
        """
        img = results['img']
        assert len(img.shape) == 3
        assert img.shape[2] == len(self.weights)
        channel_weights = np.array(self.weights).reshape((1, 1, -1))
        gray = (img * channel_weights).sum(2, keepdims=True)
        # Tile the single gray channel back out to the requested width.
        repeats = (channel_weights.shape[2]
                   if self.out_channels is None else self.out_channels)
        gray = gray.repeat(repeats, axis=2)

        results['img'] = gray
        results['img_shape'] = gray.shape

        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(out_channels={self.out_channels}, '
                f'weights={self.weights})')
@PIPELINES.register_module()
class AdjustGamma(object):
    """Apply gamma correction to the image via a precomputed lookup table.

    Args:
        gamma (float or int): Gamma value used in gamma correction.
            Must be positive. Default: 1.0.
    """

    def __init__(self, gamma=1.0):
        assert isinstance(gamma, (float, int))
        assert gamma > 0
        self.gamma = gamma
        # Precompute the uint8 LUT once: v -> 255 * (v / 255) ** (1 / gamma).
        inv_gamma = 1.0 / gamma
        self.table = np.array(
            [(i / 255.0)**inv_gamma * 255
             for i in np.arange(256)]).astype('uint8')

    def __call__(self, results):
        """Apply gamma correction to ``results['img']``.

        Args:
            results (dict): Result dict from the loading pipeline.

        Returns:
            dict: Processed results.
        """
        results['img'] = mmcv.lut_transform(
            np.array(results['img'], dtype=np.uint8), self.table)

        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(gamma={self.gamma})'
@PIPELINES.register_module()
class SegRescale(object):
    """Rescale semantic segmentation maps.

    Args:
        scale_factor (float): The scale factor of the final output.
            Default: 1 (no rescaling).
    """

    def __init__(self, scale_factor=1):
        self.scale_factor = scale_factor

    def __call__(self, results):
        """Rescale every map listed in ``results['seg_fields']``.

        Args:
            results (dict): Result dict from the loading pipeline.

        Returns:
            dict: Result dict with rescaled segmentation maps.
        """
        # A unit scale factor is a no-op; skip the loop entirely.
        if self.scale_factor == 1:
            return results
        for key in results.get('seg_fields', []):
            # Nearest-neighbour keeps label values intact.
            results[key] = mmcv.imrescale(
                results[key], self.scale_factor, interpolation='nearest')
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(scale_factor={self.scale_factor})'
@PIPELINES.register_module()
class PhotoMetricDistortion(object):
    """Apply photometric distortions to the image sequentially.

    Every sub-transform fires independently with probability 0.5. Random
    contrast runs either right after brightness or at the very end:

    1. random brightness
    2. random contrast (when mode == 1)
    3. convert color from BGR to HSV
    4. random saturation
    5. random hue
    6. convert color from HSV to BGR
    7. random contrast (when mode == 0)

    Args:
        brightness_delta (int): delta of brightness.
        contrast_range (tuple): range of contrast.
        saturation_range (tuple): range of saturation.
        hue_delta (int): delta of hue.
    """

    def __init__(self,
                 brightness_delta=32,
                 contrast_range=(0.5, 1.5),
                 saturation_range=(0.5, 1.5),
                 hue_delta=18):
        self.brightness_delta = brightness_delta
        self.contrast_lower, self.contrast_upper = contrast_range
        self.saturation_lower, self.saturation_upper = saturation_range
        self.hue_delta = hue_delta

    def convert(self, img, alpha=1, beta=0):
        """Return ``img * alpha + beta`` clipped to [0, 255] as uint8."""
        out = img.astype(np.float32) * alpha + beta
        return np.clip(out, 0, 255).astype(np.uint8)

    def brightness(self, img):
        """Randomly shift brightness with probability 0.5."""
        if not random.randint(2):
            return img
        delta = random.uniform(-self.brightness_delta, self.brightness_delta)
        return self.convert(img, beta=delta)

    def contrast(self, img):
        """Randomly scale contrast with probability 0.5."""
        if not random.randint(2):
            return img
        scale = random.uniform(self.contrast_lower, self.contrast_upper)
        return self.convert(img, alpha=scale)

    def saturation(self, img):
        """Randomly scale the HSV saturation channel with probability 0.5."""
        if not random.randint(2):
            return img
        hsv = mmcv.bgr2hsv(img)
        hsv[:, :, 1] = self.convert(
            hsv[:, :, 1],
            alpha=random.uniform(self.saturation_lower,
                                 self.saturation_upper))
        return mmcv.hsv2bgr(hsv)

    def hue(self, img):
        """Randomly rotate the HSV hue channel with probability 0.5."""
        if not random.randint(2):
            return img
        hsv = mmcv.bgr2hsv(img)
        shift = random.randint(-self.hue_delta, self.hue_delta)
        # Hue wraps modulo 180 (OpenCV's uint8 hue range).
        hsv[:, :, 0] = (hsv[:, :, 0].astype(int) + shift) % 180
        return mmcv.hsv2bgr(hsv)

    def __call__(self, results):
        """Distort ``results['img']`` photometrically.

        Args:
            results (dict): Result dict from the loading pipeline.

        Returns:
            dict: Result dict with the distorted image.
        """
        img = results['img']
        img = self.brightness(img)

        # Contrast is applied early (mode == 1) or late (mode == 0).
        mode = random.randint(2)
        if mode == 1:
            img = self.contrast(img)

        img = self.saturation(img)
        img = self.hue(img)

        if mode == 0:
            img = self.contrast(img)

        results['img'] = img
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(brightness_delta={self.brightness_delta}, '
                f'contrast_range=({self.contrast_lower}, '
                f'{self.contrast_upper}), '
                f'saturation_range=({self.saturation_lower}, '
                f'{self.saturation_upper}), '
                f'hue_delta={self.hue_delta})')