Add files using upload-large-folder tool
Browse files
- external/Metric3D/training/mono/configs/__init__.py +1 -0
- external/Metric3D/training/mono/datasets/distributed_sampler.py +275 -0
- external/Metric3D/training/mono/datasets/eth3d_dataset.py +94 -0
- external/Metric3D/training/mono/datasets/mapillary_psd_dataset.py +35 -0
- external/Metric3D/training/mono/utils/__init__.py +1 -0
- external/Metric3D/training/mono/utils/avg_meter.py +561 -0
- external/Metric3D/training/mono/utils/comm.py +343 -0
- external/Metric3D/training/mono/utils/db.py +36 -0
- external/Metric3D/training/mono/utils/do_test.py +245 -0
- external/Metric3D/training/mono/utils/do_train.py +529 -0
- external/Metric3D/training/mono/utils/inverse_warp.py +316 -0
- external/Metric3D/training/mono/utils/logger.py +105 -0
- external/Metric3D/training/mono/utils/logit_to_depth.py +58 -0
- external/Metric3D/training/mono/utils/misc.py +67 -0
- external/Metric3D/training/mono/utils/pcd_utils.py +52 -0
- external/Metric3D/training/mono/utils/running.py +374 -0
- external/Metric3D/training/mono/utils/transform.py +1491 -0
- external/Metric3D/training/mono/utils/unproj_pcd.py +82 -0
- external/Metric3D/training/mono/utils/visualization.py +209 -0
- external/Metric3D/training/mono/utils/weather_aug_utils.py +872 -0
external/Metric3D/training/mono/configs/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
external/Metric3D/training/mono/datasets/distributed_sampler.py
ADDED
|
@@ -0,0 +1,275 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import logging
|
| 3 |
+
import torch.distributed as dist
|
| 4 |
+
import math
|
| 5 |
+
import os
|
| 6 |
+
from mono.utils.comm import get_func, main_process
|
| 7 |
+
from torch.utils.data import ConcatDataset, DataLoader
|
| 8 |
+
import random
|
| 9 |
+
import copy
|
| 10 |
+
import torch
|
| 11 |
+
import logging
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def build_dataset_n_sampler_with_cfg(cfg, phase):
    """Build the merged dataset and its custom sampler for one phase.

    Args:
        cfg: global experiment configuration holding per-dataset settings.
        phase: 'train' / 'val' / 'test'.

    Returns:
        A ``(merged_dataset, sampler)`` tuple: ``merged_dataset`` is a
        ``ConcatDataset`` over all configured dataset groups, ``sampler``
        is the ``CustomerMultiDataSampler`` that drives iteration order.
    """
    # Similar datasets are organized into groups (a nested list).
    grouped_datasets = build_data_array(cfg, phase)
    # Flatten the groups into a single torch ConcatDataset.
    merged = concatenate_datasets(grouped_datasets)
    # The sampler balances group sizes and handles distributed sharding.
    sampler = CustomerMultiDataSampler(cfg, merged, phase)
    return merged, sampler
|
| 22 |
+
|
| 23 |
+
class CustomerMultiDataSampler(torch.utils.data.Sampler):
    """
    Custom sampler over a merged (grouped) dataset.

    The merged dataset is a ConcatDataset of group-level ConcatDatasets,
    e.g. dataset_list [[A, B, C], [E, F], [M]]. During sampling, each group's
    index list is expanded (by duplication) so every group contributes the
    same number of samples per epoch as the largest group.

    Args:
        cfg: configs for each dataset; must provide ``dist_params.global_rank``.
        dataset_merge: merged datasets (torch.utils.data.ConcatDataset).
        phase: 'train' / 'val' / 'test'.
    """

    def __init__(self, cfg, dataset_merge, phase):
        self.cfg = cfg
        # assumes torch.distributed-style env vars are set — WORLD_SIZE required
        self.world_size = int(os.environ['WORLD_SIZE'])
        self.phase = phase
        self.global_rank = cfg.dist_params.global_rank
        self.dataset_merge = dataset_merge
        self.logger = logging.getLogger()
        if main_process():
            self.logger.info(f'Initilized CustomerMultiDataSampler for {phase}.')
        # fixed seeds make every rank generate the identical global index
        # list; they are advanced in create_samplers() so each epoch differs
        self.random_seed = 136
        self.random_seed_cp = 639

    def __iter__(self):
        # rebuild (and reshuffle) the global sample list for this epoch
        self.create_samplers()
        self.logger.info("Sample list of {} in rank {} is: {}".format(self.phase, self.global_rank, ' '.join(map(str, self.sample_indices_array[-20: -10]))))
        # subsample: each rank takes its contiguous shard of the global list
        rank_offset = self.each_gpu_size * self.global_rank
        rank_indices = self.sample_indices_array[rank_offset : rank_offset + self.each_gpu_size]

        assert rank_indices.size == self.each_gpu_size

        yield from rank_indices

    def __len__(self):
        # total size after padding to a multiple of world_size
        return self.total_dist_size

    def create_samplers(self):
        """Build the per-epoch global index array (identical on every rank)."""
        # per-group original index lists (values index into dataset_merge)
        self.indices_mat = []
        # per-group expanded index lists (size-equalized across groups)
        self.indices_expand_mat = []

        # max group size; each group may consist of multiple similar datasets
        max_group_size = max([len(i) for i in self.dataset_merge.datasets])

        dataset_cumulative_sizes = [0] + self.dataset_merge.cumulative_sizes

        for gi, dataset_group in enumerate(self.dataset_merge.datasets):
            # the merged dataset consists of multiple grouped datasets
            indices_expand_group = []
            indices_group = []

            # a group with less data duplicates its sample list 'cp_times'
            # times so every group ends up with max_group_size samples
            cp_times = max_group_size / len(dataset_group)

            # scale the group's cumulative sizes to the expanded total
            group_cumulative_sizes = [0] + dataset_group.cumulative_sizes
            # BUGFIX: np.int was removed in NumPy 1.24; use the builtin int
            expand_indices_sizes = (np.array(group_cumulative_sizes) * cp_times).astype(int)
            expand_indices_sizes[-1] = max_group_size
            # per-dataset expanded sizes within the group
            expand_indices_sizes = expand_indices_sizes[1:] - expand_indices_sizes[:-1]

            for di, dataset_i in enumerate(dataset_group.datasets):
                # sample indices local to dataset_i
                dataset_i_ori_sample_list = self.dataset_merge.datasets[gi].datasets[di].sample_list
                sample_list_i = dataset_i_ori_sample_list  # no shuffle here; done at group level

                # expand the sample list for each dataset: whole copies plus
                # a random partial copy to hit expand_size_i exactly
                expand_size_i = expand_indices_sizes[di]
                indices_expand_list = copy.deepcopy(sample_list_i)

                for _ in range(int(cp_times) - 1):
                    indices_expand_list += sample_list_i
                random.seed(self.random_seed_cp)
                indices_expand_list += random.sample(sample_list_i, len(dataset_i))[:expand_size_i % len(dataset_i)]
                # shift local indices to global positions in dataset_merge
                indices_expand_list = np.array(indices_expand_list) + dataset_cumulative_sizes[gi] + group_cumulative_sizes[di]
                indices_list = np.array(sample_list_i) + dataset_cumulative_sizes[gi] + group_cumulative_sizes[di]

                # the expanded sample list for dataset_i
                indices_expand_group.append(indices_expand_list)
                # the original sample list for dataset_i
                indices_group.append(indices_list)

                if main_process():
                    self.logger.info(f'"{dataset_i.data_name}", {self.phase} set in group {gi}: ' +
                                     f'expand size {len(sample_list_i)} --->>>---, {expand_size_i}')

            concat_group = np.concatenate(indices_expand_group)
            # shuffle within the group so datasets are interleaved,
            # e.g. [a1, a2, a3, b1, ...] -> [a3, b1, b2, ...]
            np.random.seed(self.random_seed)
            if self.phase == 'train':
                np.random.shuffle(concat_group)
            self.indices_expand_mat.append(concat_group)
            self.indices_mat.append(np.concatenate(indices_group))

        # create the flat sample list
        if "train" in self.phase:
            # groups are cross-sorted, i.e. [A, B, C, A, B, C, ...]
            self.sample_indices_array = np.array(self.indices_expand_mat).transpose(1, 0).reshape(-1)
            self.total_indices_size = max_group_size * len(self.dataset_merge.datasets)
        else:
            self.sample_indices_array = np.concatenate(self.indices_mat[:])
            self.total_indices_size = self.sample_indices_array.size

        self.total_sample_size = len(self.dataset_merge)
        self.each_gpu_size = int(np.ceil(self.total_indices_size * 1.0 / self.world_size))
        self.total_dist_size = self.each_gpu_size * self.world_size
        # add extra samples to make the total evenly divisible by world_size
        diff_size = int(self.total_dist_size - self.total_indices_size)
        if diff_size > 0:
            self.sample_indices_array = np.append(self.sample_indices_array, self.sample_indices_array[:diff_size])
        self.logger.info(f'Expanded data size in merged dataset: {self.total_sample_size}, adjusted data size for distributed running: {self.total_dist_size}')
        # advance the seeds so the next epoch gets a different shuffle
        self.random_seed += 413
        self.random_seed_cp += 377
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def build_data_array(cfg, phase):
    """Instantiate every configured dataset, preserving the group layout.

    ``cfg.data_array`` is a (possibly nested) list such as
    ``[['A', 'B', 'C'], ['E', 'F'], 'M']``; each entry is a one-item dict
    mapping a db-info name to the key of a dataset config section in ``cfg``.

    Args:
        cfg: global configuration; must contain ``data_array``, ``db_info``,
            ``data_basic`` and one config section per referenced dataset.
        phase: 'train' / 'val' / 'test', forwarded to each dataset.

    Returns:
        A nested list of constructed dataset objects mirroring ``cfg.data_array``.

    Raises:
        TypeError: if a group entry is not a dict.
        RuntimeError: if a referenced dataset config is missing from ``cfg``.
        KeyError: if the configured dataset class cannot be resolved.
    """
    datasets_array = []
    data_array_names_for_log = []

    for group_i in cfg.data_array:
        built_group = []
        group_names = []
        if not isinstance(group_i, list):
            group_i = [group_i, ]
        for data_i in group_i:
            if not isinstance(data_i, dict):
                raise TypeError(f'data name must be a dict, but got {type(data_i)}')
            # each entry may reference exactly one dataset config
            assert len(data_i.values()) == 1
            if list(data_i.values())[0] not in cfg:
                raise RuntimeError(f'cannot find the data config for {data_i}')

            # deep-copy so dataset construction cannot mutate the shared cfg
            cfg_copy = copy.deepcopy(cfg)
            data_i_cfg_name = list(data_i.values())[0]
            data_i_db_info_name = list(data_i.keys())[0]
            data_i_db_info = cfg.db_info[data_i_db_info_name]

            # resolve the dataset class named in the config section
            obj_name = cfg[data_i_cfg_name]['lib']
            obj_path = os.path.dirname(__file__).split(os.getcwd() + '/')[-1].replace('/', '.') + '.' + obj_name
            obj_cls = get_func(obj_path)
            if obj_cls is None:
                raise KeyError(f'{obj_name} is not in .data')

            built_group.append(obj_cls(
                cfg_copy[data_i_cfg_name],
                phase,
                db_info=data_i_db_info,
                **cfg.data_basic))
            group_names.append(data_i_db_info_name)

        datasets_array.append(built_group)
        data_array_names_for_log.append(group_names)

    if main_process():
        logger = logging.getLogger()
        logger.info(f'{phase}: data array ({data_array_names_for_log}) has been constructed.')
    return datasets_array
|
| 220 |
+
|
| 221 |
+
def concatenate_datasets(datasets_array):
    """Merge each dataset group, then merge the groups into one dataset.

    Args:
        datasets_array: nested list; every inner list holds the datasets
            belonging to one group.

    Returns:
        A ``ConcatDataset`` whose children are per-group ``ConcatDataset``s.
    """
    per_group = [ConcatDataset(group) for group in datasets_array]
    return ConcatDataset(per_group)
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def log_canonical_transfer_info(cfg):
    """Log, per dataset, how images are rescaled into canonical camera space.

    For every unique dataset config referenced by ``cfg.data_array`` this
    prints the focal-length/size transfer to canonical space and the random
    resize bounds used by the training augmentation pipeline.

    Raises:
        TypeError: if a group entry is not a dict.
        RuntimeError: if a referenced dataset config is missing from ``cfg``.
    """
    logger = logging.getLogger()
    data = []
    canonical_focal_length = cfg.data_basic.canonical_space.focal_length
    canonical_size = cfg.data_basic.canonical_space.img_size
    # First pass: collect the unique dataset-config keys.
    for group_i in cfg.data_array:
        if not isinstance(group_i, list):
            group_i = [group_i, ]
        for data_i in group_i:
            if not isinstance(data_i, dict):
                raise TypeError(f'data name must be a dict, but got {type(data_i)}')
            # each entry references exactly one dataset config
            assert len(data_i.values()) == 1
            if list(data_i.values())[0] not in cfg:
                raise RuntimeError(f'cannot find the data config for {data_i.values()}')
            if list(data_i.values())[0] not in data:
                data.append(list(data_i.values())[0])

    logger.info('>>>>>>>>>>>>>>Some data transfer details during augmentation.>>>>>>>>>>>>>>')
    for data_i in data:
        data_i_cfg = cfg[data_i]
        # original_focal_length may be a scalar or a tuple of candidates
        if type(data_i_cfg.original_focal_length) != tuple:
            ori_focal = (data_i_cfg.original_focal_length, )
        else:
            ori_focal = data_i_cfg.original_focal_length

        log_str = '%s transfer details: \n' % data_i
        for ori_f in ori_focal:
            # to canonical space: scale so the focal length matches canonical
            scalor = canonical_focal_length / ori_f
            img_size = (data_i_cfg.original_size[0]*scalor, data_i_cfg.original_size[1]*scalor)
            log_str += 'To canonical space: focal length, %f -> %f; size, %s -> %s\n' %(ori_f, canonical_focal_length, data_i_cfg.original_size, img_size)

            # random resize bounds in augmentation (pipeline[1] is assumed to
            # be the resize op — TODO(review): confirm pipeline index)
            resize_range = data_i_cfg.data.train.pipeline[1].ratio_range
            resize_low = (img_size[0]*resize_range[0], img_size[1]*resize_range[0])
            resize_up = (img_size[0]*resize_range[1], img_size[1]*resize_range[1])
            log_str += 'Random resize bound: %s ~ %s; \n' %(resize_low, resize_up)

        logger.info(log_str)
|
external/Metric3D/training/mono/datasets/eth3d_dataset.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import torch
|
| 4 |
+
import torchvision.transforms as transforms
|
| 5 |
+
import os.path
|
| 6 |
+
import numpy as np
|
| 7 |
+
import cv2
|
| 8 |
+
from torch.utils.data import Dataset
|
| 9 |
+
import random
|
| 10 |
+
from .__base_dataset__ import BaseDataset
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class ETH3DDataset(BaseDataset):
    """ETH3D high-resolution depth evaluation dataset.

    Loads RGB images plus raw float32 depth maps (4032x6048) and applies
    the standard transform pipeline inherited from BaseDataset.
    """

    def __init__(self, cfg, phase, **kwargs):
        super(ETH3DDataset, self).__init__(
            cfg=cfg,
            phase=phase,
            **kwargs)
        # divisor converting stored depth values to metres
        self.metric_scale = cfg.metric_scale

    def __getitem__(self, idx):
        """Return one sample dict with transformed RGB, depth and camera data."""
        anno = self.annotations['files'][idx]
        curr_rgb_path = os.path.join(self.data_root, anno['rgb_path'])
        curr_depth_path = os.path.join(self.depth_root, anno['depth_path'])
        meta_data = self.load_meta_data(anno)
        # NOTE(review): intrinsics are hard-coded [fx, fy, cx, cy] and
        # meta_data['cam_in'] is deliberately ignored — confirm intent.
        ori_curr_intrinsic = [2000, 2000, 3024, 2016]

        curr_rgb = cv2.imread(curr_rgb_path)  # BGR channel order from cv2
        # BUGFIX: raw float32 depth must be opened in binary mode ('rb');
        # text mode can corrupt the byte stream read by np.fromfile.
        with open(curr_depth_path, 'rb') as f:
            imgfile = np.fromfile(f, np.float32)
        curr_depth = imgfile.reshape((4032, 6048))
        # clamp implausible depths (> 100 m) to invalid (0)
        curr_depth[curr_depth > 100] = 0

        ori_h, ori_w, _ = curr_rgb.shape
        # create camera model encoding the intrinsics as dense maps
        curr_cam_model = self.create_cam_model(curr_rgb.shape[0], curr_rgb.shape[1], ori_curr_intrinsic)

        transform_paras = dict()
        rgbs, depths, intrinsics, cam_models, _, other_labels, transform_paras = self.img_transforms(
            images=[curr_rgb, ],
            labels=[curr_depth, ],
            intrinsics=[ori_curr_intrinsic, ],
            cam_models=[curr_cam_model, ],
            transform_paras=transform_paras)
        # depth target in original size, scaled back to metric range
        depth_out = self.clip_depth(curr_depth) * self.depth_range[1]

        filename = os.path.basename(anno['rgb_path'])
        curr_intrinsic_mat = self.intrinsics_list2mat(intrinsics[0])

        pad = transform_paras['pad'] if 'pad' in transform_paras else [0, 0, 0, 0]
        scale_ratio = transform_paras['label_scale_factor'] if 'label_scale_factor' in transform_paras else 1.0
        # cam model pyramid at strides 2..32 for multi-scale decoders
        cam_models_stacks = [
            torch.nn.functional.interpolate(cam_models[0][None, :, :, :], size=(cam_models[0].shape[1]//i, cam_models[0].shape[2]//i), mode='bilinear', align_corners=False).squeeze()
            for i in [2, 4, 8, 16, 32]
        ]
        raw_rgb = torch.from_numpy(curr_rgb)
        data = dict(input=rgbs[0],
                    target=depth_out,
                    intrinsic=curr_intrinsic_mat,
                    filename=filename,
                    dataset=self.data_name,
                    cam_model=cam_models_stacks,
                    ref_input=rgbs[1:],
                    tmpl_flg=False,
                    pad=pad,
                    scale=scale_ratio,
                    raw_rgb=raw_rgb,
                    # no ground-truth normals for ETH3D; supply zeros (C, H, W)
                    normal=np.zeros_like(curr_rgb.transpose((2, 0, 1))),
                    )
        return data

    def process_depth(self, depth):
        """Zero-out sentinel values and rescale stored depth to metres."""
        depth[depth > 65500] = 0   # 65500+ marks invalid pixels
        depth /= self.metric_scale
        return depth
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
if __name__ == '__main__':
    # Smoke test: build one dataset from a sample config and print it.
    from mmcv.utils import Config
    cfg = Config.fromfile('mono/configs/Apolloscape_DDAD/convnext_base.cascade.1m.sgd.mae.py')
    # BUGFIX: the original referenced undefined `NYUDataset`; this module
    # defines ETH3DDataset.
    dataset_i = ETH3DDataset(cfg['Apolloscape'], 'train', **cfg.data_basic)
    print(dataset_i)
|
| 94 |
+
|
external/Metric3D/training/mono/datasets/mapillary_psd_dataset.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import torch
|
| 4 |
+
import torchvision.transforms as transforms
|
| 5 |
+
import os.path
|
| 6 |
+
import numpy as np
|
| 7 |
+
import cv2
|
| 8 |
+
from torch.utils.data import Dataset
|
| 9 |
+
import random
|
| 10 |
+
from .__base_dataset__ import BaseDataset
|
| 11 |
+
import matplotlib.pyplot as plt
|
| 12 |
+
|
| 13 |
+
class MapillaryPSDDataset(BaseDataset):
    """Mapillary Planet-Scale Depth dataset loader.

    Inherits the full loading/transform pipeline from BaseDataset and only
    customizes how raw depth maps are cleaned and rescaled.
    """

    def __init__(self, cfg, phase, **kwargs):
        super(MapillaryPSDDataset, self).__init__(cfg=cfg, phase=phase, **kwargs)
        # divisor converting stored depth values to metric units
        self.metric_scale = cfg.metric_scale

    def process_depth(self, depth, rgb):
        """Clean sentinel values, rescale depth and resize it to the RGB size."""
        depth[depth > 65500] = 0    # sentinel values mark invalid pixels
        depth /= self.metric_scale
        target_h, target_w = rgb.shape[0], rgb.shape[1]
        # nearest-neighbour keeps depth values exact (no interpolation blur)
        return cv2.resize(depth, (target_w, target_h), interpolation=cv2.INTER_NEAREST)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
if __name__ == '__main__':
    # Smoke test: build one dataset from a sample config and print it.
    from mmcv.utils import Config
    cfg = Config.fromfile('mono/configs/Apolloscape_DDAD/convnext_base.cascade.1m.sgd.mae.py')
    # BUGFIX: the original referenced undefined `MapillaryDataset`; this
    # module defines MapillaryPSDDataset.
    dataset_i = MapillaryPSDDataset(cfg['Apolloscape'], 'train', **cfg.data_basic)
    print(dataset_i)
|
| 35 |
+
|
external/Metric3D/training/mono/utils/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
external/Metric3D/training/mono/utils/avg_meter.py
ADDED
|
@@ -0,0 +1,561 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import torch
|
| 3 |
+
import torch.distributed as dist
|
| 4 |
+
from .inverse_warp import pixel2cam, cam2pixel2
|
| 5 |
+
import torch.nn.functional as F
|
| 6 |
+
import matplotlib.pyplot as plt
|
| 7 |
+
|
| 8 |
+
class AverageMeter(object):
    """Tracks the latest value and a running (weighted) average of a metric."""

    def __init__(self) -> None:
        self.reset()

    def reset(self) -> None:
        """Clear all accumulated statistics."""
        self.val = np.longdouble(0.0)    # most recent value
        self.avg = np.longdouble(0.0)    # running average
        self.sum = np.longdouble(0.0)    # cumulative sum of values
        self.count = np.longdouble(0.0)  # total weight of samples seen

    def update(self, val, n: float = 1) -> None:
        """Fold *val* (with weight *n*) into the running statistics."""
        self.val = val
        self.sum = self.sum + val
        self.count = self.count + n
        # epsilon guards against division by zero before any update
        self.avg = self.sum / (self.count + 1e-6)
|
| 24 |
+
|
| 25 |
+
class MetricAverageMeter(AverageMeter):
|
| 26 |
+
"""
|
| 27 |
+
An AverageMeter designed specifically for evaluating segmentation results.
|
| 28 |
+
"""
|
| 29 |
+
def __init__(self, metrics: list) -> None:
    """Create one AverageMeter per supported depth/normal metric.

    Args:
        metrics: names of the metrics the caller intends to report.
    """
    # depth-metric meters
    depth_meter_names = (
        'abs_rel', 'rmse', 'silog', 'delta1', 'delta2', 'delta3',
        'consistency', 'log10', 'rmse_log', 'sq_rel',
    )
    # surface-normal meters
    normal_meter_names = (
        'normal_mean', 'normal_rmse', 'normal_a1', 'normal_a2',
        'normal_median', 'normal_a3', 'normal_a4', 'normal_a5',
    )
    for meter_name in depth_meter_names + normal_meter_names:
        setattr(self, meter_name, AverageMeter())

    self.metrics = metrics
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def update_metrics_cpu(self,
                       pred: torch.Tensor,
                       target: torch.Tensor,
                       mask: torch.Tensor,):
    """
    Update all depth metrics on CPU (single-process evaluation).

    Args:
        pred: predicted depth; 2D (H, W), 3D (B, H, W) or 4D (B, 1, H, W).
        target: ground-truth depth, same shape as ``pred``.
        mask: validity mask, same shape as ``pred``.
    """
    assert pred.shape == target.shape

    # normalize everything to (B, 1, H, W)
    if len(pred.shape) == 3:
        pred = pred[:, None, :, :]
        target = target[:, None, :, :]
        mask = mask[:, None, :, :]
    elif len(pred.shape) == 2:
        pred = pred[None, None, :, :]
        target = target[None, None, :, :]
        mask = mask[None, None, :, :]

    # Absolute relative error
    abs_rel_sum, valid_pics = get_absrel_err(pred, target, mask)
    abs_rel_sum = abs_rel_sum.numpy()
    valid_pics = valid_pics.numpy()
    self.abs_rel.update(abs_rel_sum, valid_pics)

    # squared relative error
    sqrel_sum, _ = get_sqrel_err(pred, target, mask)
    sqrel_sum = sqrel_sum.numpy()
    self.sq_rel.update(sqrel_sum, valid_pics)

    # root mean squared error
    rmse_sum, _ = get_rmse_err(pred, target, mask)
    rmse_sum = rmse_sum.numpy()
    self.rmse.update(rmse_sum, valid_pics)

    # log root mean squared error
    # BUGFIX: was accumulated into self.rmse, corrupting both metrics
    log_rmse_sum, _ = get_rmse_log_err(pred, target, mask)
    log_rmse_sum = log_rmse_sum.numpy()
    self.rmse_log.update(log_rmse_sum, valid_pics)

    # log10 error
    # BUGFIX: was accumulated into self.rmse, corrupting both metrics
    log10_sum, _ = get_log10_err(pred, target, mask)
    log10_sum = log10_sum.numpy()
    self.log10.update(log10_sum, valid_pics)

    # scale-invariant root mean squared error in log space
    silog_sum, _ = get_silog_err(pred, target, mask)
    silog_sum = silog_sum.numpy()
    self.silog.update(silog_sum, valid_pics)

    # threshold-ratio accuracies delta1/delta2/delta3
    # BUGFIX: delta2 and delta3 were updated with delta1_sum
    delta1_sum, delta2_sum, delta3_sum, _ = get_ratio_error(pred, target, mask)
    delta1_sum = delta1_sum.numpy()
    delta2_sum = delta2_sum.numpy()
    delta3_sum = delta3_sum.numpy()

    self.delta1.update(delta1_sum, valid_pics)
    self.delta2.update(delta2_sum, valid_pics)
    self.delta3.update(delta3_sum, valid_pics)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def update_metrics_gpu(
    self,
    pred: torch.Tensor,
    target: torch.Tensor,
    mask: torch.Tensor,
    is_distributed: bool,
    pred_next: torch.tensor = None,
    pose_f1_to_f2: torch.tensor = None,
    intrinsic: torch.tensor = None):
    """
    Update metric on GPU. It supports distributed processing. If multiple machines are employed, please
    set 'is_distributed' as True.

    Args:
        pred: predicted depth, [B, 1, H, W] (also accepts [B, H, W] or [H, W]).
        target: ground-truth depth, same shape as pred.
        mask: valid-pixel mask, same shape as pred.
        is_distributed: when True, every error sum is all-reduced across ranks
            before being folded into the running meters.
        pred_next / pose_f1_to_f2 / intrinsic: optional next-frame prediction,
            frame1->frame2 ego pose and camera intrinsics; used only by the
            video-consistency metric, which is a no-op when any of them is None.
    """
    assert pred.shape == target.shape

    # Normalize all inputs to [B, C, H, W].
    if len(pred.shape) == 3:
        pred = pred[:, None, :, :]
        target = target[:, None, :, :]
        mask = mask[:, None, :, :]
    elif len(pred.shape) == 2:
        pred = pred[None, None, :, :]
        target = target[None, None, :, :]
        mask = mask[None, None, :, :]

    # Absolute relative error. `valid_pics` (images with >= 1 valid pixel) is
    # computed once here and reused as the weight for all the meters below.
    abs_rel_sum, valid_pics = get_absrel_err(pred, target, mask)
    if is_distributed:
        dist.all_reduce(abs_rel_sum), dist.all_reduce(valid_pics)
    abs_rel_sum = abs_rel_sum.cpu().numpy()
    valid_pics = int(valid_pics)
    self.abs_rel.update(abs_rel_sum, valid_pics)

    # Root mean squared error.
    rmse_sum, _ = get_rmse_err(pred, target, mask)
    if is_distributed:
        dist.all_reduce(rmse_sum)
    rmse_sum = rmse_sum.cpu().numpy()
    self.rmse.update(rmse_sum, valid_pics)

    # Log-space root mean squared error.
    log_rmse_sum, _ = get_rmse_log_err(pred, target, mask)
    if is_distributed:
        dist.all_reduce(log_rmse_sum)
    log_rmse_sum = log_rmse_sum.cpu().numpy()
    self.rmse_log.update(log_rmse_sum, valid_pics)

    # log10 error.
    log10_sum, _ = get_log10_err(pred, target, mask)
    if is_distributed:
        dist.all_reduce(log10_sum)
    log10_sum = log10_sum.cpu().numpy()
    self.log10.update(log10_sum, valid_pics)

    # Scale-invariant root mean squared error in log space.
    silog_sum, _ = get_silog_err(pred, target, mask)
    if is_distributed:
        dist.all_reduce(silog_sum)
    silog_sum = silog_sum.cpu().numpy()
    self.silog.update(silog_sum, valid_pics)

    # Threshold accuracies delta1/2/3 (max depth ratio < 1.25 / 1.25^2 / 1.25^3).
    delta1_sum, delta2_sum, delta3_sum, _ = get_ratio_error(pred, target, mask)
    if is_distributed:
        dist.all_reduce(delta1_sum), dist.all_reduce(delta2_sum), dist.all_reduce(delta3_sum)
    delta1_sum = delta1_sum.cpu().numpy()
    delta2_sum = delta2_sum.cpu().numpy()
    delta3_sum = delta3_sum.cpu().numpy()

    self.delta1.update(delta1_sum, valid_pics)
    self.delta2.update(delta2_sum, valid_pics)
    self.delta3.update(delta3_sum, valid_pics)

    # Video-consistency error between consecutive frames; weighted by the
    # number of valid warped pixels rather than by valid_pics.
    consistency_rel_sum, valid_warps = get_video_consistency_err(pred, pred_next, pose_f1_to_f2, intrinsic)
    if is_distributed:
        dist.all_reduce(consistency_rel_sum), dist.all_reduce(valid_warps)
    consistency_rel_sum = consistency_rel_sum.cpu().numpy()
    valid_warps = int(valid_warps)
    self.consistency.update(consistency_rel_sum, valid_warps)
| 200 |
+
|
| 201 |
+
## for surface normal
def update_normal_metrics_gpu(
    self,
    pred: torch.Tensor,     # (B, 3, H, W)
    target: torch.Tensor,   # (B, 3, H, W)
    mask: torch.Tensor,     # (B, 1, H, W)
    is_distributed: bool,
    ):
    """
    Update surface-normal metrics (angular-error statistics between predicted
    and ground-truth normal maps) on GPU. It supports distributed processing:
    set 'is_distributed' as True when multiple machines are employed.
    """
    assert pred.shape == target.shape

    # Number of valid pixels; the epsilon keeps later divisions finite.
    valid_pics = torch.sum(mask, dtype=torch.float32) + 1e-6

    # Skip batches with (almost) no annotated normals.
    if valid_pics < 10:
        return

    # Placeholder initialization; every name is overwritten below.
    mean_error = rmse_error = a1_error = a2_error = dist_node_cnt = valid_pics
    # Per-pixel angular error in degrees, via clamped cosine similarity.
    normal_error = torch.cosine_similarity(pred, target, dim=1)
    normal_error = torch.clamp(normal_error, min=-1.0, max=1.0)
    angle_error = torch.acos(normal_error) * 180.0 / torch.pi
    angle_error = angle_error[:, None, :, :]
    # Boolean-mask down to the valid pixels only (flattens to 1-D).
    angle_error = angle_error[mask]
    # Calculation error
    mean_error = angle_error.sum() / valid_pics
    rmse_error = torch.sqrt( torch.sum(torch.square(angle_error)) / valid_pics )
    median_error = angle_error.median()
    # Percentage of pixels under the usual angular thresholds.
    a1_error = 100.0 * (torch.sum(angle_error < 5) / valid_pics)
    a2_error = 100.0 * (torch.sum(angle_error < 7.5) / valid_pics)

    a3_error = 100.0 * (torch.sum(angle_error < 11.25) / valid_pics)
    a4_error = 100.0 * (torch.sum(angle_error < 22.5) / valid_pics)
    a5_error = 100.0 * (torch.sum(angle_error < 30) / valid_pics)

    # ~1 when this node saw valid normals, ~0 otherwise; after the all_reduce
    # below it counts how many ranks contributed.
    dist_node_cnt = (valid_pics - 1e-6) / valid_pics

    if is_distributed:
        dist.all_reduce(dist_node_cnt)
        dist.all_reduce(mean_error)
        dist.all_reduce(rmse_error)
        dist.all_reduce(a1_error)
        dist.all_reduce(a2_error)

        dist.all_reduce(a3_error)
        dist.all_reduce(a4_error)
        dist.all_reduce(a5_error)
        # NOTE(review): median_error is NOT all-reduced, so in distributed
        # runs each rank folds in its own local median — confirm intended.

    dist_node_cnt = dist_node_cnt.cpu().numpy()
    self.normal_mean.update(mean_error.cpu().numpy(), dist_node_cnt)
    self.normal_rmse.update(rmse_error.cpu().numpy(), dist_node_cnt)
    self.normal_a1.update(a1_error.cpu().numpy(), dist_node_cnt)
    self.normal_a2.update(a2_error.cpu().numpy(), dist_node_cnt)

    self.normal_median.update(median_error.cpu().numpy(), dist_node_cnt)
    self.normal_a3.update(a3_error.cpu().numpy(), dist_node_cnt)
    self.normal_a4.update(a4_error.cpu().numpy(), dist_node_cnt)
    self.normal_a5.update(a5_error.cpu().numpy(), dist_node_cnt)
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
def get_metrics(self,):
    """Return {metric_name: running average} for every tracked metric."""
    return {name: self.__getattribute__(name).avg for name in self.metrics}
| 271 |
+
|
| 272 |
+
|
| 273 |
+
def get_metrics(self,):
    """
    Return a dict mapping each tracked metric name to its running average.

    NOTE(review): this is an exact duplicate of the `get_metrics` defined
    immediately above; being later in the class body, this definition is the
    one that takes effect. Consider deleting one of the two.
    """
    metrics_dict = {}
    for metric in self.metrics:
        metrics_dict[metric] = self.__getattribute__(metric).avg
    return metrics_dict
| 280 |
+
|
| 281 |
+
|
| 282 |
+
def get_absrel_err(pred: torch.tensor,
                   target: torch.tensor,
                   mask: torch.tensor):
    """
    Computes absolute relative error.
    Takes preprocessed depths (no nans, infs and non-positive values).
    pred, target, and mask should be in the shape of [b, c, h, w].

    Returns:
        Tuple of (sum over the batch of per-image mean abs-rel error,
        number of images with at least one valid pixel).
    """
    # Bug fix: the original `assert cond1, cond2` used the second condition as
    # the assert *message*, so target's rank was never actually checked.
    assert len(pred.shape) == 4 and len(target.shape) == 4
    b, c, h, w = pred.shape
    mask = mask.to(torch.float)
    t_m = target * mask
    p_m = pred * mask

    # Mean absolute relative error, averaged over valid pixels per image.
    rel = torch.abs(t_m - p_m) / (t_m + 1e-10)  # compute errors
    abs_rel_sum = torch.sum(rel.reshape((b, c, -1)), dim=2)  # [b, c]
    num = torch.sum(mask.reshape((b, c, -1)), dim=2)  # [b, c]
    abs_err = abs_rel_sum / (num + 1e-10)
    valid_pics = torch.sum(num > 0)
    return torch.sum(abs_err), valid_pics
|
| 304 |
+
|
| 305 |
+
def get_sqrel_err(pred: torch.tensor,
                  target: torch.tensor,
                  mask: torch.tensor):
    """
    Computes squared relative error.
    Takes preprocessed depths (no nans, infs and non-positive values).
    pred, target, and mask should be in the shape of [b, c, h, w].

    Returns:
        Tuple of (sum over the batch of per-image mean squared-relative error,
        number of images with at least one valid pixel).
    """
    # Bug fix: the original `assert cond1, cond2` used the second condition as
    # the assert *message*, so target's rank was never actually checked.
    assert len(pred.shape) == 4 and len(target.shape) == 4
    b, c, h, w = pred.shape
    mask = mask.to(torch.float)
    t_m = target * mask
    p_m = pred * mask

    # Mean squared relative error, averaged over valid pixels per image.
    sq_rel = torch.abs(t_m - p_m)**2 / (t_m + 1e-10)  # compute errors
    sq_rel_sum = torch.sum(sq_rel.reshape((b, c, -1)), dim=2)  # [b, c]
    num = torch.sum(mask.reshape((b, c, -1)), dim=2)  # [b, c]
    sqrel_err = sq_rel_sum / (num + 1e-10)
    valid_pics = torch.sum(num > 0)
    return torch.sum(sqrel_err), valid_pics
|
| 327 |
+
|
| 328 |
+
def get_log10_err(pred: torch.tensor,
                  target: torch.tensor,
                  mask: torch.tensor):
    """
    Computes log10 error (mean |log10(pred) - log10(target)| over valid pixels).
    Takes preprocessed depths (no nans, infs and non-positive values).
    pred, target, and mask should be in the shape of [b, c, h, w].

    Returns:
        Tuple of (sum over the batch of per-image mean log10 error,
        number of images with at least one valid pixel).
    """
    # Bug fix: the original `assert cond1, cond2` used the second condition as
    # the assert *message*, so target's rank was never actually checked.
    assert len(pred.shape) == 4 and len(target.shape) == 4
    b, c, h, w = pred.shape
    mask = mask.to(torch.float)
    t_m = target * mask
    p_m = pred * mask

    # Mask again after the log so masked-out pixels (log(1e-10)) contribute 0.
    diff_log = (torch.log10(p_m + 1e-10) - torch.log10(t_m + 1e-10)) * mask
    log10_diff = torch.abs(diff_log)  # compute errors
    log10_sum = torch.sum(log10_diff.reshape((b, c, -1)), dim=2)  # [b, c]
    num = torch.sum(mask.reshape((b, c, -1)), dim=2)  # [b, c]
    abs_err = log10_sum / (num + 1e-10)
    valid_pics = torch.sum(num > 0)
    return torch.sum(abs_err), valid_pics
|
| 350 |
+
|
| 351 |
+
def get_rmse_err(pred: torch.tensor,
                 target: torch.tensor,
                 mask: torch.tensor):
    """
    Computes root mean squared error.  (Docstring fix: the original said
    "log root mean squared error", copied from the log variant below.)
    Takes preprocessed depths (no nans, infs and non-positive values).
    pred, target, and mask should be in the shape of [b, c, h, w].

    Returns:
        Tuple of (sum over the batch of per-image RMSE,
        number of images with at least one valid pixel).
    """
    # Bug fix: the original `assert cond1, cond2` used the second condition as
    # the assert *message*, so target's rank was never actually checked.
    assert len(pred.shape) == 4 and len(target.shape) == 4
    b, c, h, w = pred.shape
    mask = mask.to(torch.float)
    t_m = target * mask
    p_m = pred * mask

    square = (t_m - p_m) ** 2
    rmse_sum = torch.sum(square.reshape((b, c, -1)), dim=2)  # [b, c]
    num = torch.sum(mask.reshape((b, c, -1)), dim=2)  # [b, c]
    rmse = torch.sqrt(rmse_sum / (num + 1e-10))
    valid_pics = torch.sum(num > 0)
    return torch.sum(rmse), valid_pics
|
| 371 |
+
|
| 372 |
+
def get_rmse_log_err(pred: torch.tensor,
                     target: torch.tensor,
                     mask: torch.tensor):
    """
    Computes log-space root mean squared error.  (Docstring fix: the original
    said "root mean squared error", swapped with the linear variant above.)
    Takes preprocessed depths (no nans, infs and non-positive values).
    pred, target, and mask should be in the shape of [b, c, h, w].

    Returns:
        Tuple of (sum over the batch of per-image log-RMSE,
        number of images with at least one valid pixel).
    """
    # Bug fix: the original `assert cond1, cond2` used the second condition as
    # the assert *message*, so target's rank was never actually checked.
    assert len(pred.shape) == 4 and len(target.shape) == 4
    b, c, h, w = pred.shape
    mask = mask.to(torch.float)
    t_m = target * mask
    p_m = pred * mask

    # Mask again after the log so masked-out pixels contribute 0.
    diff_log = (torch.log(p_m + 1e-10) - torch.log(t_m + 1e-10)) * mask
    square = diff_log ** 2
    rmse_sum = torch.sum(square.reshape((b, c, -1)), dim=2)  # [b, c]
    num = torch.sum(mask.reshape((b, c, -1)), dim=2)  # [b, c]
    rmse = torch.sqrt(rmse_sum / (num + 1e-10))
    valid_pics = torch.sum(num > 0)
    return torch.sum(rmse), valid_pics
|
| 393 |
+
|
| 394 |
+
|
| 395 |
+
def get_silog_err(pred: torch.tensor,
                  target: torch.tensor,
                  mask: torch.tensor):
    """
    Computes the scale-invariant error based on differences of logs of depth maps.
    Takes preprocessed depths (no nans, infs and non-positive values).
    pred, target, and mask should be in the shape of [b, c, h, w].

    Returns:
        Tuple of (sum over the batch of per-image silog,
        number of images with at least one valid pixel).
    """
    # Bug fix: the original `assert cond1, cond2` used the second condition as
    # the assert *message*, so target's rank was never actually checked.
    assert len(pred.shape) == 4 and len(target.shape) == 4
    b, c, h, w = pred.shape
    mask = mask.to(torch.float)
    t_m = target * mask
    p_m = pred * mask

    diff_log = (torch.log(p_m + 1e-10) - torch.log(t_m + 1e-10)) * mask
    diff_log_sum = torch.sum(diff_log.reshape((b, c, -1)), dim=2)  # [b, c]
    diff_log_square = diff_log ** 2
    diff_log_square_sum = torch.sum(diff_log_square.reshape((b, c, -1)), dim=2)  # [b, c]
    num = torch.sum(mask.reshape((b, c, -1)), dim=2)  # [b, c]
    # Variance of the log difference. Robustness fix: clamp to >= 0 — floating
    # point cancellation could make E[x^2] - E[x]^2 slightly negative (e.g.
    # when pred is an exact scale of target), which made sqrt return NaN.
    var = diff_log_square_sum / (num + 1e-10) - (diff_log_sum / (num + 1e-10)) ** 2
    silog = torch.sqrt(torch.clamp(var, min=0.0))
    valid_pics = torch.sum(num > 0)
    if torch.isnan(torch.sum(silog)):
        print('None in silog')
    return torch.sum(silog), valid_pics
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
def get_ratio_error(pred: torch.tensor,
                    target: torch.tensor,
                    mask: torch.tensor):
    """
    Computes the percentage of pixels for which the max of the two depth
    ratios (pred/gt, gt/pred) is below 1.25, 1.25^2 and 1.25^3 (delta1/2/3).
    Takes preprocessed depths (no nans, infs and non-positive values).
    pred, target, and mask should be in the shape of [b, c, h, w]; the final
    mask reshape assumes c == 1.

    Returns:
        Tuple of (sum over the batch of delta1, delta2, delta3,
        number of images with at least one valid pixel).
    """
    # Bug fix: the original `assert cond1, cond2` used the second condition as
    # the assert *message*, so target's rank was never actually checked.
    assert len(pred.shape) == 4 and len(target.shape) == 4
    b, c, h, w = pred.shape
    mask = mask.to(torch.float)
    t_m = target * mask
    # pred is deliberately left unmasked here: invalid pixels are removed when
    # the threshold indicators are multiplied by `mask` in the sums below.
    p_m = pred

    gt_pred = t_m / (p_m + 1e-10)
    pred_gt = p_m / (t_m + 1e-10)
    gt_pred = gt_pred.reshape((b, c, -1))
    pred_gt = pred_gt.reshape((b, c, -1))
    gt_pred_gt = torch.cat((gt_pred, pred_gt), axis=1)
    ratio_max = torch.amax(gt_pred_gt, axis=1)  # [b, h*w]

    mask = mask.reshape((b, -1))
    delta_1_sum = torch.sum((ratio_max < 1.25) * mask, dim=1)  # [b, ]
    delta_2_sum = torch.sum((ratio_max < 1.25**2) * mask, dim=1)  # [b, ]
    delta_3_sum = torch.sum((ratio_max < 1.25**3) * mask, dim=1)  # [b, ]
    num = torch.sum(mask, dim=1)  # [b, ]

    delta_1 = delta_1_sum / (num + 1e-10)
    delta_2 = delta_2_sum / (num + 1e-10)
    delta_3 = delta_3_sum / (num + 1e-10)
    valid_pics = torch.sum(num > 0)

    return torch.sum(delta_1), torch.sum(delta_2), torch.sum(delta_3), valid_pics
|
| 454 |
+
|
| 455 |
+
def unproj_pcd(
    depth: torch.tensor,
    intrinsic: torch.tensor
    ):
    """
    Back-project a depth map into a camera-frame point cloud.

    Args:
        depth: [B, 1, H, W] (or [B, H, W] after squeeze) depth map.
        intrinsic: [B, 3, 3] camera intrinsics.
    Returns:
        pcd: [B, 3, H, W] points (x, y, z) in the camera frame.
    """
    depth = depth.squeeze(1)  # [B, H, W]
    b, h, w = depth.size()
    v = torch.arange(0, h).view(1, h, 1).expand(b, h, w).type_as(depth)  # [B, H, W]
    u = torch.arange(0, w).view(1, 1, w).expand(b, h, w).type_as(depth)  # [B, H, W]
    # Reshape the per-batch intrinsic entries to [B, 1, 1] so they broadcast
    # against [B, H, W] (bug fix: a bare [B] vector only broadcast for B == 1).
    fx = intrinsic[:, 0, 0].view(b, 1, 1)
    fy = intrinsic[:, 1, 1].view(b, 1, 1)
    cx = intrinsic[:, 0, 2].view(b, 1, 1)
    cy = intrinsic[:, 1, 2].view(b, 1, 1)
    x = (u - cx) / fx * depth  # [B, H, W]
    # Bug fix: the y coordinate previously divided by fx (intrinsic[:, 0, 0]);
    # the pinhole model requires fy here.
    y = (v - cy) / fy * depth  # [B, H, W]
    pcd = torch.stack([x, y, depth], dim=1)
    return pcd
|
| 467 |
+
|
| 468 |
+
def forward_warp(
    depth: torch.tensor,
    intrinsic: torch.tensor,
    pose: torch.tensor,
    ):
    """
    Warp the depth with the provided pose (forward splatting).
    Args:
        depth: depth map of the target image -- [B, 1, H, W]
        intrinsic: camera intrinsic parameters -- [B, 3, 3]
        pose: the camera pose -- [B, 4, 4]
    Returns:
        proj_depth: warped depth -- [B, 1, H, W]; pixels that receive no
        projection remain 0.
    """
    B, _, H, W = depth.shape
    pcd = unproj_pcd(depth.float(), intrinsic.float())
    pcd = pcd.reshape(B, 3, -1)  # [B, 3, H*W]
    rot, tr = pose[:, :3, :3], pose[:, :3, -1:]
    proj_pcd = rot @ pcd + tr  # points expressed in the source camera frame

    img_coors = intrinsic @ proj_pcd  # project back into pixel coordinates

    X = img_coors[:, 0, :]
    Y = img_coors[:, 1, :]
    Z = img_coors[:, 2, :].clamp(min=1e-3)  # avoid division by ~0 depth

    # Round to the nearest pixel.
    x_img_coor = (X/Z + 0.5).long()
    y_img_coor = (Y/Z + 0.5).long()

    # Keep only projections that land inside the image.
    X_mask = ((x_img_coor >= 0) & (x_img_coor < W))
    Y_mask = ((y_img_coor >= 0) & (y_img_coor < H))
    mask = X_mask & Y_mask

    proj_depth = torch.zeros_like(Z).reshape(B, 1, H, W)
    for i in range(B):
        proj_depth[i, :, y_img_coor[i, ...][mask[i, ...]], x_img_coor[i, ...][mask[i, ...]]] = Z[i, ...][mask[i, ...]]
    # Bug fix: removed leftover debug `plt.imsave('warp2.png', ...)` — it wrote
    # a file on every call and `plt` is not imported in this module, which
    # raised a NameError at runtime.
    return proj_depth
|
| 504 |
+
|
| 505 |
+
|
| 506 |
+
def get_video_consistency_err(
    pred_f1: torch.tensor,
    pred_f2: torch.tensor,
    ego_pose_f1_to_f2: torch.tensor,
    intrinsic: torch.tensor,
    ):
    """
    Compute consistency error between consecutive frames.

    Frame-1 depth is warped into frame 2 with the ego pose; frame-2 depth is
    bilinearly sampled at the warped coordinates, and the abs-rel difference
    between the two depths over mutually valid pixels is returned.

    Returns:
        (abs-rel error sum, valid-pixel count); both zero (with gradients
        attached to pred_f1) when pred_f2 / pose / intrinsic is missing.
    """
    if pred_f2 is None or ego_pose_f1_to_f2 is None or intrinsic is None:
        # Degenerate call: no next frame available -> zero error, zero weight.
        return torch.zeros_like(pred_f1).sum(), torch.zeros_like(pred_f1).sum()
    ego_pose_f1_to_f2 = ego_pose_f1_to_f2.float()
    pred_f2 = pred_f2.float()

    # Normalize both predictions to [B, 1, H, W].
    pred_f1 = pred_f1[:, None, :, :] if pred_f1.ndim == 3 else pred_f1
    pred_f2 = pred_f2[:, None, :, :] if pred_f2.ndim == 3 else pred_f2
    pred_f1 = pred_f1[None, None, :, :] if pred_f1.ndim == 2 else pred_f1
    pred_f2 = pred_f2[None, None, :, :] if pred_f2.ndim == 2 else pred_f2

    B, _, H, W = pred_f1.shape
    # Get projection matrix for tgt camera frame to source pixel frame.
    # NOTE(review): pixel2cam / cam2pixel2 come from this package's
    # inverse-warp utilities; their semantics are assumed from usage here.
    cam_coords = pixel2cam(pred_f1.squeeze(1).float(), intrinsic.inverse().float())  # [B,3,H,W]

    proj_f1_to_f2 = intrinsic @ ego_pose_f1_to_f2[:, :3, :]  # [B, 3, 4]
    rot, tr = proj_f1_to_f2[:, :, :3], proj_f1_to_f2[:, :, -1:]
    f2_pixel_coords, warped_depth_f1_to_f2 = cam2pixel2(cam_coords, rot, tr, padding_mode="zeros")  # [B,H,W,2]

    # Sample frame-2 depth at the warped (normalized) pixel coordinates.
    projected_depth = F.grid_sample(pred_f2, f2_pixel_coords, padding_mode="zeros", align_corners=False)

    # A pixel counts only when both depths are positive (inside both images).
    mask_valid = (projected_depth > 1e-6) & (warped_depth_f1_to_f2 > 1e-6)

    consistency_rel_err, valid_pix = get_absrel_err(warped_depth_f1_to_f2, projected_depth, mask_valid)
    return consistency_rel_err, valid_pix
|
| 545 |
+
|
| 546 |
+
|
| 547 |
+
if __name__ == '__main__':
    # Smoke test: run random depths through the meter and print the metrics.
    cfg = ['abs_rel', 'delta1']
    # `MetricAverageMeter` is defined earlier in this module.
    dam = MetricAverageMeter(cfg)

    pred_depth = np.random.random([2, 480, 640])
    # Roughly half of the ground-truth values are negative so that the mask
    # below exercises the invalid-pixel path.
    gt_depth = np.random.random([2, 480, 640]) - 0.5
    intrinsic = [[100, 100, 200, 200], [200, 200, 300, 300]]  # currently unused

    # NOTE(review): requires a CUDA device.
    pred = torch.from_numpy(pred_depth).cuda()
    gt = torch.from_numpy(gt_depth).cuda()

    mask = gt > 0
    # `pred` is deliberately compared with itself: abs_rel should come out ~0.
    dam.update_metrics_gpu(pred, pred, mask, False)
    eval_error = dam.get_metrics()
    print(eval_error)
|
external/Metric3D/training/mono/utils/comm.py
ADDED
|
@@ -0,0 +1,343 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import importlib
|
| 2 |
+
import torch
|
| 3 |
+
import torch.distributed as dist
|
| 4 |
+
from .avg_meter import AverageMeter
|
| 5 |
+
from collections import defaultdict, OrderedDict
|
| 6 |
+
import os
|
| 7 |
+
import socket
|
| 8 |
+
from mmcv.utils import collect_env as collect_base_env
|
| 9 |
+
try:
|
| 10 |
+
from mmcv.utils import get_git_hash
|
| 11 |
+
except:
|
| 12 |
+
from mmengine import get_git_hash
|
| 13 |
+
#import mono.mmseg as mmseg
|
| 14 |
+
import mmseg
|
| 15 |
+
import time
|
| 16 |
+
import datetime
|
| 17 |
+
import logging
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def main_process() -> bool:
    """True iff this process is global rank 0 (the logging/checkpoint rank)."""
    rank = get_rank()
    return rank == 0
|
| 24 |
+
|
| 25 |
+
def get_world_size() -> int:
    """Number of distributed processes; 1 when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
|
| 31 |
+
|
| 32 |
+
def get_rank() -> int:
    """This process's global rank; 0 when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_rank()
    return 0
|
| 38 |
+
|
| 39 |
+
def _find_free_port():
|
| 40 |
+
# refer to https://github.com/facebookresearch/detectron2/blob/main/detectron2/engine/launch.py # noqa: E501
|
| 41 |
+
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
| 42 |
+
# Binding to port 0 will cause the OS to find an available port for us
|
| 43 |
+
sock.bind(('', 0))
|
| 44 |
+
port = sock.getsockname()[1]
|
| 45 |
+
sock.close()
|
| 46 |
+
# NOTE: there is still a chance the port could be taken by other processes.
|
| 47 |
+
return port
|
| 48 |
+
|
| 49 |
+
def _is_free_port(port):
    """Return True when `port` is not accepting connections on any local IP."""
    candidate_ips = socket.gethostbyname_ex(socket.gethostname())[-1]
    candidate_ips.append('localhost')
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        for ip in candidate_ips:
            if s.connect_ex((ip, port)) == 0:
                # Something answered on this port -> it is in use.
                return False
        return True
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def collect_env():
    """Collect the information of the running environments, adding the
    MMSegmentation version (with short git hash) to mmcv's base report."""
    env_info = collect_base_env()
    mmseg_version = f'{mmseg.__version__}+{get_git_hash()[:7]}'
    env_info['MMSegmentation'] = mmseg_version
    return env_info
|
| 62 |
+
|
| 63 |
+
def init_env(launcher, cfg):
    """Initialize distributed training environment.

    If argument ``cfg.dist_params.dist_url`` is specified as 'env://', then the
    master port will be the system environment variable ``MASTER_PORT``. If
    ``MASTER_PORT`` is not in the system environment, a default port ``29500``
    will be used.

    Args:
        launcher: one of 'slurm', 'ror', 'None' (the string), 'pytorch'.
        cfg: config object whose ``cfg.dist_params`` gets populated.

    Raises:
        RuntimeError: when `launcher` is not one of the supported values.
    """
    if launcher == 'slurm':
        _init_dist_slurm(cfg)
    elif launcher == 'ror':
        _init_dist_ror(cfg)
    elif launcher == 'None':
        _init_none_dist(cfg)
    elif launcher == 'pytorch':
        _init_dist_pytorch(cfg)
    else:
        # Bug fix: report the unsupported `launcher` argument itself; the
        # original formatted `cfg.launcher`, which may not exist and in any
        # case is not the value that was checked above.
        raise RuntimeError(f'{launcher} has not been supported!')
|
| 79 |
+
|
| 80 |
+
def _init_none_dist(cfg):
|
| 81 |
+
cfg.dist_params.num_gpus_per_node = 1
|
| 82 |
+
cfg.dist_params.world_size = 1
|
| 83 |
+
cfg.dist_params.nnodes = 1
|
| 84 |
+
cfg.dist_params.node_rank = 0
|
| 85 |
+
cfg.dist_params.global_rank = 0
|
| 86 |
+
cfg.dist_params.local_rank = 0
|
| 87 |
+
os.environ["WORLD_SIZE"] = str(1)
|
| 88 |
+
|
| 89 |
+
def _init_dist_ror(cfg):
    """Populate cfg.dist_params from the ac2 'ror' launcher's comm helpers."""
    from ac2.ror.comm import get_local_rank, get_world_rank, get_local_size, get_node_rank, get_world_size
    local_size = get_local_size()
    world_size = get_world_size()
    params = cfg.dist_params
    params.num_gpus_per_node = local_size
    params.world_size = world_size
    params.nnodes = world_size // local_size
    params.node_rank = get_node_rank()
    params.global_rank = get_world_rank()
    params.local_rank = get_local_rank()
    os.environ["WORLD_SIZE"] = str(world_size)
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def _init_dist_pytorch(cfg):
|
| 101 |
+
# load env. paras.
|
| 102 |
+
local_rank = int(os.environ['LOCAL_RANK'])
|
| 103 |
+
world_size = int(os.environ['WORLD_SIZE'])
|
| 104 |
+
global_rank = int(os.environ['RANK'])
|
| 105 |
+
num_gpus = torch.cuda.device_count()
|
| 106 |
+
|
| 107 |
+
cfg.dist_params.num_gpus_per_node = num_gpus
|
| 108 |
+
cfg.dist_params.world_size = world_size
|
| 109 |
+
cfg.dist_params.nnodes = int(world_size // num_gpus)
|
| 110 |
+
cfg.dist_params.node_rank = int(global_rank % num_gpus)
|
| 111 |
+
cfg.dist_params.global_rank = global_rank
|
| 112 |
+
|
| 113 |
+
os.environ['NODE_RANK'] = str(cfg.dist_params.node_rank)
|
| 114 |
+
# set dist_url to 'env://'
|
| 115 |
+
cfg.dist_params.dist_url = 'env://' #f"{master_addr}:{master_port}"
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def _init_dist_slurm(cfg):
|
| 119 |
+
if 'NNODES' not in os.environ:
|
| 120 |
+
os.environ['NNODES'] = str(cfg.dist_params.nnodes)
|
| 121 |
+
if 'NODE_RANK' not in os.environ:
|
| 122 |
+
os.environ['NODE_RANK'] = str(cfg.dist_params.node_rank)
|
| 123 |
+
|
| 124 |
+
#cfg.dist_params.
|
| 125 |
+
num_gpus = torch.cuda.device_count()
|
| 126 |
+
world_size = int(os.environ['NNODES']) * num_gpus
|
| 127 |
+
os.environ['WORLD_SIZE'] = str(world_size)
|
| 128 |
+
|
| 129 |
+
# config port
|
| 130 |
+
if 'MASTER_PORT' in os.environ:
|
| 131 |
+
master_port = str(os.environ['MASTER_PORT']) # use MASTER_PORT in the environment variable
|
| 132 |
+
else:
|
| 133 |
+
# if torch.distributed default port(29500) is available
|
| 134 |
+
# then use it, else find a free port
|
| 135 |
+
if _is_free_port(16500):
|
| 136 |
+
master_port = '16500'
|
| 137 |
+
else:
|
| 138 |
+
master_port = str(_find_free_port())
|
| 139 |
+
os.environ['MASTER_PORT'] = master_port
|
| 140 |
+
|
| 141 |
+
# config addr
|
| 142 |
+
if 'MASTER_ADDR' in os.environ:
|
| 143 |
+
master_addr = str(os.environ['MASTER_PORT']) # use MASTER_PORT in the environment variable
|
| 144 |
+
# elif cfg.dist_params.dist_url is not None:
|
| 145 |
+
# master_addr = ':'.join(str(cfg.dist_params.dist_url).split(':')[:2])
|
| 146 |
+
else:
|
| 147 |
+
master_addr = '127.0.0.1' #'tcp://127.0.0.1'
|
| 148 |
+
os.environ['MASTER_ADDR'] = master_addr
|
| 149 |
+
|
| 150 |
+
# set dist_url to 'env://'
|
| 151 |
+
cfg.dist_params.dist_url = 'env://' #f"{master_addr}:{master_port}"
|
| 152 |
+
|
| 153 |
+
cfg.dist_params.num_gpus_per_node = num_gpus
|
| 154 |
+
cfg.dist_params.world_size = world_size
|
| 155 |
+
cfg.dist_params.nnodes = int(os.environ['NNODES'])
|
| 156 |
+
cfg.dist_params.node_rank = int(os.environ['NODE_RANK'])
|
| 157 |
+
|
| 158 |
+
# if int(os.environ['NNODES']) > 1 and cfg.dist_params.dist_url.startswith("file://"):
|
| 159 |
+
# raise Warning("file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://")
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def get_func(func_name):
    """
    Helper to return a function object by name. func_name must identify
    a function in this module or the path to a function relative to the base
    module.

    A bare name is looked up in this module's globals; a dotted path is
    resolved as `module.attr` via importlib.

    Args:
        func_name: function name; '' returns None.

    Raises:
        RuntimeError: if the name cannot be resolved (chained to the cause).
    """
    if func_name == '':
        return None
    try:
        parts = func_name.split('.')
        # Refers to a function in this module
        if len(parts) == 1:
            return globals()[parts[0]]
        # Otherwise, assume we're referencing a module under modeling
        module_name = '.'.join(parts[:-1])
        module = importlib.import_module(module_name)
        return getattr(module, parts[-1])
    except Exception as err:
        # Bug fix: the bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; narrow to Exception and chain the original cause.
        raise RuntimeError(f'Failed to find function: {func_name}') from err
|
| 182 |
+
|
| 183 |
+
class Timer(object):
    """A simple wall-clock timer with running-average support."""

    def __init__(self):
        self.reset()

    def tic(self):
        """Start (or restart) the timer."""
        # time.time() instead of time.clock(): the latter does not
        # normalize for multithreading.
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop the timer; return the running average (default) or the
        duration of this interval when `average` is False."""
        elapsed = time.time() - self.start_time
        self.diff = elapsed
        self.total_time += elapsed
        self.calls += 1
        self.average_time = self.total_time / self.calls
        return self.average_time if average else self.diff

    def reset(self):
        """Zero all accumulated statistics."""
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.average_time = 0.
|
| 210 |
+
|
| 211 |
+
class TrainingStats(object):
    """Track vital training statistics.

    Accumulates smoothed per-loss averages between log events, times each
    iteration, and emits both console logs (via the package-level `log_stats`)
    and optional tensorboard scalars every `log_period` iterations.
    """
    def __init__(self, log_period, tensorboard_logger=None):
        # Emit a log event every `log_period` iterations.
        self.log_period = log_period
        self.tblogger = tensorboard_logger
        # Keys never forwarded to tensorboard.
        self.tb_ignored_keys = ['iter', 'eta', 'epoch', 'time', 'val_err']
        self.iter_timer = Timer()
        # Window size for smoothing tracked values (with median filtering)
        self.filter_size = log_period
        def create_smoothed_value():
            return AverageMeter()
        # One running AverageMeter per loss key, created lazily on update.
        self.smoothed_losses = defaultdict(create_smoothed_value)


    def IterTic(self):
        """Start timing the current iteration."""
        self.iter_timer.tic()

    def IterToc(self):
        """Stop timing; return this iteration's wall-clock duration."""
        return self.iter_timer.toc(average=False)

    def reset_iter_time(self):
        """Reset the iteration timer's accumulated statistics."""
        self.iter_timer.reset()

    def update_iter_stats(self, losses_dict):
        """Update tracked iteration statistics."""
        for k, v in losses_dict.items():
            self.smoothed_losses[k].update(float(v), 1)

    def log_iter_stats(self, cur_iter, optimizer, max_iters, val_err={}):
        """Log the tracked statistics.

        NOTE: `val_err={}` is a mutable default; safe here because it is
        only read, never mutated.
        """
        if (cur_iter % self.log_period == 0):
            stats = self.get_stats(cur_iter, optimizer, max_iters, val_err)
            # `log_stats` is defined elsewhere in this package.
            log_stats(stats)
            if self.tblogger:
                self.tb_log_stats(stats, cur_iter)
            # Restart the smoothing window for the next log period.
            for k, v in self.smoothed_losses.items():
                v.reset()
            self.iter_timer.reset() # reset time counting every log period

    def tb_log_stats(self, stats, cur_iter):
        """Log the tracked statistics to tensorboard"""
        for k in stats:
            # ignore some logs
            if k not in self.tb_ignored_keys:
                v = stats[k]
                if isinstance(v, dict):
                    # Flatten nested dicts (e.g. per-group lr) recursively.
                    self.tb_log_stats(v, cur_iter)
                else:
                    self.tblogger.add_scalar(k, v, cur_iter)


    def get_stats(self, cur_iter, optimizer, max_iters, val_err = {}):
        """Assemble an OrderedDict of iter / eta / lr / smoothed-loss /
        validation-error statistics for logging."""
        # ETA extrapolated from the average iteration time so far.
        eta_seconds = self.iter_timer.average_time * (max_iters - cur_iter)

        eta = str(datetime.timedelta(seconds=int(eta_seconds)))
        stats = OrderedDict(
            iter=cur_iter,  # 1-indexed
            time=self.iter_timer.average_time,
            eta=eta,
        )
        optimizer_state_dict = optimizer.state_dict()
        lr = {}
        for i in range(len(optimizer_state_dict['param_groups'])):
            lr_name = 'group%d_lr' % i
            lr[lr_name] = optimizer_state_dict['param_groups'][i]['lr']

        stats['lr'] = OrderedDict(lr)
        for k, v in self.smoothed_losses.items():
            stats[k] = v.avg

        stats['val_err'] = OrderedDict(val_err)
        stats['max_iters'] = max_iters
        return stats
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
def reduce_dict(input_dict, average=True):
    """
    Reduce the values in the dictionary from all processes so that process with
    rank 0 has the reduced results.
    Args:
        @input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
        @average (bool): whether to do average or sum
    Returns:
        a dict with the same keys as input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        # single-process run: nothing to reduce
        return input_dict
    with torch.no_grad():
        # sort the keys so that they are consistent across processes
        keys = sorted(input_dict.keys())
        stacked = torch.stack([input_dict[k] for k in keys], dim=0)
        dist.reduce(stacked, dst=0)
        if dist.get_rank() == 0 and average:
            # only main process gets accumulated, so only divide by
            # world_size in this case
            stacked /= world_size
        return dict(zip(keys, stacked))
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
def log_stats(stats):
    """Log training statistics to the terminal via the root logger.

    Expects ``stats`` to contain at least: 'iter', 'max_iters', 'total_loss',
    'time', 'eta', 'val_err' (dict), and 'lr' (dict). Any other key containing
    'loss' is printed on the per-loss line.
    """
    logger = logging.getLogger()
    lines = "[Step %d/%d]\n" % (
        stats['iter'], stats['max_iters'])

    lines += "\t\tloss: %.3f, time: %.6f, eta: %s\n" % (
        stats['total_loss'], stats['time'], stats['eta'])

    # log individual losses (everything but the aggregate total_loss)
    lines += "\t\t"
    for k, v in stats.items():
        if 'loss' in k.lower() and 'total_loss' not in k.lower():
            lines += "%s: %.3f" % (k, v) + ", "
    # BUGFIX: trailing separator is 2 chars (", "); the original sliced [:-3]
    # and chopped the last digit of the final loss value.
    lines = lines[:-2]
    lines += '\n'

    # validate criteria
    lines += "\t\tlast val err:" + ", ".join("%s: %.6f" % (k, v) for k, v in stats['val_err'].items()) + ", "
    lines += '\n'

    # lr in different groups
    lines += "\t\t" + ", ".join("%s: %.8f" % (k, v) for k, v in stats['lr'].items())
    lines += '\n'
    logger.info(lines[:-1])  # remove last newline
|
| 343 |
+
|
external/Metric3D/training/mono/utils/db.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from types import ModuleType
|
| 2 |
+
import data_server_info # data infomation on some server
|
| 3 |
+
|
| 4 |
+
def load_data_info(module_name, data_info=None, db_type='db_info', module=None):
    """Recursively gather ``db_info`` dictionaries from a module tree.

    Walks ``module`` (or the module named ``module_name`` found in
    ``globals()``), merging every public ``db_info`` attribute into
    ``data_info`` and descending into sub-modules.

    Args:
        module_name (str): dotted module name, used for lookup (when
            ``module`` is not supplied), recursion bookkeeping, and errors.
        data_info (dict | None): accumulator, updated in place. A fresh dict
            is created when omitted — the original used a mutable default
            argument ({}), which silently shared state across calls.
        db_type (str): unused; kept for interface compatibility.
        module (ModuleType | None): module object to inspect directly.

    Returns:
        dict: the accumulator (also mutated in place).

    Raises:
        RuntimeError: when the named module cannot be found.
    """
    if data_info is None:
        data_info = {}
    if module is None:
        module = globals().get(module_name, None)
    if not module:
        raise RuntimeError(f'Try to access "db_info", but cannot find {module_name} module.')
    for key, value in module.__dict__.items():
        # skip private / dunder attributes
        if key.startswith('_'):
            continue
        if key == 'db_info':
            data_info.update(value)
        elif isinstance(value, ModuleType):
            # descend into sub-modules
            load_data_info(module_name + '.' + key, data_info, module=value)
    return data_info
|
| 17 |
+
|
| 18 |
+
def reset_ckpt_path(cfg, data_info):
    """Walk a nested config and rewrite every ``backbone.checkpoint`` path.

    The new path is ``data_info['checkpoint']['db_root']`` joined with the
    file registered under the backbone's type. Non-dict configs are ignored.
    """
    if not isinstance(cfg, dict):
        return
    for key in cfg.keys():
        if key == 'backbone':
            ckpt_root = data_info['checkpoint']['db_root']
            ckpt_file = data_info['checkpoint'][cfg.backbone.type]
            cfg.backbone.update(checkpoint=ckpt_root + '/' + ckpt_file)
        elif isinstance(cfg.get(key), dict):
            # recurse into nested sub-configs
            reset_ckpt_path(cfg.get(key), data_info)
|
| 31 |
+
|
| 32 |
+
if __name__ == '__main__':
    # Smoke test: collect every db_info entry reachable from the module named
    # 'db_data_info' (looked up in globals()) and print the gathered keys.
    db_info_tmp = {}
    load_data_info('db_data_info', db_info_tmp)
    print('results', db_info_tmp.keys())
|
| 36 |
+
|
external/Metric3D/training/mono/utils/do_test.py
ADDED
|
@@ -0,0 +1,245 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
from mono.utils.avg_meter import MetricAverageMeter
|
| 5 |
+
from mono.utils.visualization import save_val_imgs, visual_train_data, create_html, save_raw_imgs, save_normal_val_imgs
|
| 6 |
+
import cv2
|
| 7 |
+
from tqdm import tqdm
|
| 8 |
+
import numpy as np
|
| 9 |
+
from mono.utils.logger import setup_logger
|
| 10 |
+
from mono.utils.comm import main_process
|
| 11 |
+
#from scipy.optimize import minimize
|
| 12 |
+
#from torchmin import minimize
|
| 13 |
+
import torch.optim as optim
|
| 14 |
+
from torch.autograd import Variable
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def to_cuda(data: dict):
    """Move tensor values (and tensors inside list values) to the GPU.

    Mutates ``data`` in place with ``non_blocking=True`` transfers; entries
    that are neither tensors nor lists of tensors are left untouched. Returns
    the same dict for chaining.
    """
    for key, value in data.items():
        if isinstance(value, torch.Tensor):
            data[key] = value.cuda(non_blocking=True)
        elif isinstance(value, list) and value and isinstance(value[0], torch.Tensor):
            for idx, item in enumerate(value):
                data[key][idx] = item.cuda(non_blocking=True)
    return data
|
| 25 |
+
|
| 26 |
+
def align_scale(pred: torch.tensor, target: torch.tensor):
    """Scale-align ``pred`` to ``target`` via the ratio of medians.

    Only pixels where ``target > 0`` are considered valid; when fewer than
    11 valid pixels exist, the scale falls back to 1 (no alignment).
    Returns (scaled prediction, scale factor).
    """
    valid = target > 0
    if torch.sum(valid) > 10:
        scale = torch.median(target[valid]) / (torch.median(pred[valid]) + 1e-8)
    else:
        scale = 1
    return pred * scale, scale
|
| 34 |
+
|
| 35 |
+
def align_shift(pred: torch.tensor, target: torch.tensor):
    """Shift-align ``pred`` to ``target`` via the difference of medians.

    Only pixels where ``target > 0`` count; with 10 or fewer valid pixels
    the shift defaults to 0. Returns (shifted prediction, shift).
    """
    valid = target > 0
    if torch.sum(valid) > 10:
        shift = torch.median(target[valid]) - (torch.median(pred[valid]) + 1e-8)
    else:
        shift = 0
    return pred + shift, shift
|
| 43 |
+
|
| 44 |
+
def align_scale_shift(pred: torch.tensor, target: torch.tensor):
    """Affine-align ``pred`` to ``target`` with a least-squares linear fit.

    Fits target ≈ scale * pred + shift over valid pixels (target > 0). A
    negative fitted slope is rejected and replaced by a median-ratio scale
    with zero shift; with 10 or fewer valid pixels no alignment is applied.
    Returns (aligned prediction, scale) — the shift is applied but not
    returned, matching the original interface.
    """
    valid = target > 0
    target_np = target[valid].cpu().numpy()
    pred_np = pred[valid].cpu().numpy()
    if torch.sum(valid) > 10:
        scale, shift = np.polyfit(pred_np, target_np, deg=1)
        if scale < 0:
            # a negative slope is rejected; fall back to median-ratio scaling
            scale = torch.median(target[valid]) / (torch.median(pred[valid]) + 1e-8)
            shift = 0
    else:
        scale, shift = 1, 0
    return pred * scale + shift, scale
|
| 58 |
+
|
| 59 |
+
def get_prediction(
    model: torch.nn.Module,
    input: torch.tensor,
    cam_model: torch.tensor,
    pad_info: torch.tensor,
    scale_info: torch.tensor,
    gt_depth: torch.tensor,
    normalize_scale: float,
    intrinsic = None,
    clip_range = None,
    flip_aug = False):
    """Run one inference pass and post-process the depth to GT resolution.

    Pipeline: model inference -> optional horizontal-flip averaging ->
    crop away ``pad_info`` padding -> bilinear resize to ``gt_depth``'s
    shape -> rescale by ``normalize_scale / scale_info`` -> optional
    clamping -> median-scale alignment against ``gt_depth``.

    Returns:
        (pred_depth, pred_depth_scale, scale, output): the metric depth, the
        scale-aligned depth, the alignment factor, and the raw output dict
        augmented with 'pad', 'mask', 'scale_info' and (optionally)
        'intrinsic'.
    """
    #clip_range = [0, 10],
    #flip_aug = True):

    data = dict(
        input=input,
        #ref_input=ref_input,
        cam_model=cam_model
    )
    #output = model.module.inference(data)
    output = model.module.inference(data)
    pred_depth, confidence = output['prediction'], output['confidence']
    # NOTE: torch.abs creates a new tensor here, so the flip-averaging below
    # rebinds output['prediction'] only — the returned pred_depth is the
    # non-averaged single-pass result.
    pred_depth = torch.abs(pred_depth)
    pred_depth = pred_depth.squeeze()

    if flip_aug == True:
        # second pass on the horizontally flipped input
        output_flip = model.module.inference(dict(
            input=torch.flip(input, [3]),
            #ref_input=ref_input,
            cam_model=cam_model
        ))

        if clip_range != None:
            # flip output is still in normalized units, hence the rescaled bounds
            output['prediction'] = torch.clamp(output['prediction'], clip_range[0], clip_range[1])
            output_flip['prediction'] = torch.clamp(output_flip['prediction'], clip_range[0] / normalize_scale * scale_info , clip_range[1] / normalize_scale * scale_info)

        # average the normal and flipped-back predictions
        output['prediction'] = 0.5 * (output['prediction'] + torch.flip(output_flip['prediction'], [3]))
        output['confidence'] = 0.5 * (output['confidence'] + torch.flip(output_flip['confidence'], [3]))

    # attach bookkeeping used by downstream consumers of the raw output
    output['pad'] = torch.Tensor(pad_info).cuda().unsqueeze(0).int()
    output['mask'] = torch.ones_like(pred_depth).bool().unsqueeze(0).unsqueeze(1)
    output['scale_info'] = scale_info
    if intrinsic is not None:
        output['intrinsic'] = intrinsic

    # crop the padded borders (top, bottom, left, right), resize to GT shape,
    # and convert the normalized prediction back to metric depth
    pred_depth = pred_depth[pad_info[0]: pred_depth.shape[0]-pad_info[1], pad_info[2]: pred_depth.shape[1]-pad_info[3]]
    pred_depth = torch.nn.functional.interpolate(pred_depth[None, None, :, :], gt_depth.shape, mode='bilinear').squeeze() # to orginal size
    pred_depth = pred_depth * normalize_scale / scale_info

    if clip_range != None:
        pred_depth = torch.clamp(pred_depth, clip_range[0], clip_range[1])

    pred_depth_scale, scale = align_scale(pred_depth, gt_depth) #align_scale_shift(pred_depth, gt_depth)

    if clip_range != None:
        pred_depth_scale = torch.clamp(pred_depth_scale, clip_range[0], clip_range[1])

    return pred_depth, pred_depth_scale, scale, output
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
# def depth_normal_consistency_optimization(output_dict, consistency_fn):
|
| 120 |
+
# s = torch.zeros_like(output_dict['scale_info'])
|
| 121 |
+
# def closure(x):
|
| 122 |
+
# output_dict['scale'] = torch.exp(x) * output_dict['scale_info']
|
| 123 |
+
# error = consistency_fn(**output_dict)
|
| 124 |
+
# return error + x * x
|
| 125 |
+
|
| 126 |
+
# result = minimize(closure, s, method='newton-exact', disp=1, options={'max_iter':10, 'lr':0.1})
|
| 127 |
+
# return float(torch.exp(-result.x))
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def do_test_with_dataloader(
    model: torch.nn.Module,
    cfg: dict,
    dataloader: torch.utils.data,
    logger: logging.RootLogger,
    is_distributed: bool = True,
    local_rank: int = 0):
    """Evaluate ``model`` on ``dataloader``, logging depth (and, when the
    model emits them, surface-normal) metrics.

    Depth metrics are accumulated twice: raw predictions (``dam``) and
    median-scale-aligned predictions (``dam_scale``). Visualizations are
    written to ``cfg.show_dir`` every ``save_interval`` samples. Final
    metrics are printed/logged on the main process only.
    """

    show_dir = cfg.show_dir
    save_interval = 100
    save_html_path = show_dir + '/index.html'
    save_imgs_dir = show_dir + '/vis'
    os.makedirs(save_imgs_dir, exist_ok=True)
    save_raw_dir = show_dir + '/raw'
    os.makedirs(save_raw_dir, exist_ok=True)

    # predictions come out normalized by the upper bound of the depth range
    normalize_scale = cfg.data_basic.depth_range[1]

    dam = MetricAverageMeter(cfg.test_metrics)        # without scale alignment
    dam_scale = MetricAverageMeter(cfg.test_metrics)  # with scale alignment

    try:
        depth_range = cfg.data_basic.clip_depth_range if cfg.clip_depth else None
    except:
        depth_range = None

    for i, data in enumerate(tqdm(dataloader)):

        # logger.info(f'{local_rank}: {i}/{len(dataloader)}')
        data = to_cuda(data)
        gt_depth = data['target'].squeeze()
        mask = gt_depth > 1e-6
        pad_info = data['pad']
        pred_depth, pred_depth_scale, scale, output = get_prediction(
            model,
            data['input'],
            data['cam_model'],
            pad_info,
            data['scale'],
            gt_depth,
            normalize_scale,
            data['intrinsic'],
        )

        logger.info(f'{data["filename"]}: {scale}')

        # optimization
        #if "normal_out_list" in output.keys():
        #scale_opt = depth_normal_consistency_optimization(output, consistency_loss)
        #print('scale', scale_opt, float(scale))
        scale_opt = 1.0

        # update depth metrics
        dam_scale.update_metrics_gpu(pred_depth_scale, gt_depth, mask, is_distributed)
        dam.update_metrics_gpu(pred_depth, gt_depth, mask, is_distributed)

        # save evaluation results
        if i % save_interval == 0:
            # crop away padding and resize the RGB back to the GT resolution
            rgb = data['input'][:, :, pad_info[0]: data['input'].shape[2]-pad_info[1], pad_info[2]: data['input'].shape[3]-pad_info[3]]
            rgb = torch.nn.functional.interpolate(rgb, gt_depth.shape, mode='bilinear').squeeze()
            max_scale = save_val_imgs(i,
                                      pred_depth,
                                      gt_depth,
                                      rgb,
                                      data['filename'][0],
                                      save_imgs_dir,
                                      )
            logger.info(f'{data["filename"]}, {"max_scale"}: {max_scale}')

            # # save original depth/rgb
            # save_raw_imgs(
            #     pred_depth.cpu().squeeze().numpy(),
            #     data['raw_rgb'].cpu().squeeze().numpy(),
            #     data['filename'][0],
            #     save_raw_dir,
            # )

        # surface normal metrics
        if "normal_out_list" in output.keys():
            normal_out_list = output['normal_out_list']
            gt_normal = data['normal']

            pred_normal = normal_out_list[-1][:, :3, :, :]  # (B, 3, H, W)
            H, W = pred_normal.shape[2:]
            pred_normal = pred_normal[:, :, pad_info[0]:H-pad_info[1], pad_info[2]:W-pad_info[3]]
            pred_normal = torch.nn.functional.interpolate(pred_normal, size=gt_normal.shape[2:], mode='bilinear', align_corners=True)

            # a GT normal pixel is valid unless all three channels are zero
            gt_normal_mask = ~torch.all(gt_normal == 0, dim=1, keepdim=True)
            # NOTE(review): cfg.distributed is used here while the depth
            # meters above use the is_distributed parameter — confirm intended.
            dam.update_normal_metrics_gpu(pred_normal, gt_normal, gt_normal_mask, cfg.distributed)  # save valid normal

            if i % save_interval == 0:
                # BUGFIX: the original passed the builtin `iter` instead of
                # the sample index `i`.
                save_normal_val_imgs(i,
                                     pred_normal,
                                     gt_normal,
                                     rgb,  # data['input'],
                                     'normal_' + data['filename'][0],
                                     save_imgs_dir,
                                     )

    # get validation error
    if main_process():
        eval_error = dam.get_metrics()
        print('>>>>>W/o scale: ', eval_error)
        eval_error_scale = dam_scale.get_metrics()
        print('>>>>>W scale: ', eval_error_scale)
        # disp_eval_error = dam_disp.get_metrics()
        # print('>>>>>Disp to depth: ', disp_eval_error)
        # for i, dam in enumerate(dams):
        #     print(f'>>>>>W/o scale gru{i}: ', dam.get_metrics())

        logger.info(eval_error)
        logger.info(eval_error_scale)
        # logger.info(disp_eval_error)
        # [logger.info(dam.get_metrics()) for dam in dams]
|
external/Metric3D/training/mono/utils/do_train.py
ADDED
|
@@ -0,0 +1,529 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import torch
|
| 3 |
+
import matplotlib.pyplot as plt
|
| 4 |
+
from mono.model.monodepth_model import get_configured_monodepth_model
|
| 5 |
+
from tensorboardX import SummaryWriter
|
| 6 |
+
from mono.utils.comm import TrainingStats
|
| 7 |
+
from mono.utils.avg_meter import MetricAverageMeter
|
| 8 |
+
from mono.utils.running import build_lr_schedule_with_cfg, build_optimizer_with_cfg, load_ckpt, save_ckpt
|
| 9 |
+
from mono.utils.comm import reduce_dict, main_process, get_rank
|
| 10 |
+
from mono.utils.visualization import save_val_imgs, visual_train_data, create_html, save_normal_val_imgs
|
| 11 |
+
import traceback
|
| 12 |
+
from mono.utils.visualization import create_dir_for_validate_meta
|
| 13 |
+
from mono.model.criterion import build_criterions
|
| 14 |
+
from mono.datasets.distributed_sampler import build_dataset_n_sampler_with_cfg, build_data_array
|
| 15 |
+
from mono.utils.logger import setup_logger
|
| 16 |
+
import logging
|
| 17 |
+
from .misc import NativeScalerWithGradNormCount, is_bf16_supported
|
| 18 |
+
import math
|
| 19 |
+
import sys
|
| 20 |
+
import random
|
| 21 |
+
import numpy as np
|
| 22 |
+
import torch.distributed as dist
|
| 23 |
+
import torch.nn.functional as F
|
| 24 |
+
from contextlib import nullcontext
|
| 25 |
+
|
| 26 |
+
def to_cuda(data):
    """Move tensor values (and tensors inside list values) in ``data`` to the
    GPU in place, returning the same dict.

    BUGFIX: the original tested ``len(v) > 1`` and therefore never moved
    single-element tensor lists; changed to ``>= 1`` to match the identical
    helper in mono/utils/do_test.py.
    """
    for k, v in data.items():
        if isinstance(v, torch.Tensor):
            data[k] = v.cuda(non_blocking=True)
        if isinstance(v, list) and len(v) >= 1 and isinstance(v[0], torch.Tensor):
            for i, l_i in enumerate(v):
                data[k][i] = l_i.cuda(non_blocking=True)
    return data
|
| 34 |
+
|
| 35 |
+
def do_train(local_rank: int, cfg: dict):
    """Entry point for one training process.

    Builds criterions, model, datasets/dataloaders, LR schedule and
    optimizer; wraps the model for (distributed) training; optionally
    restores a checkpoint; then dispatches to the runner selected by
    ``cfg.runner.type``.

    Args:
        local_rank: GPU index of this process on the local node.
        cfg: full experiment configuration (attribute-style config object).
    """

    logger = setup_logger(cfg.log_file)

    # build criterions
    criterions = build_criterions(cfg)

    # build model
    model = get_configured_monodepth_model(cfg,
                                           criterions,
                                           )

    # log model state_dict
    if main_process():
        logger.info(model.state_dict().keys())

    # build datasets; multi_dataset_eval yields a list of per-dataset groups
    train_dataset, train_sampler = build_dataset_n_sampler_with_cfg(cfg, 'train')
    if 'multi_dataset_eval' in cfg.evaluation and cfg.evaluation.multi_dataset_eval:
        val_dataset = build_data_array(cfg, 'val')
    else:
        val_dataset, val_sampler = build_dataset_n_sampler_with_cfg(cfg, 'val')
    # build data loaders
    # seed the shuffling generator per-rank so batches decorrelate across GPUs
    g = torch.Generator()
    g.manual_seed(cfg.seed + cfg.dist_params.global_rank)
    train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,
                                                   batch_size=cfg.batchsize_per_gpu,
                                                   num_workers=cfg.thread_per_gpu,
                                                   sampler=train_sampler,
                                                   drop_last=True,
                                                   pin_memory=True,
                                                   generator=g,)
                                                   # collate_fn=collate_fn)
    if isinstance(val_dataset, list):
        # NOTE(review): this comprehension rebinds `val_dataset` as its inner
        # loop variable while iterating the outer list of the same name —
        # confusing but functional: one single-sample loader per inner dataset.
        val_dataloader = [torch.utils.data.DataLoader(dataset=val_dataset,
                                                      batch_size=1,
                                                      num_workers=0,
                                                      sampler=torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False),
                                                      drop_last=True,
                                                      pin_memory=True,) for val_group in val_dataset for val_dataset in val_group]
    else:
        val_dataloader = torch.utils.data.DataLoader(dataset=val_dataset,
                                                     batch_size=1,
                                                     num_workers=0,
                                                     sampler=val_sampler,
                                                     drop_last=True,
                                                     pin_memory=True,)

    # build schedule
    lr_scheduler = build_lr_schedule_with_cfg(cfg)
    optimizer = build_optimizer_with_cfg(cfg, model)

    # config distributed training
    if cfg.distributed:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        model = torch.nn.parallel.DistributedDataParallel(model.cuda(),
                                                          device_ids=[local_rank],
                                                          output_device=local_rank,
                                                          find_unused_parameters=False)
    else:
        model = torch.nn.DataParallel(model.cuda())

    # init automatic mix precision training
    # if 'AMP' in cfg.runner.type:
    #     loss_scaler = NativeScalerWithGradNormCount()
    # else:
    #     loss_scaler = None
    loss_scaler = None

    # load ckpt: load_from restores weights only; resume_from also restores
    # the optimizer and LR-scheduler state
    if cfg.load_from and cfg.resume_from is None:
        model, _, _, loss_scaler = load_ckpt(cfg.load_from, model, optimizer=None, scheduler=None, strict_match=False, loss_scaler=loss_scaler)
    elif cfg.resume_from:
        model, optimizer, lr_scheduler, loss_scaler = load_ckpt(
            cfg.resume_from,
            model,
            optimizer=optimizer,
            scheduler=lr_scheduler,
            strict_match=False,
            loss_scaler=loss_scaler)

    # dispatch to the configured runner
    if cfg.runner.type == 'IterBasedRunner':
        train_by_iters(cfg,
                       model,
                       optimizer,
                       lr_scheduler,
                       train_dataloader,
                       val_dataloader,
                       )
    elif cfg.runner.type == 'IterBasedRunner_MultiSize':
        train_by_iters_multisize(cfg,
                                 model,
                                 optimizer,
                                 lr_scheduler,
                                 train_dataloader,
                                 val_dataloader,
                                 )
    elif cfg.runner.type == 'IterBasedRunner_AMP':
        train_by_iters_amp(
            cfg = cfg,
            model=model,
            optimizer=optimizer,
            lr_scheduler=lr_scheduler,
            train_dataloader=train_dataloader,
            val_dataloader=val_dataloader,
            loss_scaler=loss_scaler
            )
    elif cfg.runner.type == 'IterBasedRunner_AMP_MultiSize':
        train_by_iters_amp_multisize(
            cfg = cfg,
            model=model,
            optimizer=optimizer,
            lr_scheduler=lr_scheduler,
            train_dataloader=train_dataloader,
            val_dataloader=val_dataloader,
            loss_scaler=loss_scaler
            )
    elif cfg.runner.type == 'EpochBasedRunner':
        raise RuntimeError('It is not supported currently. :)')
    else:
        raise RuntimeError('It is not supported currently. :)')
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def train_by_iters(cfg, model, optimizer, lr_scheduler, train_dataloader, val_dataloader):
    """
    Do the training by iterations.

    Loops until ``cfg.runner.max_iters``: fetch a batch (re-creating the
    iterator on StopIteration), forward/backward with gradient clipping,
    step the optimizer and LR schedule, and periodically validate and save
    checkpoints. Logging and checkpointing happen on the main process only.
    """
    logger = logging.getLogger()
    tb_logger = None
    if cfg.use_tensorboard and main_process():
        tb_logger = SummaryWriter(cfg.tensorboard_dir)
    if main_process():
        training_stats = TrainingStats(log_period=cfg.log_interval, tensorboard_logger=tb_logger)

    lr_scheduler.before_run(optimizer)

    # set training steps; resuming picks up from the scheduler's step count
    max_iters = cfg.runner.max_iters
    start_iter = lr_scheduler._step_count

    save_interval = cfg.checkpoint_config.interval
    eval_interval = cfg.evaluation.interval
    epoch = 0
    logger.info('Create iterator.')
    dataloader_iterator = iter(train_dataloader)

    val_err = {}
    logger.info('Start training.')

    try:
        # for step in range(start_iter, max_iters):
        # keep same step in all processes, avoid stuck during eval barrier
        step = start_iter
        while step < max_iters:
            if main_process():
                training_stats.IterTic()

            # get the data batch; restart the epoch iterator when exhausted,
            # skip the step on any data-loading error
            try:
                data = next(dataloader_iterator)
            except StopIteration:
                dataloader_iterator = iter(train_dataloader)
                data = next(dataloader_iterator)
            except Exception as e:
                # NOTE(review): logger.info('...: ', e) passes e as an unused
                # %-style argument, so the exception text is not printed
                logger.info('When load training data: ', e)
                continue
            except:
                logger.info('Some training data errors exist in the current iter!')
                continue
            data = to_cuda(data)
            # set random crop size
            # if step % 10 == 0:
            #     set_random_crop_size_for_iter(train_dataloader, step, size_sample_list[step])

            # check training data
            #for i in range(data['target'].shape[0]):
            #    if 'DDAD' in data['dataset'][i] or \
            #        'Lyft' in data['dataset'][i] or \
            #        'DSEC' in data['dataset'][i] or \
            #        'Argovers2' in data['dataset'][i]:
            #        replace = True
            #    else:
            #        replace = False
            #    visual_train_data(data['target'][i, ...], data['input'][i,...], data['filename'][i], cfg.work_dir, replace=replace)

            # forward
            pred_depth, losses_dict, conf = model(data)

            optimizer.zero_grad()
            losses_dict['total_loss'].backward()
            # if step > 100 and step % 10 == 0:
            #     for param in model.parameters():
            #         print(param.grad.max(), torch.norm(param.grad))
            torch.nn.utils.clip_grad_norm_(model.parameters(), 10)
            optimizer.step()

            # reduce losses over all GPUs for logging purposes
            loss_dict_reduced = reduce_dict(losses_dict)

            lr_scheduler.after_train_iter(optimizer)
            if main_process():
                training_stats.update_iter_stats(loss_dict_reduced)
                training_stats.IterToc()
                training_stats.log_iter_stats(step, optimizer, max_iters, val_err)

            # validate the model
            if cfg.evaluation.online_eval and \
                (step+1) % eval_interval == 0 and \
                val_dataloader is not None:
                if isinstance(val_dataloader, list):
                    val_err = validate_multiple_dataset(cfg, step+1, model, val_dataloader, tb_logger)
                else:
                    val_err = validate(cfg, step+1, model, val_dataloader, tb_logger)
                if main_process():
                    training_stats.tb_log_stats(val_err, step)

            # save checkpoint
            if main_process():
                if ((step+1) % save_interval == 0) or ((step+1)==max_iters):
                    save_ckpt(cfg, model, optimizer, lr_scheduler, step+1, epoch)

            step += 1

    except (RuntimeError, KeyboardInterrupt):
        # dump the traceback instead of crashing silently under DDP
        stack_trace = traceback.format_exc()
        print(stack_trace)
|
| 261 |
+
|
| 262 |
+
def train_by_iters_amp(cfg, model, optimizer, lr_scheduler, train_dataloader, val_dataloader, loss_scaler):
    """
    Do the training by iterations.
    Mix precision is employed.

    Args:
        cfg: experiment configuration; reads `use_tensorboard`, `log_interval`,
            `runner.max_iters`, `checkpoint_config.interval`, `evaluation.*`
            and (optionally) `acc_batch` for gradient accumulation.
        model: network returning (pred_depth, losses_dict, conf) for a batch.
        optimizer: optimizer driven manually or via `loss_scaler`.
        lr_scheduler: iteration-based scheduler; `_step_count` is used to resume.
        train_dataloader: training loader; its iterator is recreated on exhaustion.
        val_dataloader: a single loader, a list of loaders, or None.
        loss_scaler: AMP loss scaler; if None, a manual backward/clip/step
            path is used under bfloat16 autocast.
    """
    # set up logger
    tb_logger = None
    if cfg.use_tensorboard and main_process():
        tb_logger = SummaryWriter(cfg.tensorboard_dir)
    logger = logging.getLogger()
    # training status: only the main process keeps/logs statistics
    if main_process():
        training_stats = TrainingStats(log_period=cfg.log_interval, tensorboard_logger=tb_logger)

    # learning schedule
    lr_scheduler.before_run(optimizer)

    # set training steps
    max_iters = cfg.runner.max_iters
    start_iter = lr_scheduler._step_count

    save_interval = cfg.checkpoint_config.interval
    eval_interval = cfg.evaluation.interval
    epoch = 0

    # If it's too slow try lowering num_worker
    # see https://discuss.pytorch.org/t/define-iterator-on-dataloader-is-very-slow/52238
    logger.info('Create iterator.')
    dataloader_iterator = iter(train_dataloader)

    val_err = {}
    # torch.cuda.empty_cache()
    logger.info('Start training.')

    # acc_batch > 1 enables gradient accumulation: `step` counts micro-batches,
    # so optimizer steps / logging happen once per acc_batch micro-steps.
    try:
        acc_batch = cfg.acc_batch
    except:
        acc_batch = 1

    try:
        # for step in range(start_iter, max_iters):
        # keep same step in all processes, avoid stuck during eval barrier
        step = start_iter * acc_batch
        #while step < max_iters:
        while True:

            if main_process():
                training_stats.IterTic()

            # get the data batch; rebuild the iterator on StopIteration and
            # skip the iteration on any data-loading error
            try:
                data = next(dataloader_iterator)
            except StopIteration:
                dataloader_iterator = iter(train_dataloader)
                data = next(dataloader_iterator)
            except Exception as e:
                logger.info('When load training data: ', e)
                continue
            except:
                logger.info('Some training data errors exist in the current iter!')
                continue

            data = to_cuda(data)

            # forward under bfloat16 autocast
            with torch.cuda.amp.autocast(dtype=torch.bfloat16):
                pred_depth, losses_dict, conf = model(data)

            # scale the loss so accumulated gradients average over acc_batch
            total_loss = losses_dict['total_loss'] / acc_batch

            if not math.isfinite(total_loss):
                logger.info("Loss is {}, skiping this batch training".format(total_loss))
                continue

            # optimize, backward
            # NOTE(review): zero_grad fires on the same micro-step boundary as
            # optimizer.step() below -- confirm the intended accumulation window.
            if (step+1-start_iter) % acc_batch == 0:
                optimizer.zero_grad()
            if loss_scaler == None:
                # manual path: backward every micro-step, clip + step once per window
                total_loss.backward()
                try:
                    if (step+1-start_iter) % acc_batch == 0:
                        torch.nn.utils.clip_grad_norm_(model.parameters(), 2.5, error_if_nonfinite=True)
                        optimizer.step()
                except:
                    print('NAN gradient, skipping optimizer.step() for this round...')
            else:
                # scaler path: handles backward, unscaling, clipping and stepping
                loss_scaler(total_loss, optimizer, clip_grad=5, parameters=model.parameters(), update_grad=True)

            # reduce losses over all GPUs for logging purposes
            if (step+1-start_iter) % acc_batch == 0:
                loss_dict_reduced = reduce_dict(losses_dict)
                lr_scheduler.after_train_iter(optimizer)

                if main_process():
                    training_stats.update_iter_stats(loss_dict_reduced)
                    training_stats.IterToc()
                    training_stats.log_iter_stats(step//acc_batch, optimizer, max_iters, val_err)

            # validate the model every `eval_interval` effective iterations
            if cfg.evaluation.online_eval and \
                ((step+acc_batch)//acc_batch) % eval_interval == 0 and \
                val_dataloader is not None:
                # if True:
                if isinstance(val_dataloader, list):
                    val_err = validate_multiple_dataset(cfg, ((step+acc_batch)//acc_batch), model, val_dataloader, tb_logger)
                else:
                    val_err = validate(cfg, ((step+acc_batch)//acc_batch), model, val_dataloader, tb_logger)
                if main_process():
                    training_stats.tb_log_stats(val_err, step)

            # save checkpoint on effective-iteration boundaries (main process only)
            if main_process():
                if (((step+acc_batch)//acc_batch) % save_interval == 0) or (((step+acc_batch)//acc_batch)==max_iters):
                    save_ckpt(cfg, model, optimizer, lr_scheduler, ((step+acc_batch)//acc_batch), epoch, loss_scaler=loss_scaler)

            step += 1


    except (RuntimeError, KeyboardInterrupt):
        # surface the traceback (e.g. OOM or Ctrl-C) instead of dying silently
        stack_trace = traceback.format_exc()
        print(stack_trace)
|
| 382 |
+
|
| 383 |
+
def validate_multiple_dataset(cfg, iter, model, val_dataloaders, tb_logger):
    """Run validation on every dataloader and aggregate the results.

    Each dataset's metrics are collected via `validate`; additionally the
    per-metric mean over all datasets is reported under the
    'AllData_eval/' prefix.

    Returns a dict mapping '<dataset>_eval/<metric>' (plus the
    'AllData_eval/<metric>' means) to their values.
    """
    val_errs = {}
    for loader in val_dataloaders:
        val_errs.update(validate(cfg, iter, model, loader, tb_logger))

    # Average every metric across all validated datasets.
    n_sets = len(val_dataloaders)
    mean_val_err = {}
    for key, value in val_errs.items():
        metric = 'AllData_eval/' + key.split('/')[-1]
        mean_val_err[metric] = mean_val_err.get(metric, 0) + value / n_sets
    val_errs.update(mean_val_err)

    return val_errs
|
| 398 |
+
|
| 399 |
+
|
| 400 |
+
def validate(cfg, iter, model, val_dataloader, tb_logger):
    """
    Validate the model on single dataset.

    Runs inference over `val_dataloader`, accumulates depth (and, when the
    model outputs them, surface-normal) metrics on GPU, and periodically
    saves visualization images. Synchronizes all ranks with a barrier before
    evaluating and switches the model back to train mode before returning.

    Args:
        cfg: experiment configuration (work_dir, evaluation.metrics,
            distributed flag, optional evaluation.exclude list).
        iter: current training iteration, used to name the output directory.
        model: DDP-wrapped model; `model.module.inference(data)` is called.
        val_dataloader: loader for one dataset; `dataset.data_name` labels it.
        tb_logger: tensorboard writer passed through to image savers.

    Returns:
        dict of '<dataset>_eval/<metric>' -> value, or {} when the dataset
        is listed in cfg.evaluation.exclude.
    """
    model.eval()
    dist.barrier()
    logger = logging.getLogger()
    # prepare dir for visualization data
    save_val_meta_data_dir = create_dir_for_validate_meta(cfg.work_dir, iter)
    # save_html_path = save_val_meta_data_dir + '.html'
    dataset_name = val_dataloader.dataset.data_name

    # save visualizations roughly 5 times over the loader (at least every sample)
    save_point = max(int(len(val_dataloader) / 5), 1)
    # save_point = 2
    # depth metric meter
    dam = MetricAverageMeter(cfg.evaluation.metrics)
    # dam_disp = MetricAverageMeter([m for m in cfg.evaluation.metrics if m[:6]!='normal'])
    for i, data in enumerate(val_dataloader):
        if i % 10 == 0:
            logger.info(f'Validation step on {dataset_name}: {i}')
        data = to_cuda(data)
        output = model.module.inference(data)
        pred_depth = output['prediction']
        pred_depth = pred_depth.squeeze()
        gt_depth = data['target'].cuda(non_blocking=True).squeeze()

        # crop away the padding (top, bottom, left, right) applied by the loader
        pad = data['pad'].squeeze()
        H, W = pred_depth.shape
        pred_depth = pred_depth[pad[0]:H-pad[1], pad[2]:W-pad[3]]
        gt_depth = gt_depth[pad[0]:H-pad[1], pad[2]:W-pad[3]]
        rgb = data['input'][0, :, pad[0]:H-pad[1], pad[2]:W-pad[3]]
        # only pixels with a positive ground-truth depth are evaluated
        mask = gt_depth > 0
        #pred_depth_resize = cv2.resize(pred_depth.cpu().numpy(), (torch.squeeze(data['B_raw']).shape[1], torch.squeeze(data['B_raw']).shape[0]))
        dam.update_metrics_gpu(pred_depth, gt_depth, mask, cfg.distributed)

        # save evaluation results
        if i%save_point == 0 and main_process():
            save_val_imgs(iter,
                          pred_depth,
                          gt_depth,
                          rgb, # data['input'],
                          dataset_name + '_' + data['filename'][0],
                          save_val_meta_data_dir,
                          tb_logger=tb_logger)

        ## surface normal
        if "normal_out_list" in output.keys():
            normal_out_list = output['normal_out_list']
            # finest prediction; first 3 channels are the normal vector
            pred_normal = normal_out_list[-1][:, :3, :, :] # (B, 3, H, W)
            gt_normal = data['normal'].cuda(non_blocking=True)
            # if pred_normal.shape != gt_normal.shape:
            #     pred_normal = F.interpolate(pred_normal, size=[gt_normal.size(2), gt_normal.size(3)], mode='bilinear', align_corners=True)

            H, W = pred_normal.shape[2:]
            pred_normal = pred_normal[:, :, pad[0]:H-pad[1], pad[2]:W-pad[3]]
            gt_normal = gt_normal[:, :, pad[0]:H-pad[1], pad[2]:W-pad[3]]
            # pixels whose gt normal is the zero vector carry no annotation
            gt_normal_mask = ~torch.all(gt_normal == 0, dim=1, keepdim=True)
            dam.update_normal_metrics_gpu(pred_normal, gt_normal, gt_normal_mask, cfg.distributed)

            # save valiad normal
            if i%save_point == 0 and main_process():
                save_normal_val_imgs(iter,
                                     pred_normal,
                                     gt_normal,
                                     rgb, # data['input'],
                                     dataset_name + '_normal_' + data['filename'][0],
                                     save_val_meta_data_dir,
                                     tb_logger=tb_logger)

    # create html for visualization
    merged_rgb_pred_gt = os.path.join(save_val_meta_data_dir, '*_merge.jpg')
    name2path = dict(merg=merged_rgb_pred_gt) #dict(rgbs=rgbs, pred=pred, gt=gt)
    # if main_process():
    #     create_html(name2path, save_path=save_html_path, size=(256*3, 512))

    # get validation error, namespaced per dataset
    eval_error = dam.get_metrics()
    eval_error = {f'{dataset_name}_eval/{k}': v for k,v in eval_error.items()}
    # eval_disp_error = {f'{dataset_name}_eval/disp_{k}': v for k,v in dam_disp.get_metrics().items()}
    # eval_error.update(eval_disp_error)

    model.train()

    # excluded datasets are still visualized but contribute no metrics
    if 'exclude' in cfg.evaluation and dataset_name in cfg.evaluation.exclude:
        return {}
    return eval_error
|
| 486 |
+
|
| 487 |
+
def set_random_crop_size_for_iter(dataloader: torch.utils.data.dataloader.DataLoader, iter: int, size_pool=None):
    """Pick a crop size for this iteration and push it to every sub-dataset.

    Seeds the global RNG with the iteration index so every (DDP) process
    draws the same candidate from the pool, then propagates the chosen
    [height, width] to each leaf dataset of every dataset group.

    Args:
        dataloader: loader whose `dataset.datasets[i].datasets[j]` leaves
            expose `set_random_crop_size`.
        iter: current iteration, used as the RNG seed.
        size_pool: optional list of [h, w] candidates; defaults to three
            fixed resolutions.

    Returns:
        the chosen [height, width] crop size.
    """
    if size_pool is None:
        size_pool = [
            [560, 1008], [840, 1512], [1120, 2016],
        ]
    # Deterministic per-iteration choice (identical across processes).
    random.seed(iter)
    crop_size = random.choice(size_pool)

    # Walk dataset -> groups -> leaf datasets and apply the crop size.
    for group in dataloader.dataset.datasets:
        for leaf in group.datasets:
            leaf.set_random_crop_size(crop_size)
    return crop_size
|
| 527 |
+
|
| 528 |
+
|
| 529 |
+
|
external/Metric3D/training/mono/utils/inverse_warp.py
ADDED
|
@@ -0,0 +1,316 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn.functional as F
|
| 3 |
+
|
| 4 |
+
pixel_coords = None
|
| 5 |
+
|
| 6 |
+
def set_id_grid(depth):
    """Build and cache (in the module-level `pixel_coords`) a homogeneous
    pixel-coordinate grid of shape [1, 3, H, W] matching `depth`'s H x W.

    Channel order is (x/col, y/row, 1); dtype/device follow `depth`.
    """
    global pixel_coords
    b, h, w = depth.size()
    rows = torch.arange(0, h).view(1, h, 1).expand(
        1, h, w).type_as(depth)  # [1, H, W]
    cols = torch.arange(0, w).view(1, 1, w).expand(
        1, h, w).type_as(depth)  # [1, H, W]
    homog = torch.ones(1, h, w).type_as(depth)

    pixel_coords = torch.stack((cols, rows, homog), dim=1)  # [1, 3, H, W]
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def check_sizes(input, input_name, expected):
    """Assert that `input` matches the size template `expected`.

    `expected` is a string such as 'B3HW': its length fixes the number of
    dimensions, and every digit character pins that axis to an exact size
    (letters act as wildcards).

    Raises AssertionError with a descriptive message on mismatch.
    """
    checks = [input.ndimension() == len(expected)]
    checks += [input.size(axis) == int(spec)
               for axis, spec in enumerate(expected) if spec.isdigit()]
    assert all(checks), "wrong size for {}, expected {}, got {}".format(
        input_name, 'x'.join(expected), list(input.size()))
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def pixel2cam(depth, intrinsics_inv):
    """Transform coordinates in the pixel frame to the camera frame.

    Args:
        depth: depth maps -- [B, H, W]
        intrinsics_inv: inverse intrinsics per batch element -- [B, 3, 3]
    Returns:
        camera-frame points (unit rays scaled by depth) -- [B, 3, H, W]
    """
    global pixel_coords
    b, h, w = depth.size()
    # (Re)build the cached homogeneous grid when missing or too small.
    if (pixel_coords is None) or pixel_coords.size(2) < h:
        set_id_grid(depth)
    grid = pixel_coords[:, :, :h, :w].expand(
        b, 3, h, w).reshape(b, 3, -1)  # [B, 3, H*W]
    rays = (intrinsics_inv @ grid).reshape(b, 3, h, w)
    return depth.unsqueeze(1) * rays
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode):
    """Project camera-frame points to normalized pixel coordinates.

    Args:
        cam_coords: points in the first camera frame -- [B, 3, H, W]
        proj_c2p_rot: rotation part of the projection (or None) -- [B, 3, 3]
        proj_c2p_tr: translation part of the projection (or None) -- [B, 3, 1]
        padding_mode: unused here; kept for interface parity with cam2pixel2.
    Returns:
        grid of [-1, 1] sampling coordinates -- [B, H, W, 2]
    """
    b, _, h, w = cam_coords.size()
    points = cam_coords.reshape(b, 3, -1)  # [B, 3, H*W]
    if proj_c2p_rot is not None:
        points = proj_c2p_rot @ points
    if proj_c2p_tr is not None:
        points = points + proj_c2p_tr  # [B, 3, H*W]
    X = points[:, 0]
    Y = points[:, 1]
    # Clamp depth to keep the division well-defined near/behind the camera.
    Z = points[:, 2].clamp(min=1e-3)

    # Normalized: -1 at the extreme left/top, 1 at x = w-1 / y = h-1.
    x_norm = 2 * (X / Z) / (w - 1) - 1  # [B, H*W]
    y_norm = 2 * (Y / Z) / (h - 1) - 1  # [B, H*W]

    grid = torch.stack([x_norm, y_norm], dim=2)  # [B, H*W, 2]
    return grid.reshape(b, h, w, 2)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def euler2mat(angle):
    """Convert euler angles to a rotation matrix (R = Rx @ Ry @ Rz).

    Reference: https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174
    Args:
        angle: rotation about the x, y, z axes in radians -- [B, 3]
    Returns:
        rotation matrices -- [B, 3, 3]
    """
    B = angle.size(0)
    rx, ry, rz = angle[:, 0], angle[:, 1], angle[:, 2]

    # Constant tensors matching the batch dtype/device, detached from autograd.
    zeros = rz.detach() * 0
    ones = zeros.detach() + 1

    cz, sz = torch.cos(rz), torch.sin(rz)
    z_rot = torch.stack([cz, -sz, zeros,
                         sz, cz, zeros,
                         zeros, zeros, ones], dim=1).reshape(B, 3, 3)

    cy, sy = torch.cos(ry), torch.sin(ry)
    y_rot = torch.stack([cy, zeros, sy,
                         zeros, ones, zeros,
                         -sy, zeros, cy], dim=1).reshape(B, 3, 3)

    cx, sx = torch.cos(rx), torch.sin(rx)
    x_rot = torch.stack([ones, zeros, zeros,
                         zeros, cx, -sx,
                         zeros, sx, cx], dim=1).reshape(B, 3, 3)

    return x_rot @ y_rot @ z_rot
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def quat2mat(quat):
    """Convert quaternion coefficients to a rotation matrix.

    Args:
        quat: the three imaginary coefficients of the rotation quaternion;
            the real part is fixed to 1 before normalization -- [B, 3]
    Returns:
        rotation matrices -- [B, 3, 3]
    """
    # Prepend a constant real part of 1, then normalize to a unit quaternion.
    padded = torch.cat([quat[:, :1].detach() * 0 + 1, quat], dim=1)
    unit = padded / padded.norm(p=2, dim=1, keepdim=True)
    w, x, y, z = unit[:, 0], unit[:, 1], unit[:, 2], unit[:, 3]

    B = quat.size(0)

    w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
    wx, wy, wz = w * x, w * y, w * z
    xy, xz, yz = x * y, x * z, y * z

    return torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,
                        2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,
                        2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2],
                       dim=1).reshape(B, 3, 3)
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def pose_vec2mat(vec, rotation_mode='euler'):
    """
    Convert 6DoF parameters to transformation matrix.

    Args:
        vec: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz -- [B, 6]
        rotation_mode: 'euler' (rx, ry, rz are euler angles) or 'quat'
            (rx, ry, rz are the imaginary quaternion coefficients)
    Returns:
        A transformation matrix -- [B, 3, 4]
    Raises:
        ValueError: if rotation_mode is neither 'euler' nor 'quat'.
            (Previously an unknown mode fell through to an UnboundLocalError.)
    """
    translation = vec[:, :3].unsqueeze(-1)  # [B, 3, 1]
    rot = vec[:, 3:]
    if rotation_mode == 'euler':
        rot_mat = euler2mat(rot)  # [B, 3, 3]
    elif rotation_mode == 'quat':
        rot_mat = quat2mat(rot)  # [B, 3, 3]
    else:
        raise ValueError("unknown rotation_mode: {!r}".format(rotation_mode))
    transform_mat = torch.cat([rot_mat, translation], dim=2)  # [B, 3, 4]
    return transform_mat
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def inverse_warp(img, depth, pose, intrinsics, rotation_mode='euler', padding_mode='zeros'):
    """
    Inverse warp a source image to the target image plane.

    Args:
        img: the source image (where to sample pixels) -- [B, 3, H, W]
        depth: depth map of the target image -- [B, H, W]
        pose: 6DoF pose parameters from target to source -- [B, 6]
        intrinsics: camera intrinsic matrix -- [B, 3, 3]
    Returns:
        projected_img: source image warped to the target image plane
        valid_points: boolean mask of in-bounds sampling locations
    """
    check_sizes(img, 'img', 'B3HW')
    check_sizes(depth, 'depth', 'BHW')
    check_sizes(pose, 'pose', 'B6')
    check_sizes(intrinsics, 'intrinsics', 'B33')

    batch_size, _, img_height, img_width = img.size()

    # Back-project target pixels into the target camera frame.
    cam_coords = pixel2cam(depth, intrinsics.inverse())  # [B,3,H,W]

    pose_mat = pose_vec2mat(pose, rotation_mode)  # [B,3,4]

    # Get projection matrix for tgt camera frame to source pixel frame
    proj_cam_to_src_pixel = intrinsics @ pose_mat  # [B, 3, 4]
    rot = proj_cam_to_src_pixel[:, :, :3]
    tr = proj_cam_to_src_pixel[:, :, -1:]

    src_pixel_coords = cam2pixel(cam_coords, rot, tr, padding_mode)  # [B,H,W,2]
    projected_img = F.grid_sample(
        img, src_pixel_coords, padding_mode=padding_mode)

    # A point is valid when both normalized coordinates fall inside [-1, 1].
    valid_points = src_pixel_coords.abs().max(dim=-1)[0] <= 1

    return projected_img, valid_points
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
def cam2pixel2(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode):
    """Project camera-frame points to normalized pixel coordinates,
    additionally returning the projected depth.

    Args:
        cam_coords: points in the first camera frame -- [B, 3, H, W]
        proj_c2p_rot: rotation part of the projection (or None) -- [B, 3, 3]
        proj_c2p_tr: translation part of the projection (or None) -- [B, 3, 1]
        padding_mode: if 'zeros', out-of-bounds coordinates are pushed to 2.
    Returns:
        grid of [-1, 1] coordinates -- [B, H, W, 2]
        projected (clamped) depth -- [B, 1, H, W]
    """
    b, _, h, w = cam_coords.size()
    points = cam_coords.reshape(b, 3, -1)  # [B, 3, H*W]
    if proj_c2p_rot is not None:
        points = proj_c2p_rot @ points
    if proj_c2p_tr is not None:
        points = points + proj_c2p_tr  # [B, 3, H*W]
    X = points[:, 0]
    Y = points[:, 1]
    Z = points[:, 2].clamp(min=1e-3)

    # Normalized: -1 at the extreme left/top, 1 at x = w-1 / y = h-1.
    x_norm = 2 * (X / Z) / (w - 1) - 1  # [B, H*W]
    y_norm = 2 * (Y / Z) / (h - 1) - 1  # [B, H*W]
    if padding_mode == 'zeros':
        # Push out-of-bounds samples far outside so a warped pixel is never
        # a blend of image content and padding.
        x_out = ((x_norm > 1) + (x_norm < -1)).detach()
        x_norm[x_out] = 2
        y_out = ((y_norm > 1) + (y_norm < -1)).detach()
        y_norm[y_out] = 2

    grid = torch.stack([x_norm, y_norm], dim=2)  # [B, H*W, 2]
    return grid.reshape(b, h, w, 2), Z.reshape(b, 1, h, w)
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
def inverse_warp2(img, depth, ref_depth, pose, intrinsics, padding_mode='zeros'):
    """
    Inverse warp a source image to the target image plane.

    Args:
        img: the source image (where to sample pixels) -- [B, 3, H, W]
        depth: depth map of the target image -- [B, 1, H, W]
        ref_depth: the source depth map (where to sample depth) -- [B, 1, H, W]
        pose: 6DoF pose parameters from target to source -- [B, 6]
        intrinsics: camera intrinsic matrix -- [B, 3, 3]
    Returns:
        projected_img: source image warped to the target image plane
        projected_depth: source depth sampled at the warped locations
        computed_depth: target depth transformed into the source frame
    """
    check_sizes(img, 'img', 'B3HW')
    check_sizes(depth, 'depth', 'B1HW')
    check_sizes(ref_depth, 'ref_depth', 'B1HW')
    check_sizes(pose, 'pose', 'B6')
    check_sizes(intrinsics, 'intrinsics', 'B33')

    batch_size, _, img_height, img_width = img.size()

    # Back-project target pixels into the target camera frame.
    cam_coords = pixel2cam(depth.squeeze(1), intrinsics.inverse())  # [B,3,H,W]

    pose_mat = pose_vec2mat(pose)  # [B,3,4]

    # Get projection matrix for tgt camera frame to source pixel frame
    proj_cam_to_src_pixel = intrinsics @ pose_mat  # [B, 3, 4]
    rot = proj_cam_to_src_pixel[:, :, :3]
    tr = proj_cam_to_src_pixel[:, :, -1:]

    src_pixel_coords, computed_depth = cam2pixel2(cam_coords, rot, tr, padding_mode)  # [B,H,W,2]
    projected_img = F.grid_sample(img, src_pixel_coords, padding_mode=padding_mode, align_corners=False)
    projected_depth = F.grid_sample(ref_depth, src_pixel_coords, padding_mode=padding_mode, align_corners=False)

    return projected_img, projected_depth, computed_depth
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def inverse_rotation_warp(img, rot, intrinsics, padding_mode='zeros'):
    """Warp `img` by a pure camera rotation.

    Args:
        img: source image -- [B, 3, H, W]
        rot: euler angles (radians) -- [B, 3]
        intrinsics: camera intrinsics -- [B, 3, 3]
    Returns:
        the rotation-warped image -- [B, 3, H, W]
    """
    b, _, h, w = img.size()
    # Unit depth everywhere: with no translation, only direction matters.
    cam_coords = pixel2cam(torch.ones(b, h, w).type_as(img), intrinsics.inverse())  # [B,3,H,W]

    rot_mat = euler2mat(rot)  # [B, 3, 3]

    # Get projection matrix for tgt camera frame to source pixel frame
    proj_cam_to_src_pixel = intrinsics @ rot_mat  # [B, 3, 3]

    src_pixel_coords, _ = cam2pixel2(cam_coords, proj_cam_to_src_pixel, None, padding_mode)  # [B,H,W,2]
    return F.grid_sample(img, src_pixel_coords, padding_mode=padding_mode, align_corners=True)
|
| 282 |
+
|
| 283 |
+
def grid_to_flow(grid):
    """Convert a grid_sample grid in [-1, 1] into a pixel-displacement flow.

    Args:
        grid: normalized sampling grid -- [B, H, W, 2]
    Returns:
        flow in pixels -- [B, 2, H, W]
    """
    b, h, w, _ = grid.size()
    rows = torch.arange(0, h).view(1, h, 1).expand(1, h, w).type_as(grid)  # [1, H, W]
    cols = torch.arange(0, w).view(1, 1, w).expand(1, h, w).type_as(grid)  # [1, H, W]
    base_coords = torch.stack((cols, rows), dim=1)  # [1, 2, H, W]

    # De-normalize back to absolute pixel coordinates...
    flow = torch.zeros_like(grid).type_as(grid)
    flow[:, :, :, 0] = (grid[:, :, :, 0] + 1) / 2 * (w - 1)
    flow[:, :, :, 1] = (grid[:, :, :, 1] + 1) / 2 * (h - 1)
    flow = flow.permute([0, 3, 1, 2])

    # ...then subtract each pixel's own coordinate to get the displacement.
    flow -= base_coords

    return flow
|
| 297 |
+
|
| 298 |
+
def compute_translation_flow(depth, pose, intrinsics):
    """Isolate the optical flow induced by camera translation.

    Computes the full projective flow and the rotation-only flow, and
    returns their difference.

    Args:
        depth: target depth -- [B, 1, H, W]
        pose: 6DoF target->source pose -- [B, 6]
        intrinsics: camera intrinsics -- [B, 3, 3]
    Returns:
        translation-induced flow in pixels -- [B, 2, H, W]
    """
    cam_coords = pixel2cam(depth.squeeze(1), intrinsics.inverse())  # [B,3,H,W]

    pose_mat = pose_vec2mat(pose)  # [B,3,4]

    # Get projection matrix for tgt camera frame to source pixel frame
    proj_cam_to_src_pixel = intrinsics @ pose_mat  # [B, 3, 4]
    rot = proj_cam_to_src_pixel[:, :, :3]
    tr = proj_cam_to_src_pixel[:, :, -1:]

    grid_all, _ = cam2pixel2(cam_coords, rot, tr, padding_mode='zeros')  # [B,H,W,2]
    grid_rot, _ = cam2pixel2(cam_coords, rot, None, padding_mode='zeros')  # [B,H,W,2]

    return grid_to_flow(grid_all) - grid_to_flow(grid_rot)
|
| 316 |
+
|
external/Metric3D/training/mono/utils/logger.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import atexit
|
| 2 |
+
import logging
|
| 3 |
+
import os
|
| 4 |
+
import sys
|
| 5 |
+
import time
|
| 6 |
+
import torch
|
| 7 |
+
from termcolor import colored
|
| 8 |
+
|
| 9 |
+
__all__ = ["setup_logger", ]
|
| 10 |
+
|
| 11 |
+
class _ColorfulFormatter(logging.Formatter):
|
| 12 |
+
def __init__(self, *args, **kwargs):
|
| 13 |
+
self._root_name = kwargs.pop("root_name") + "."
|
| 14 |
+
self._abbrev_name = kwargs.pop("abbrev_name", "")
|
| 15 |
+
if len(self._abbrev_name):
|
| 16 |
+
self._abbrev_name = self._abbrev_name + "."
|
| 17 |
+
super(_ColorfulFormatter, self).__init__(*args, **kwargs)
|
| 18 |
+
|
| 19 |
+
def formatMessage(self, record):
|
| 20 |
+
record.name = record.name.replace(self._root_name, self._abbrev_name)
|
| 21 |
+
log = super(_ColorfulFormatter, self).formatMessage(record)
|
| 22 |
+
if record.levelno == logging.WARNING:
|
| 23 |
+
prefix = colored("WARNING", "red", attrs=["blink"])
|
| 24 |
+
elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
|
| 25 |
+
prefix = colored("ERROR", "red", attrs=["blink", "underline"])
|
| 26 |
+
else:
|
| 27 |
+
return log
|
| 28 |
+
return prefix + " " + log
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def setup_logger(
    output=None, distributed_rank=0, *, name='mono@YvanYin', color=True, abbrev_name=None
):
    """
    Initialize the root logger and set its verbosity level to "DEBUG".
    Args:
        output (str): a file name or a directory to save log. If None, will not save log file.
            If ends with ".txt" or ".log", assumed to be a file name.
            Otherwise, logs will be saved to `output/log.txt`.
        distributed_rank (int): rank of this process; only rank 0 logs to
            stdout, and non-zero ranks get a ".rank{N}" suffix on their file.
        name (str): root logger name whose prefix is abbreviated in colored output.
        color (bool): colorize stdout output via _ColorfulFormatter.
        abbrev_name (str): an abbreviation of the module, to avoid long names in logs.
            Set to "" to not log the root module in logs.
            Defaults to "d2" when None.
    Returns:
        logging.Logger: the configured root logger
    """
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logger.propagate = False

    if abbrev_name is None:
        abbrev_name = "d2"

    plain_formatter = logging.Formatter(
        "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S"
    )
    # stdout logging: master only
    if distributed_rank == 0:
        ch = logging.StreamHandler(stream=sys.stdout)
        ch.setLevel(logging.DEBUG)
        if color:
            formatter = _ColorfulFormatter(
                colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s",
                datefmt="%m/%d %H:%M:%S",
                root_name=name,
                abbrev_name=str(abbrev_name),
            )
        else:
            formatter = plain_formatter
        ch.setFormatter(formatter)
        logger.addHandler(ch)

    # file logging: all workers
    if output is not None:
        if output.endswith(".txt") or output.endswith(".log"):
            filename = output
        else:
            filename = os.path.join(output, "log.txt")
        if distributed_rank > 0:
            filename = filename + ".rank{}".format(distributed_rank)
        os.makedirs(os.path.dirname(filename), exist_ok=True)

        # fh = logging.FileHandler(output, 'w')
        # Use the cached stream so repeated setup_logger calls with the same
        # file name append to one shared handle.
        fh = logging.StreamHandler(_cached_log_stream(filename))
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(plain_formatter)
        logger.addHandler(fh)


    return logger
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
from iopath.common.file_io import PathManager as PathManagerBase



# Shared path manager used to open local or remote (URI) log files.
PathManager = PathManagerBase()

# cache the opened file object, so that different calls to `setup_logger`
# with the same file name can safely write to the same file.
def _cached_log_stream(filename):
    # use 1K buffer if writing to cloud storage
    io = PathManager.open(filename, "a", buffering=1024 if "://" in filename else -1)
    # ensure the stream is flushed/closed at interpreter exit
    atexit.register(io.close)
    return io
|
external/Metric3D/training/mono/utils/logit_to_depth.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
class SoftWeight(nn.Module):
    """
    Convert an n-channel stack of discrete depth-bin logits into a depth map.

    Args:
        depth_bins_border: sequence of c bin centers in log10(depth) space.

    forward input:  pred_logit, [b, c, h, w]
    forward output: (depth [b, 1, h, w], confidence [b, 1, h, w])
    """
    def __init__(self, depth_bins_border):
        super(SoftWeight, self).__init__()
        # Non-persistent buffer: follows the module's device/dtype moves but
        # is not serialized into state_dict.
        self.register_buffer("depth_bins_border", torch.tensor(depth_bins_border), persistent=False)

    def forward(self, pred_logit):
        # Accept raw (non-torch) input exactly like the original code path.
        if type(pred_logit).__module__ != torch.__name__:
            pred_logit = torch.tensor(pred_logit, dtype=torch.float32, device="cuda")
        # Per-pixel probability distribution over the depth bins.
        probs = nn.functional.softmax(pred_logit, dim=1).permute(0, 2, 3, 1)  # [b, h, w, c]
        # Expected log10(depth): probability-weighted sum of the bin centers.
        log_depth = torch.sum(probs * self.depth_bins_border, dim=3, dtype=torch.float32, keepdim=True)
        depth = (10 ** log_depth).permute(0, 3, 1, 2)  # [b, 1, h, w]
        # Confidence: the strongest raw logit per pixel.
        confidence, _ = torch.max(pred_logit, dim=1, keepdim=True)
        return depth, confidence
|
| 26 |
+
|
| 27 |
+
def soft_weight(pred_logit, depth_bins_border):
    """
    Transfer n-channel discrete depth bins to a depth map (functional variant
    of SoftWeight).

    Args:
        pred_logit: n-channel output of the network, [b, c, h, w]
        depth_bins_border: c depth-bin centers in log10(depth) space
    Returns:
        depth: [b, 1, h, w]
        confidence: [b, 1, h, w] (max raw logit per pixel)
    """
    if type(pred_logit).__module__ != torch.__name__:
        # NOTE(review): hard-coded "cuda" kept from the original; this fails
        # on CPU-only machines — confirm callers always have a GPU.
        pred_logit = torch.tensor(pred_logit, dtype=torch.float32, device="cuda")
    if type(depth_bins_border).__module__ != torch.__name__:
        depth_bins_border = torch.tensor(depth_bins_border, dtype=torch.float32, device="cuda")

    pred_score = nn.functional.softmax(pred_logit, dim=1)
    pred_score_ch = pred_score.permute(0, 2, 3, 1)  # [b, h, w, c]
    # BUG FIX: the original summed an undefined variable and never used
    # depth_bins_border; weight the softmax scores by the bin centers
    # (mirroring SoftWeight.forward) before reducing over the channel axis.
    depth_log = torch.sum(pred_score_ch * depth_bins_border, dim=3, dtype=torch.float32, keepdim=True)
    depth = 10 ** depth_log
    depth = depth.permute(0, 3, 1, 2)  # [b, 1, h, w]

    confidence, _ = torch.max(pred_logit, dim=1, keepdim=True)
    return depth, confidence
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
if __name__ == '__main__':
    # Smoke test: build 200 depth-bin centers, evenly spaced in log10(depth)
    # between depth_min and depth_max, and construct a SoftWeight module.
    import numpy as np
    depth_max = 100
    depth_min = 0.5

    # Width of one bin in log10(depth) space.
    depth_bin_interval = (np.log10(depth_max) - np.log10(depth_min)) / 200
    # Bin centers: left edge plus half an interval.
    depth_bins_border = [np.log10(depth_min) + depth_bin_interval * (i + 0.5)
                            for i in range(200)]

    sw = SoftWeight(depth_bins_border)
|
external/Metric3D/training/mono/utils/misc.py
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
import os
|
| 5 |
+
import torch
|
| 6 |
+
try:
|
| 7 |
+
from torch._six import inf
|
| 8 |
+
except:
|
| 9 |
+
from torch import inf
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class NativeScalerWithGradNormCount:
    """Thin wrapper over ``torch.cuda.amp.GradScaler`` for mixed-precision
    training: scales the loss, backprops, optionally clips gradients, then
    steps the optimizer and updates the scale.
    """
    # Key under which callers may store this scaler's state in a checkpoint.
    state_dict_key = "amp_scaler"

    def __init__(self):
        #self._scaler = torch.cuda.amp.GradScaler(init_scale=16384) #init_scale=4096.0
        # init_scale=1 effectively disables the initial up-scaling.
        self._scaler = torch.cuda.amp.GradScaler(init_scale=1)

    def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
        """Backprop the scaled loss; when ``update_grad`` is True also clip,
        step and update. Always returns True (the grad norm is deliberately
        not propagated — see the original commented-out ``return norm``).
        """
        self._scaler.scale(loss).backward(create_graph=create_graph)
        if update_grad:
            if clip_grad is not None:
                assert parameters is not None
                self._scaler.unscale_(optimizer)  # unscale the gradients of optimizer's assigned params in-place
                try:
                    norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad, error_if_nonfinite=True)
                except RuntimeError:
                    # clip_grad_norm_(error_if_nonfinite=True) raises
                    # RuntimeError on NaN/Inf grads; skip clipping and let
                    # GradScaler.step() drop the step if grads are bad.
                    print('NAN gradient ....')
            else:
                # Un-clipped path was intentionally disabled upstream; the
                # (unreachable) unscale_/get_grad_norm_ lines were removed.
                raise NotImplementedError
            self._scaler.step(optimizer)
            self._scaler.update()
        else:
            norm = None
        return True
        #return norm

    def state_dict(self):
        """Return the underlying GradScaler's state (for checkpointing)."""
        return self._scaler.state_dict()

    def load_state_dict(self, state_dict):
        """Restore the underlying GradScaler's state from a checkpoint."""
        self._scaler.load_state_dict(state_dict)
|
| 45 |
+
|
| 46 |
+
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
    """Return the total norm of the gradients of ``parameters``.

    Accepts a single tensor or an iterable of tensors; entries without a
    ``.grad`` are skipped. ``norm_type`` may be ``inf`` for the max-norm.
    """
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    with_grad = [p for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if not with_grad:
        # Nothing to measure: report a zero norm.
        return torch.tensor(0.)
    device = with_grad[0].grad.device
    if norm_type == inf:
        # Max-norm: largest absolute gradient entry across all params.
        return max(p.grad.detach().abs().max().to(device) for p in with_grad)
    # p-norm of the vector of per-parameter p-norms.
    per_param = [torch.norm(p.grad.detach(), norm_type).to(device) for p in with_grad]
    return torch.norm(torch.stack(per_param), norm_type)
|
| 59 |
+
|
| 60 |
+
def is_bf16_supported():
    """Return True iff the current CUDA device supports dtype bfloat16.

    Requires an Ampere-or-newer GPU (compute capability >= 8) and a CUDA
    toolkit >= 11. Returns False on CPU-only machines (the original crashed
    in ``get_device_properties`` when no CUDA device was present).
    """
    if not torch.cuda.is_available():
        return False
    cu_vers = torch.version.cuda
    if cu_vers is not None:
        cuda_maj_decide = int(cu_vers.split('.')[0]) >= 11
    else:
        cuda_maj_decide = False
    return torch.cuda.get_device_properties(torch.cuda.current_device()).major >= 8 and cuda_maj_decide
|
external/Metric3D/training/mono/utils/pcd_utils.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import numpy as np
|
| 3 |
+
from plyfile import PlyData, PlyElement
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def save_point_cloud(pcd, rgb, filename, binary=True):
    """Save an RGB point cloud as a PLY file.

    :paras
        @pcd: Nx3 matrix, the XYZ coordinates
        @rgb: Nx3 matrix of uint8 colors, or None to paint all points gray
        @filename: output .ply path
        @binary: True -> binary PLY via plyfile; False -> ASCII PLY via numpy
    """
    if rgb is None:
        # Gray fallback: the original asserted on rgb.shape before this
        # branch, making the documented None path unreachable (AttributeError).
        gray_concat = np.tile(np.array([128], dtype=np.uint8), (pcd.shape[0], 3))
        points_3d = np.hstack((pcd, gray_concat))
    else:
        assert pcd.shape[0] == rgb.shape[0]
        points_3d = np.hstack((pcd, rgb))
    python_types = (float, float, float, int, int, int)
    npy_types = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'),
                 ('blue', 'u1')]
    if binary is True:
        # Format into NumPy structured array
        vertices = []
        for row_idx in range(points_3d.shape[0]):
            cur_point = points_3d[row_idx]
            vertices.append(tuple(dtype(point) for dtype, point in zip(python_types, cur_point)))
        vertices_array = np.array(vertices, dtype=npy_types)
        el = PlyElement.describe(vertices_array, 'vertex')

        # Write
        PlyData([el]).write(filename)
    else:
        x = np.squeeze(points_3d[:, 0])
        y = np.squeeze(points_3d[:, 1])
        z = np.squeeze(points_3d[:, 2])
        r = np.squeeze(points_3d[:, 3])
        g = np.squeeze(points_3d[:, 4])
        b = np.squeeze(points_3d[:, 5])

        # Hand-written ASCII PLY header (end_header gets the trailing newline
        # from np.savetxt's `comments=''` + header behavior).
        ply_head = 'ply\n' \
                   'format ascii 1.0\n' \
                   'element vertex %d\n' \
                   'property float x\n' \
                   'property float y\n' \
                   'property float z\n' \
                   'property uchar red\n' \
                   'property uchar green\n' \
                   'property uchar blue\n' \
                   'end_header' % r.shape[0]
        # ---- Save ply data to disk
        np.savetxt(filename, np.column_stack((x, y, z, r, g, b)), fmt="%d %d %d %d %d %d", header=ply_head, comments='')
|
external/Metric3D/training/mono/utils/running.py
ADDED
|
@@ -0,0 +1,374 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
from mono.utils.comm import main_process
|
| 5 |
+
import copy
|
| 6 |
+
import inspect
|
| 7 |
+
import logging
|
| 8 |
+
import glob
|
| 9 |
+
|
| 10 |
+
class LrUpdater():
    """Refer to LR Scheduler in MMCV.

    Base class: subclasses implement ``get_lr`` (e.g. PolyLrUpdater). Supports
    an optional warmup phase (constant / linear / exp) and both iteration- and
    epoch-based training loops.

    Args:
        @by_epoch (bool): LR changes epoch by epoch
        @warmup (string): Type of warmup used. It can be None (no warmup),
            'constant', 'linear' or 'exp'
        @warmup_iters (int): The number of iterations or epochs that warmup
            lasts. When by_epoch == True it counts epochs, otherwise iterations.
        @warmup_ratio (float): LR used at the beginning of warmup equals
            warmup_ratio * initial_lr
        @runner (dict): Configs for running; ``runner.type`` must contain
            'IterBasedRunner' or 'EpochBasedRunner'.
    """

    def __init__(self,
                 by_epoch: bool=True,
                 warmup: str=None,
                 warmup_iters: int=0,
                 warmup_ratio: float=0.1,
                 runner: dict={}):
        # validate the "warmup" argument
        if warmup is not None:
            if warmup not in ['constant', 'linear', 'exp']:
                raise ValueError(
                    f'"{warmup}" is not a supported type for warming up, valid'
                    ' types are "constant" and "linear"')
        if warmup is not None:
            assert warmup_iters > 0, \
                '"warmup_iters" must be a positive integer'
            assert 0 < warmup_ratio <= 1.0, \
                '"warmup_ratio" must be in range (0,1]'

        if runner is None:
            raise RuntimeError('runner should be set.')

        self.by_epoch = by_epoch
        self.warmup = warmup
        self.warmup_iters = warmup_iters
        self.warmup_ratio = warmup_ratio
        self.runner = runner

        # Exactly one of max_iters / max_epoches is set, matching the runner.
        self.max_iters = None
        self.max_epoches = None
        if 'IterBasedRunner' in self.runner.type:
            self.max_iters = self.runner.max_iters
            assert self.by_epoch==False
            self.warmup_by_epoch = False
        elif 'EpochBasedRunner' in self.runner.type:
            self.max_epoches = self.runner.max_epoches
            assert self.by_epoch==True
            self.warmup_by_epoch = True
        else:
            raise ValueError(f'{self.runner.type} is not a supported type for running.')

        # Epoch-based warmup reinterprets warmup_iters as a number of epochs.
        if self.warmup_by_epoch:
            self.warmup_epochs = self.warmup_iters
            self.warmup_iters = None
        else:
            self.warmup_epochs = None

        self.base_lr = []  # initial lr for all param groups
        self.regular_lr = []  # expected lr if no warming up is performed
        self._step_count = 0  # number of completed steps (iters or epochs)

    def _set_lr(self, optimizer, lr_groups):
        """Write ``lr_groups`` into the param groups of one optimizer or a
        dict of optimizers (keyed consistently with ``lr_groups``)."""
        if isinstance(optimizer, dict):
            for k, optim in optimizer.items():
                for param_group, lr in zip(optim.param_groups, lr_groups[k]):
                    param_group['lr'] = lr
        else:
            for param_group, lr in zip(optimizer.param_groups,
                                       lr_groups):
                param_group['lr'] = lr

    def get_lr(self, _iter, max_iter, base_lr):
        """Compute the scheduled LR at step ``_iter``; subclass hook."""
        raise NotImplementedError

    def get_regular_lr(self, _iter, optimizer):
        """Scheduled (non-warmup) LR for every param group of ``optimizer``."""
        max_iters = self.max_iters if not self.by_epoch else self.max_epoches

        if isinstance(optimizer, dict):
            lr_groups = {}
            for k in optimizer.keys():
                _lr_group = [
                    self.get_lr(_iter, max_iters, _base_lr)
                    for _base_lr in self.base_lr[k]
                ]
                lr_groups.update({k: _lr_group})

            return lr_groups
        else:
            return [self.get_lr(_iter, max_iters, _base_lr) for _base_lr in self.base_lr]

    def get_warmup_lr(self, cur_iters):
        """Warmed-up LR at step ``cur_iters``, derived from self.regular_lr."""

        def _get_warmup_lr(cur_iters, regular_lr):
            if self.warmup == 'constant':
                warmup_lr = [_lr * self.warmup_ratio for _lr in regular_lr]
            elif self.warmup == 'linear':
                # Linearly anneal the gap (1 - warmup_ratio) down to zero.
                k = (1 - cur_iters / self.warmup_iters) * (1 -
                                                           self.warmup_ratio)
                warmup_lr = [_lr * (1 - k) for _lr in regular_lr]
            elif self.warmup == 'exp':
                k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
                warmup_lr = [_lr * k for _lr in regular_lr]
            return warmup_lr

        if isinstance(self.regular_lr, dict):
            lr_groups = {}
            for key, regular_lr in self.regular_lr.items():
                lr_groups[key] = _get_warmup_lr(cur_iters, regular_lr)
            return lr_groups
        else:
            return _get_warmup_lr(cur_iters, self.regular_lr)

    def before_run(self, optimizer):
        """Record each param group's initial LR as ``base_lr``."""
        # NOTE: when resuming from a checkpoint, if 'initial_lr' is not saved,
        # it will be set according to the optimizer params
        if isinstance(optimizer, dict):
            self.base_lr = {}
            for k, optim in optimizer.items():
                for group in optim.param_groups:
                    group.setdefault('initial_lr', group['lr'])
                _base_lr = [
                    group['initial_lr'] for group in optim.param_groups
                ]
                self.base_lr.update({k: _base_lr})
        else:
            for group in optimizer.param_groups:
                group.setdefault('initial_lr', group['lr'])
            self.base_lr = [
                group['initial_lr'] for group in optimizer.param_groups
            ]

    def after_train_epoch(self, optimizer):
        """Advance one epoch and apply the (possibly warmed-up) LR."""
        self._step_count += 1
        curr_epoch = self._step_count
        self.regular_lr = self.get_regular_lr(curr_epoch, optimizer)
        # BUG FIX: was `self.warmup_epoches` (attribute never defined) ->
        # AttributeError whenever epoch-based warmup was active.
        if self.warmup is None or curr_epoch > self.warmup_epochs:
            self._set_lr(optimizer, self.regular_lr)
        else:
            #self.warmup_iters = int(self.warmup_epochs * epoch_len)
            warmup_lr = self.get_warmup_lr(curr_epoch)
            self._set_lr(optimizer, warmup_lr)

    def after_train_iter(self, optimizer):
        """Advance one iteration and apply the (possibly warmed-up) LR."""
        self._step_count += 1
        cur_iter = self._step_count
        self.regular_lr = self.get_regular_lr(cur_iter, optimizer)
        if self.warmup is None or cur_iter >= self.warmup_iters:
            self._set_lr(optimizer, self.regular_lr)
        else:
            warmup_lr = self.get_warmup_lr(cur_iter)
            self._set_lr(optimizer, warmup_lr)

    def get_curr_lr(self, cur_iter):
        """Return the LR list/dict that applies at ``cur_iter`` (no side effects)."""
        if self.warmup is None or cur_iter >= self.warmup_iters:
            return self.regular_lr
        else:
            return self.get_warmup_lr(cur_iter)

    def state_dict(self):
        """
        Returns the state of the scheduler as a :class:`dict`.
        It contains an entry for every variable in self.__dict__ which
        is not the optimizer.
        """
        return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}

    def load_state_dict(self, state_dict):
        """Loads the schedulers state.

        Args:
            @state_dict (dict): scheduler state. Should be an object returned
                from a call to :meth:`state_dict`.
        """
        self.__dict__.update(state_dict)
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
class PolyLrUpdater(LrUpdater):
    """Polynomial LR decay: lr = (base - min_lr) * (1 - t/T)^power + min_lr."""

    def __init__(self, power=1., min_lr=0., **kwargs):
        self.power = power
        self.min_lr = min_lr
        super(PolyLrUpdater, self).__init__(**kwargs)

    def get_lr(self, _iter, max_iters, base_lr):
        # Remaining fraction of the schedule, raised to the decay power.
        decay = (1 - _iter / max_iters) ** self.power
        return (base_lr - self.min_lr) * decay + self.min_lr
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def build_lr_schedule_with_cfg(cfg):
    """Build an LR schedule object from ``cfg.lr_config``.

    Only the 'poly' policy is supported; anything else raises RuntimeError.
    The remaining keys of ``lr_config`` are forwarded to PolyLrUpdater.
    """
    # build learning rate schedule with config.
    lr_config = copy.deepcopy(cfg.lr_config)
    # Pop 'policy' from the copy so it is not forwarded as a kwarg below.
    policy = lr_config.pop('policy')
    if cfg.lr_config.policy == 'poly':
        schedule = PolyLrUpdater(runner=cfg.runner, **lr_config)
    else:
        raise RuntimeError(f'{cfg.lr_config.policy} \
            is not supported in this framework.')
    return schedule
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
#def step_learning_rate(base_lr, epoch, step_epoch, multiplier=0.1):
|
| 217 |
+
# """Sets the learning rate to the base LR decayed by 10 every step epochs"""
|
| 218 |
+
# lr = base_lr * (multiplier ** (epoch // step_epoch))
|
| 219 |
+
# return lr
|
| 220 |
+
|
| 221 |
+
def register_torch_optimizers():
    """Collect every Optimizer subclass exposed by ``torch.optim``.

    Returns a dict mapping class name (e.g. 'SGD', 'AdamW') to the class.
    """
    registry = {}
    for attr_name in dir(torch.optim):
        # Skip dunder attributes; everything else is inspected.
        if attr_name.startswith('__'):
            continue
        candidate = getattr(torch.optim, attr_name)
        if inspect.isclass(candidate) and issubclass(candidate,
                                                     torch.optim.Optimizer):
            registry[attr_name] = candidate
    return registry


# Module-level registry of all optimizers available in this torch build.
TORCH_OPTIMIZER = register_torch_optimizers()
|
| 234 |
+
|
| 235 |
+
def build_optimizer_with_cfg(cfg, model):
    """Build a torch optimizer with per-submodule hyper-parameter groups.

    ``cfg.optimizer`` must contain a 'type' key naming a class registered in
    TORCH_OPTIMIZER; every other key is treated as a submodule name (e.g.
    'encoder', 'decoder') mapping to that group's optimizer kwargs. Trainable
    parameters are routed to the group whose key matches their name; unmatched
    parameters fall into the 'others' group (defaulting to the 'decoder'
    settings when no explicit 'others' entry exists).
    """
    # encoder_parameters = []
    # decoder_parameters = []
    # nongrad_parameters = []
    # for key, value in dict(model.named_parameters()).items():
    #     if value.requires_grad:
    #         if 'encoder' in key:
    #             encoder_parameters.append(value)
    #         else:
    #             decoder_parameters.append(value)
    #     else:
    #         nongrad_parameters.append(value)

    #params = [{"params": filter(lambda p: p.requires_grad, model.parameters())}]
    # Work on a deep copy so the shared cfg object is never mutated.
    optim_cfg = copy.deepcopy(cfg.optimizer)
    optim_type = optim_cfg.pop('type', None)

    if optim_type is None:
        raise RuntimeError(f'{optim_type} is not set')
    if optim_type not in TORCH_OPTIMIZER:
        raise RuntimeError(f'{optim_type} is not supported in torch {torch.__version__}')
    # Fallback group for parameters matching no configured submodule.
    if 'others' not in optim_cfg:
        optim_cfg['others'] = optim_cfg['decoder']

    def match(key1, key_list, strict_match=False):
        # Loose match: any configured key that is a substring of the
        # parameter name. Strict match: key must equal the second path
        # component of the name (assumes a 'module.<submodule>....' layout
        # — TODO confirm with the DDP wrapping used at the call site).
        if not strict_match:
            for k in key_list:
                if k in key1:
                    return k
        else:
            for k in key_list:
                if k == key1.split('.')[1]:
                    return k
        return None
    optim_obj = TORCH_OPTIMIZER[optim_type]
    matching_type = optim_cfg.pop('strict_match', False)

    # One parameter bucket per configured group, plus the 'others' fallback.
    module_names = optim_cfg.keys()
    model_parameters = {i: [] for i in module_names}
    model_parameters['others'] = []
    nongrad_parameters = []
    for key, value in dict(model.named_parameters()).items():
        if value.requires_grad:
            match_key = match(key, module_names, matching_type)
            # if optim_cfg[match_key]['lr'] == 0:
            #     value.requires_grad=False
            #     continue
            if match_key is None:
                model_parameters['others'].append(value)
            else:
                model_parameters[match_key].append(value)
        else:
            nongrad_parameters.append(value)

    # One torch param group per configured bucket, carrying its kwargs.
    optims = [{'params':model_parameters[k], **optim_cfg[k]} for k in optim_cfg.keys()]
    optimizer = optim_obj(optims)
    # optim_args_encoder = optim_cfg.optimizer.encoder
    # optim_args_decoder = optim_cfg.optimizer.decoder
    # optimizer = optim_obj(
    #     [{'params':encoder_parameters, **optim_args_encoder},
    #      {'params':decoder_parameters, **optim_args_decoder},
    #     ])

    return optimizer
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
def load_ckpt(load_path, model, optimizer=None, scheduler=None, strict_match=True, loss_scaler=None):
    """
    Load the check point for resuming training or finetuning.

    Args:
        load_path: path to a .pth checkpoint produced by ``save_ckpt``.
        model: DDP-wrapped module; weights are loaded into ``model.module``.
        optimizer / scheduler / loss_scaler: optional objects whose states
            are restored in place when provided (and, for the scaler, when
            the checkpoint contains a 'scaler' entry).
        strict_match: forwarded to ``load_state_dict``.
    Returns:
        (model, optimizer, scheduler, loss_scaler) — the same objects.
    """
    logger = logging.getLogger()
    if os.path.isfile(load_path):
        if main_process():
            logger.info(f"Loading weight '{load_path}'")
        # map_location="cpu" avoids GPU-memory spikes during load.
        checkpoint = torch.load(load_path, map_location="cpu")
        ckpt_state_dict = checkpoint['model_state_dict']
        model.module.load_state_dict(ckpt_state_dict, strict=strict_match)

        if optimizer is not None:
            optimizer.load_state_dict(checkpoint['optimizer'])
        if scheduler is not None:
            scheduler.load_state_dict(checkpoint['scheduler'])
        if loss_scaler is not None and 'scaler' in checkpoint:
            loss_scaler.load_state_dict(checkpoint['scaler'])
            print('Loss scaler loaded', loss_scaler)
        # Drop the (potentially large) CPU copies promptly.
        del ckpt_state_dict
        del checkpoint
        if main_process():
            logger.info(f"Successfully loaded weight: '{load_path}'")
            if scheduler is not None and optimizer is not None:
                logger.info(f"Resume training from: '{load_path}'")
    else:
        if main_process():
            # NOTE(review): only the main process raises on a missing file;
            # other ranks return silently with an unloaded model — confirm
            # this is the intended distributed behavior.
            raise RuntimeError(f"No weight found at '{load_path}'")
    return model, optimizer, scheduler, loss_scaler
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
def save_ckpt(cfg, model, optimizer, scheduler, curr_iter=0, curr_epoch=None, loss_scaler=None):
    """
    Save the model, optimizer, lr scheduler (and optionally the AMP loss
    scaler) to ``<cfg.work_dir>/ckpt/step<curr_iter>.pth``, keeping only the
    most recent checkpoints.

    Args:
        cfg: config with ``runner`` (type + max_iters/max_epoches) and ``work_dir``.
        model: DDP-wrapped module; ``model.module.state_dict()`` is saved.
        curr_iter: step index used in the checkpoint filename.
        curr_epoch: unused; kept for interface compatibility.
    """
    logger = logging.getLogger()

    # Validate the runner type and resolve the total training budget once.
    if 'IterBasedRunner' in cfg.runner.type:
        max_iters = cfg.runner.max_iters
    elif 'EpochBasedRunner' in cfg.runner.type:
        max_iters = cfg.runner.max_epoches
    else:
        raise TypeError(f'{cfg.runner.type} is not supported')

    ckpt = dict(model_state_dict=model.module.state_dict(),
                optimizer=optimizer.state_dict(),
                # Previously recomputed inline with a membership test; reuse
                # the value derived from the validated runner type above.
                max_iter=max_iters,
                scheduler=scheduler.state_dict(),
                )
    if loss_scaler is not None:
        # amp state_dict
        ckpt.update(dict(scaler=loss_scaler.state_dict()))

    ckpt_dir = os.path.join(cfg.work_dir, 'ckpt')
    os.makedirs(ckpt_dir, exist_ok=True)

    save_name = os.path.join(ckpt_dir, 'step%08d.pth' % curr_iter)
    # Snapshot the existing checkpoints BEFORE saving, so the new file is
    # never a deletion candidate below.
    saved_ckpts = glob.glob(ckpt_dir + '/step*.pth')
    torch.save(ckpt, save_name)

    # keep the last 8 ckpts
    if len(saved_ckpts) > 8:
        saved_ckpts.sort()
        os.remove(saved_ckpts.pop(0))

    logger.info(f'Save model: {save_name}')
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
if __name__ == '__main__':
    # Debug aid: print the auto-registered torch optimizer registry.
    print(TORCH_OPTIMIZER)
|
external/Metric3D/training/mono/utils/transform.py
ADDED
|
@@ -0,0 +1,1491 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#import collections
|
| 2 |
+
import collections.abc as collections
|
| 3 |
+
import cv2
|
| 4 |
+
import math
|
| 5 |
+
import numpy as np
|
| 6 |
+
import numbers
|
| 7 |
+
import random
|
| 8 |
+
import torch
|
| 9 |
+
from imgaug import augmenters as iaa
|
| 10 |
+
import matplotlib
|
| 11 |
+
import matplotlib.cm
|
| 12 |
+
import mono.utils.weather_aug_utils as wa
|
| 13 |
+
|
| 14 |
+
"""
|
| 15 |
+
Provides a set of Pytorch transforms that use OpenCV instead of PIL (Pytorch default)
|
| 16 |
+
for image manipulation.
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
class Compose(object):
    """Chain several transforms and apply them in order.

    Example: Compose([RandScale([0.5, 2.0]), ToTensor()])

    Every transform shares the same seven-argument signature and returns the
    same seven values, so the whole tuple is threaded through the pipeline.
    """

    def __init__(self, transforms):
        # Ordered list of transform callables.
        self.transforms = transforms

    def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
        """Apply each transform sequentially and return the final tuple."""
        args = (images, labels, intrinsics, cam_models, normals, other_labels, transform_paras)
        for transform in self.transforms:
            args = transform(*args)
        return args
|
| 29 |
+
class ToTensor(object):
    """Convert numpy.ndarray inputs (H x W x C) to torch.FloatTensor (C x H x W).

    Images, camera models and normals are transposed from HWC to CHW layout;
    2-D labels gain a leading channel axis; 3-element intrinsics [f, cx, cy]
    are expanded to [fx, fy, cx, cy] with fx == fy. All lists are modified
    in place and also returned.
    """

    def __init__(self, **kwargs):
        return

    def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
        # ---- input validation -------------------------------------------
        lists_ok = isinstance(images, list) and isinstance(labels, list) and isinstance(intrinsics, list)
        if not lists_ok:
            raise (RuntimeError("transform.ToTensor() only handle inputs/labels/intrinsics lists."))
        if len(images) != len(intrinsics):
            raise (RuntimeError("Numbers of images and intrinsics are not matched."))
        if not (isinstance(images[0], np.ndarray) and isinstance(labels[0], np.ndarray)):
            raise (RuntimeError("transform.ToTensor() only handle np.ndarray for the input and label."
                                "[eg: data readed by cv2.imread()].\n"))
        if not isinstance(intrinsics[0], list):
            raise (RuntimeError("transform.ToTensor() only handle list for the camera intrinsics"))

        if images[0].ndim not in (2, 3):
            raise (RuntimeError("transform.ToTensor() only handle image(np.ndarray) with 3 dims or 2 dims.\n"))
        if labels[0].ndim not in (2, 3):
            raise (RuntimeError("transform.ToTensor() only handle label(np.ndarray) with 3 dims or 2 dims.\n"))

        if len(intrinsics[0]) not in (3, 4):
            raise (RuntimeError("transform.ToTensor() only handle intrinsic(list) with 3 sizes or 4 sizes.\n"))

        # ---- conversions (in place) -------------------------------------
        for idx, img in enumerate(images):
            # Grayscale images get a trailing channel axis before HWC -> CHW.
            arr = np.expand_dims(img, axis=2) if img.ndim == 2 else img
            images[idx] = torch.from_numpy(arr.transpose((2, 0, 1))).float()

        for idx, lab in enumerate(labels):
            # 2-D labels get a leading channel axis (no transpose needed).
            arr = np.expand_dims(lab, axis=0) if lab.ndim == 2 else lab
            labels[idx] = torch.from_numpy(arr).float()

        for idx, intrinsic in enumerate(intrinsics):
            # [f, cx, cy] -> [fx, fy, cx, cy] with fx == fy == f.
            full = ([intrinsic[0], ] + intrinsic) if len(intrinsic) == 3 else intrinsic
            intrinsics[idx] = torch.tensor(full, dtype=torch.float)

        if cam_models is not None:
            for idx, cam_model in enumerate(cam_models):
                cam_models[idx] = torch.from_numpy(cam_model.transpose((2, 0, 1))).float() if cam_model is not None else None

        if normals is not None:
            for idx, normal in enumerate(normals):
                normals[idx] = torch.from_numpy(normal.transpose((2, 0, 1))).float()

        if other_labels is not None:
            for idx, lab in enumerate(other_labels):
                arr = np.expand_dims(lab, axis=0) if lab.ndim == 2 else lab
                other_labels[idx] = torch.from_numpy(arr).float()

        return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras
class Normalize(object):
    """Per-channel normalization of the image tensors: c = (c - mean) / std.

    When ``std`` is omitted, a unit std of [1, 1, 1] is used so only the mean
    is subtracted. Only ``images`` are modified; every other input passes
    through unchanged.
    """

    def __init__(self, mean, std=None, **kwargs):
        if std is None:
            assert len(mean) > 0
        else:
            assert len(mean) == len(std)
        # Shape (C, 1, 1) so the statistics broadcast over H and W.
        self.mean = torch.tensor(mean).float()[:, None, None]
        if std is None:
            self.std = torch.tensor([1.0, 1.0, 1.0]).float()[:, None, None]
        else:
            self.std = torch.tensor(std).float()[:, None, None]

    def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
        for idx, img in enumerate(images):
            images[idx] = torch.div(img - self.mean, self.std)
        return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras
class ResizeCanonical(object):
    """
    Resize the input to the canonical space first, then resize the input with a random sampled size.
    In the first stage, we assume the distance holds while the camera model varies.
    In the second stage, we aim to simulate the observation in different distance. The camera will move along the optical axis.
    Args:
        images: list of RGB images.
        labels: list of depth/disparity labels.
        other labels: other labels, such as instance segmentations, semantic segmentations...
    Required kwargs:
        ratio_range: [min, max] range for the random resize ratio.
        focal_length: canonical focal length.
        crop_size: (h, w), used by random_on_crop_transform.
    """
    def __init__(self, **kwargs):
        self.ratio_range = kwargs['ratio_range']
        self.canonical_focal = kwargs['focal_length']
        self.crop_size = kwargs['crop_size']

    def random_on_canonical_transform(self, image, label, intrinsic, cam_model, to_random_ratio):
        """Map one sample to canonical space, then rescale by `to_random_ratio`.

        Returns (image, label, intrinsic, cam_model, to_scale_ratio), where
        `to_scale_ratio` is the factor the depth label was divided by.
        """
        ori_h, ori_w, _ = image.shape
        ori_focal = (intrinsic[0] + intrinsic[1]) / 2.0

        to_canonical_ratio = self.canonical_focal / ori_focal
        to_scale_ratio = to_random_ratio
        resize_ratio = to_canonical_ratio * to_random_ratio
        reshape_h = int(ori_h * resize_ratio + 0.5)
        reshape_w = int(ori_w * resize_ratio + 0.5)

        image = cv2.resize(image, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_LINEAR)
        if intrinsic is not None:
            # Focal length becomes canonical; the principal point scales with the resize.
            intrinsic = [self.canonical_focal, self.canonical_focal, intrinsic[2]*resize_ratio, intrinsic[3]*resize_ratio]
        if label is not None:
            # number of other labels may be less than that of image
            label = cv2.resize(label, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_NEAREST)
            # scale the label: the simulated distance change divides the depth
            label = label / to_scale_ratio

        if cam_model is not None:
            # Should not directly resize the cam_model.
            # Camera model should be resized in 'to canonical' stage, while it holds in 'random resizing' stage.
            cam_model = build_camera_model(reshape_h, reshape_w, intrinsic)

        return image, label, intrinsic, cam_model, to_scale_ratio

    def random_on_crop_transform(self, image, label, intrinsic, cam_model, to_random_ratio):
        """Resize one sample so its long edge fits `crop_size * to_random_ratio`.

        Same return convention as `random_on_canonical_transform`.
        """
        ori_h, ori_w, _ = image.shape
        crop_h, crop_w = self.crop_size
        ori_focal = (intrinsic[0] + intrinsic[1]) / 2.0

        to_canonical_ratio = self.canonical_focal / ori_focal

        # random resize based on the last crop size
        proposal_reshape_h = int(crop_h * to_random_ratio + 0.5)
        proposal_reshape_w = int(crop_w * to_random_ratio + 0.5)
        resize_ratio_h = proposal_reshape_h / ori_h
        resize_ratio_w = proposal_reshape_w / ori_w
        resize_ratio = min(resize_ratio_h, resize_ratio_w)  # resize based on the long edge
        reshape_h = int(ori_h * resize_ratio + 0.5)
        reshape_w = int(ori_w * resize_ratio + 0.5)

        to_scale_ratio = resize_ratio / to_canonical_ratio

        image = cv2.resize(image, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_LINEAR)
        if intrinsic is not None:
            intrinsic = [self.canonical_focal, self.canonical_focal, intrinsic[2]*resize_ratio, intrinsic[3]*resize_ratio]
        if label is not None:
            # number of other labels may be less than that of image
            label = cv2.resize(label, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_NEAREST)
            # scale the label and camera intrinsics
            label = label / to_scale_ratio

        if cam_model is not None:
            # Rebuild instead of resizing; see random_on_canonical_transform.
            cam_model = build_camera_model(reshape_h, reshape_w, intrinsic)
        return image, label, intrinsic, cam_model, to_scale_ratio

    def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
        assert len(images[0].shape) == 3 and len(labels[0].shape) == 2
        # BUGFIX: `np.float` was removed in NumPy 1.24 and raised AttributeError;
        # accept any floating dtype instead.
        assert np.issubdtype(labels[0].dtype, np.floating)

        # One ratio is sampled per call so all views stay mutually consistent.
        to_random_ratio = random.uniform(self.ratio_range[0], self.ratio_range[1])
        to_scale_ratio = 0.0
        for i in range(len(images)):
            img = images[i]
            label = labels[i] if i < len(labels) else None
            intrinsic = intrinsics[i] if i < len(intrinsics) else None
            cam_model = cam_models[i] if cam_models is not None and i < len(cam_models) else None
            img, label, intrinsic, cam_model, to_scale_ratio = self.random_on_canonical_transform(
                img, label, intrinsic, cam_model, to_random_ratio)

            images[i] = img
            if label is not None:
                labels[i] = label
            if intrinsic is not None:
                intrinsics[i] = intrinsic
            if cam_model is not None:
                cam_models[i] = cam_model

        if normals is not None:
            reshape_h, reshape_w, _ = images[0].shape
            for i, normal in enumerate(normals):
                normals[i] = cv2.resize(normal, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_NEAREST)

        if other_labels is not None:
            # other labels are like semantic segmentations, instance segmentations, instance planes segmentations...
            reshape_h, reshape_w, _ = images[0].shape
            for i, other_label_i in enumerate(other_labels):
                other_labels[i] = cv2.resize(other_label_i, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_NEAREST)

        if transform_paras is not None:
            transform_paras.update(label_scale_factor=1.0/to_scale_ratio)

        return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras
| 228 |
+
class LabelScaleCononical(object):
    """
    Map each camera to a canonical space by fixing the focal length and
    scaling the depth label by the same factor.

    Different focal lengths make monocular observations ambiguous (the same
    depth yields different object sizes). Instead of resizing the image, the
    focal length is replaced with a canonical one and the depth values are
    scaled accordingly. (Resizing the image by the ratio would also resolve
    the ambiguity.)

    Args:
        focal_length (kwarg): the canonical focal length.
    """

    def __init__(self, **kwargs):
        self.canonical_focal = kwargs['focal_length']

    def _get_scale_ratio(self, intrinsic):
        # Ratio mapping this camera's fx onto the canonical focal length.
        label_scale_ratio = self.canonical_focal / intrinsic[0]
        pose_scale_ratio = 1.0
        return label_scale_ratio, pose_scale_ratio

    def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
        assert len(images[0].shape) == 3 and len(labels[0].shape) == 2

        label_scale_ratio = None
        pose_scale_ratio = None

        for idx in range(len(intrinsics)):
            image_i = images[idx]
            depth_i = labels[idx] if idx < len(labels) else None
            intrinsic_i = intrinsics[idx].copy()
            cam_model_i = cam_models[idx] if cam_models is not None and idx < len(cam_models) else None

            label_scale_ratio, pose_scale_ratio = self._get_scale_ratio(intrinsic_i)

            # Swap in the canonical focal length; the principal point stays put.
            intrinsics[idx] = [intrinsic_i[0] * label_scale_ratio,
                               intrinsic_i[1] * label_scale_ratio,
                               intrinsic_i[2],
                               intrinsic_i[3]]

            # Depth scales with the focal length so the observation stays consistent.
            if depth_i is not None:
                labels[idx] = depth_i * label_scale_ratio

            # The focal length changed, so any camera model must be rebuilt.
            if cam_model_i is not None:
                h_i, w_i, _ = image_i.shape
                cam_models[idx] = build_camera_model(h_i, w_i, intrinsics[idx])

        if transform_paras is not None:
            transform_paras.update(label_scale_factor=label_scale_ratio)

        return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras
class ResizeKeepRatio(object):
    """
    Resize and pad to a given size. Hold the aspect ratio.
    This resizing assumes that the camera model remains unchanged.
    Args:
        resize_size: predefined output size; an int (square output) or an
            (h, w) pair of positive ints.
        padding: 3-element number list used as the image border value, or None.
        ignore_label: integer value used to pad the label / camera-model borders.
    Required kwargs:
        crop_size: (h, w) crop size, used when computing the label scale factor.
        focal_length: canonical focal length.
    """
    def __init__(self, resize_size, padding=None, ignore_label=-1, **kwargs):
        # Accept either a single int or an (h, w) pair for the target size.
        if isinstance(resize_size, int):
            self.resize_h = resize_size
            self.resize_w = resize_size
        elif isinstance(resize_size, collections.Iterable) and len(resize_size) == 2 \
            and isinstance(resize_size[0], int) and isinstance(resize_size[1], int) \
            and resize_size[0] > 0 and resize_size[1] > 0:
            self.resize_h = resize_size[0]
            self.resize_w = resize_size[1]
        else:
            raise (RuntimeError("crop size error.\n"))
        # `padding` is either None or a 3-element per-channel border value.
        if padding is None:
            self.padding = padding
        elif isinstance(padding, list):
            if all(isinstance(i, numbers.Number) for i in padding):
                self.padding = padding
            else:
                raise (RuntimeError("padding in Crop() should be a number list\n"))
            if len(padding) != 3:
                raise (RuntimeError("padding channel is not equal with 3\n"))
        else:
            raise (RuntimeError("padding in Crop() should be a number list\n"))
        if isinstance(ignore_label, int):
            self.ignore_label = ignore_label
        else:
            raise (RuntimeError("ignore_label should be an integer number\n"))
        self.crop_size = kwargs['crop_size']
        self.canonical_focal = kwargs['focal_length']

    def main_data_transform(self, image, label, intrinsic, cam_model, resize_ratio, padding, to_scale_ratio):
        """
        Resize data first and then do the padding.
        'label' will be scaled.
        """
        h, w, _ = image.shape
        reshape_h = int(resize_ratio * h)
        reshape_w = int(resize_ratio * w)

        pad_h, pad_w, pad_h_half, pad_w_half = padding

        # resize
        image = cv2.resize(image, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_LINEAR)
        # padding
        image = cv2.copyMakeBorder(
            image,
            pad_h_half,
            pad_h - pad_h_half,
            pad_w_half,
            pad_w - pad_w_half,
            cv2.BORDER_CONSTANT,
            value=self.padding)

        if label is not None:
            # label = cv2.resize(label, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_NEAREST)
            # resize_depth_preserve is an external helper; presumably it avoids
            # interpolating across sparse/invalid depth values — TODO confirm.
            label = resize_depth_preserve(label, (reshape_h, reshape_w))
            label = cv2.copyMakeBorder(
                label,
                pad_h_half,
                pad_h - pad_h_half,
                pad_w_half,
                pad_w - pad_w_half,
                cv2.BORDER_CONSTANT,
                value=self.ignore_label)
            # scale the label
            label = label / to_scale_ratio

        # Resize, adjust principle point
        if intrinsic is not None:
            intrinsic[2] = intrinsic[2] * resize_ratio
            intrinsic[3] = intrinsic[3] * resize_ratio

        if cam_model is not None:
            #cam_model = cv2.resize(cam_model, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_LINEAR)
            # The camera model is rebuilt from the adjusted intrinsics rather
            # than resized, then padded like the labels.
            cam_model = build_camera_model(reshape_h, reshape_w, intrinsic)
            cam_model = cv2.copyMakeBorder(
                cam_model,
                pad_h_half,
                pad_h - pad_h_half,
                pad_w_half,
                pad_w - pad_w_half,
                cv2.BORDER_CONSTANT,
                value=self.ignore_label)

        # Pad, adjust the principle point
        if intrinsic is not None:
            intrinsic[2] = intrinsic[2] + pad_w_half
            intrinsic[3] = intrinsic[3] + pad_h_half
        return image, label, intrinsic, cam_model

    def get_label_scale_factor(self, image, intrinsic, resize_ratio):
        # Depth scale = how far this resize deviates from the canonical-focal
        # mapping of the camera (resize_ratio relative to to_canonical_ratio).
        ori_h, ori_w, _ = image.shape
        crop_h, crop_w = self.crop_size
        ori_focal = (intrinsic[0] + intrinsic[1]) / 2.0 #intrinsic[0] #

        to_canonical_ratio = self.canonical_focal / ori_focal
        to_scale_ratio = resize_ratio / to_canonical_ratio
        return to_scale_ratio

    def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
        # Fit the first image into (resize_h, resize_w) without distortion,
        # then pad the remainder symmetrically. The same ratio/pad is applied
        # to every view in the lists.
        target_h, target_w, _ = images[0].shape
        resize_ratio_h = self.resize_h / target_h
        resize_ratio_w = self.resize_w / target_w
        resize_ratio = min(resize_ratio_h, resize_ratio_w)
        reshape_h = int(resize_ratio * target_h)
        reshape_w = int(resize_ratio * target_w)
        pad_h = max(self.resize_h - reshape_h, 0)
        pad_w = max(self.resize_w - reshape_w, 0)
        pad_h_half = int(pad_h / 2)
        pad_w_half = int(pad_w / 2)

        pad_info = [pad_h, pad_w, pad_h_half, pad_w_half]
        to_scale_ratio = self.get_label_scale_factor(images[0], intrinsics[0], resize_ratio)

        for i in range(len(images)):
            img = images[i]
            label = labels[i] if i < len(labels) else None
            intrinsic = intrinsics[i] if i < len(intrinsics) else None
            cam_model = cam_models[i] if cam_models is not None and i < len(cam_models) else None
            img, label, intrinsic, cam_model = self.main_data_transform(
                img, label, intrinsic, cam_model, resize_ratio, pad_info, to_scale_ratio)
            images[i] = img
            if label is not None:
                labels[i] = label
            if intrinsic is not None:
                intrinsics[i] = intrinsic
            if cam_model is not None:
                cam_models[i] = cam_model

        if normals is not None:
            for i, normal in enumerate(normals):
                normal = cv2.resize(normal, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_NEAREST)
                # pad (normals are padded with 0, not ignore_label)
                normals[i] = cv2.copyMakeBorder(
                    normal,
                    pad_h_half,
                    pad_h - pad_h_half,
                    pad_w_half,
                    pad_w - pad_w_half,
                    cv2.BORDER_CONSTANT,
                    value=0)

        if other_labels is not None:

            for i, other_lab in enumerate(other_labels):
                # resize
                other_lab = cv2.resize(other_lab, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_NEAREST)
                # pad
                other_labels[i] = cv2.copyMakeBorder(
                    other_lab,
                    pad_h_half,
                    pad_h - pad_h_half,
                    pad_w_half,
                    pad_w - pad_w_half,
                    cv2.BORDER_CONSTANT,
                    value=self.ignore_label)


        if transform_paras is not None:
            # Record the padding so it can be undone later, and compose this
            # stage's depth scale with any factor set by earlier transforms.
            transform_paras.update(pad=[pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half])
            if 'label_scale_factor' in transform_paras:
                transform_paras['label_scale_factor'] = transform_paras['label_scale_factor'] * 1.0 / to_scale_ratio
            else:
                transform_paras.update(label_scale_factor=1.0/to_scale_ratio)
        return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras
class KeepResizeCanoSize(object):
|
| 454 |
+
"""
|
| 455 |
+
Resize and pad to a given size. Hold the aspect ratio.
|
| 456 |
+
This resizing assumes that the camera model remains unchanged.
|
| 457 |
+
Args:
|
| 458 |
+
resize_size: predefined output size.
|
| 459 |
+
"""
|
| 460 |
+
def __init__(self, resize_size, padding=None, ignore_label=-1, **kwargs):
|
| 461 |
+
if isinstance(resize_size, int):
|
| 462 |
+
self.resize_h = resize_size
|
| 463 |
+
self.resize_w = resize_size
|
| 464 |
+
elif isinstance(resize_size, collections.Iterable) and len(resize_size) == 2 \
|
| 465 |
+
and isinstance(resize_size[0], int) and isinstance(resize_size[1], int) \
|
| 466 |
+
and resize_size[0] > 0 and resize_size[1] > 0:
|
| 467 |
+
self.resize_h = resize_size[0]
|
| 468 |
+
self.resize_w = resize_size[1]
|
| 469 |
+
else:
|
| 470 |
+
raise (RuntimeError("crop size error.\n"))
|
| 471 |
+
if padding is None:
|
| 472 |
+
self.padding = padding
|
| 473 |
+
elif isinstance(padding, list):
|
| 474 |
+
if all(isinstance(i, numbers.Number) for i in padding):
|
| 475 |
+
self.padding = padding
|
| 476 |
+
else:
|
| 477 |
+
raise (RuntimeError("padding in Crop() should be a number list\n"))
|
| 478 |
+
if len(padding) != 3:
|
| 479 |
+
raise (RuntimeError("padding channel is not equal with 3\n"))
|
| 480 |
+
else:
|
| 481 |
+
raise (RuntimeError("padding in Crop() should be a number list\n"))
|
| 482 |
+
if isinstance(ignore_label, int):
|
| 483 |
+
self.ignore_label = ignore_label
|
| 484 |
+
else:
|
| 485 |
+
raise (RuntimeError("ignore_label should be an integer number\n"))
|
| 486 |
+
self.crop_size = kwargs['crop_size']
|
| 487 |
+
self.canonical_focal = kwargs['focal_length']
|
| 488 |
+
|
| 489 |
+
def main_data_transform(self, image, label, intrinsic, cam_model, resize_ratio, padding, to_scale_ratio):
|
| 490 |
+
"""
|
| 491 |
+
Resize data first and then do the padding.
|
| 492 |
+
'label' will be scaled.
|
| 493 |
+
"""
|
| 494 |
+
h, w, _ = image.shape
|
| 495 |
+
reshape_h = int(resize_ratio * h)
|
| 496 |
+
reshape_w = int(resize_ratio * w)
|
| 497 |
+
|
| 498 |
+
pad_h, pad_w, pad_h_half, pad_w_half = padding
|
| 499 |
+
|
| 500 |
+
# resize
|
| 501 |
+
image = cv2.resize(image, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_LINEAR)
|
| 502 |
+
# padding
|
| 503 |
+
image = cv2.copyMakeBorder(
|
| 504 |
+
image,
|
| 505 |
+
pad_h_half,
|
| 506 |
+
pad_h - pad_h_half,
|
| 507 |
+
pad_w_half,
|
| 508 |
+
pad_w - pad_w_half,
|
| 509 |
+
cv2.BORDER_CONSTANT,
|
| 510 |
+
value=self.padding)
|
| 511 |
+
|
| 512 |
+
if label is not None:
|
| 513 |
+
# label = cv2.resize(label, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_NEAREST)
|
| 514 |
+
label = resize_depth_preserve(label, (reshape_h, reshape_w))
|
| 515 |
+
label = cv2.copyMakeBorder(
|
| 516 |
+
label,
|
| 517 |
+
pad_h_half,
|
| 518 |
+
pad_h - pad_h_half,
|
| 519 |
+
pad_w_half,
|
| 520 |
+
pad_w - pad_w_half,
|
| 521 |
+
cv2.BORDER_CONSTANT,
|
| 522 |
+
value=self.ignore_label)
|
| 523 |
+
# scale the label
|
| 524 |
+
label = label / to_scale_ratio
|
| 525 |
+
|
| 526 |
+
# Resize, adjust principle point
|
| 527 |
+
if intrinsic is not None:
|
| 528 |
+
intrinsic[2] = intrinsic[2] * resize_ratio
|
| 529 |
+
intrinsic[3] = intrinsic[3] * resize_ratio
|
| 530 |
+
|
| 531 |
+
if cam_model is not None:
|
| 532 |
+
#cam_model = cv2.resize(cam_model, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_LINEAR)
|
| 533 |
+
cam_model = build_camera_model(reshape_h, reshape_w, intrinsic)
|
| 534 |
+
cam_model = cv2.copyMakeBorder(
|
| 535 |
+
cam_model,
|
| 536 |
+
pad_h_half,
|
| 537 |
+
pad_h - pad_h_half,
|
| 538 |
+
pad_w_half,
|
| 539 |
+
pad_w - pad_w_half,
|
| 540 |
+
cv2.BORDER_CONSTANT,
|
| 541 |
+
value=self.ignore_label)
|
| 542 |
+
|
| 543 |
+
# Pad, adjust the principle point
|
| 544 |
+
if intrinsic is not None:
|
| 545 |
+
intrinsic[2] = intrinsic[2] + pad_w_half
|
| 546 |
+
intrinsic[3] = intrinsic[3] + pad_h_half
|
| 547 |
+
return image, label, intrinsic, cam_model
|
| 548 |
+
|
| 549 |
+
# def get_label_scale_factor(self, image, intrinsic, resize_ratio):
|
| 550 |
+
# ori_h, ori_w, _ = image.shape
|
| 551 |
+
# crop_h, crop_w = self.crop_size
|
| 552 |
+
# ori_focal = intrinsic[0] #(intrinsic[0] + intrinsic[1]) / 2.0
|
| 553 |
+
|
| 554 |
+
# to_canonical_ratio = self.canonical_focal / ori_focal
|
| 555 |
+
# to_scale_ratio = resize_ratio / to_canonical_ratio
|
| 556 |
+
# return to_scale_ratio
|
| 557 |
+
|
| 558 |
+
    def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
        """Resize all frames to the canonical camera space and pad to a multiple of 32.

        The resize ratio is derived once from the first frame's focal length
        (intrinsics[0][0]) against self.canonical_focal and applied to every
        frame so the whole sample stays aligned. Input lists are mutated in
        place and also returned.

        Args:
            images: list of H*W*C RGB images.
            labels: list of depth labels (may be shorter than images).
            intrinsics: list of [fx, fy, u0, v0] lists.
            cam_models: optional list of camera-model maps.
            normals: optional list of surface-normal maps.
            other_labels: optional list of extra maps (e.g. segmentations).
            transform_paras: optional dict accumulating pad / label scale info.
        """
        target_h, target_w, _ = images[0].shape
        # fx of the first frame defines the canonical resize ratio for all frames
        ori_focal = intrinsics[0][0]
        to_canonical_ratio = self.canonical_focal / ori_focal

        resize_ratio = to_canonical_ratio
        reshape_h = int(resize_ratio * target_h)
        reshape_w = int(resize_ratio * target_w)

        # Padding needed to make both sides a multiple of 32.
        # NOTE(review): when reshape_h (or reshape_w) is already divisible by 32
        # this yields a full 32-pixel pad instead of 0 — confirm this is intended
        # (AdjustSize below returns 0 in the same situation).
        pad_h = 32 - reshape_h % 32
        pad_w = 32 - reshape_w % 32
        pad_h_half = int(pad_h / 2)
        pad_w_half = int(pad_w / 2)

        pad_info = [pad_h, pad_w, pad_h_half, pad_w_half]
        # depth labels are not rescaled in this stage (ratio 1.0); only geometry changes
        to_scale_ratio = 1.0

        for i in range(len(images)):
            img = images[i]
            label = labels[i] if i < len(labels) else None
            intrinsic = intrinsics[i] if i < len(intrinsics) else None
            cam_model = cam_models[i] if cam_models is not None and i < len(cam_models) else None
            img, label, intrinsic, cam_model = self.main_data_transform(
                img, label, intrinsic, cam_model, resize_ratio, pad_info, to_scale_ratio)
            images[i] = img
            if label is not None:
                labels[i] = label
            if intrinsic is not None:
                intrinsics[i] = intrinsic
            if cam_model is not None:
                cam_models[i] = cam_model

        if normals is not None:

            for i, normal in enumerate(normals):
                # resize (nearest keeps normal vectors unblended)
                normal = cv2.resize(normal, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_NEAREST)
                # pad with zero vectors
                normals[i] = cv2.copyMakeBorder(
                    normal,
                    pad_h_half,
                    pad_h - pad_h_half,
                    pad_w_half,
                    pad_w - pad_w_half,
                    cv2.BORDER_CONSTANT,
                    value=0)

        if other_labels is not None:

            for i, other_lab in enumerate(other_labels):
                # resize (nearest keeps discrete label values intact)
                other_lab = cv2.resize(other_lab, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_NEAREST)
                # pad with the ignore value
                other_labels[i] = cv2.copyMakeBorder(
                    other_lab,
                    pad_h_half,
                    pad_h - pad_h_half,
                    pad_w_half,
                    pad_w - pad_w_half,
                    cv2.BORDER_CONSTANT,
                    value=self.ignore_label)


        if transform_paras is not None:
            transform_paras.update(pad=[pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half])
            # accumulate the label rescale factor across chained transforms
            if 'label_scale_factor' in transform_paras:
                transform_paras['label_scale_factor'] = transform_paras['label_scale_factor'] * 1.0 / to_scale_ratio
            else:
                transform_paras.update(label_scale_factor=1.0/to_scale_ratio)
        return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras
|
| 628 |
+
|
| 629 |
+
|
| 630 |
+
class RandomCrop(object):
    """Crop the given ndarray images (H*W*C or H*W) with a shared window.

    The crop window is computed once from the first image/intrinsic and then
    applied to every image, label and cam_model so they stay aligned. When an
    image is smaller than the crop size it is padded first.

    Args:
        crop_size (sequence or int): desired output size of the crop. If an
            int, a square crop (size, size) is made.
        crop_type (str): 'center' | 'rand' | 'rand_in_field'. 'rand_in_field'
            samples the window so the principal point stays inside it.
        padding (list): per-channel fill value used when padding RGB images.
        ignore_label (int): fill value used when padding labels / cam_models.

    Raises:
        RuntimeError: on malformed crop_size, crop_type, padding or ignore_label.
    """
    def __init__(self, crop_size, crop_type='center', padding=None, ignore_label=-1, **kwargs):
        # BUGFIX: collections.Iterable was removed in Python 3.10; use collections.abc.
        from collections.abc import Iterable
        if isinstance(crop_size, int):
            self.crop_h = crop_size
            self.crop_w = crop_size
        elif isinstance(crop_size, Iterable) and len(crop_size) == 2 \
            and isinstance(crop_size[0], int) and isinstance(crop_size[1], int) \
            and crop_size[0] > 0 and crop_size[1] > 0:
            self.crop_h = crop_size[0]
            self.crop_w = crop_size[1]
        else:
            raise (RuntimeError("crop size error.\n"))
        if crop_type == 'center' or crop_type == 'rand' or crop_type=='rand_in_field':
            self.crop_type = crop_type
        else:
            raise (RuntimeError("crop type error: rand | center | rand_in_field \n"))
        if padding is None:
            self.padding = padding
        elif isinstance(padding, list):
            if all(isinstance(i, numbers.Number) for i in padding):
                self.padding = padding
            else:
                raise (RuntimeError("padding in Crop() should be a number list\n"))
            if len(padding) != 3:
                raise (RuntimeError("padding channel is not equal with 3\n"))
        else:
            raise (RuntimeError("padding in Crop() should be a number list\n"))
        if isinstance(ignore_label, int):
            self.ignore_label = ignore_label
        else:
            raise (RuntimeError("ignore_label should be an integer number\n"))


    def cal_padding_paras(self, h, w):
        """Return (pad_h, pad_w, pad_h_half, pad_w_half) needed to reach the crop size."""
        # padding if current size is not satisfied
        pad_h = max(self.crop_h - h, 0)
        pad_w = max(self.crop_w - w, 0)
        pad_h_half = int(pad_h / 2)
        pad_w_half = int(pad_w / 2)
        return pad_h, pad_w, pad_h_half, pad_w_half

    def cal_cropping_paras(self, h, w, intrinsic):
        """Sample the top-left corner (h_off, w_off) of the crop window.

        intrinsic is [fx, fy, u0, v0]. For 'rand_in_field' the window is drawn
        so the principal point falls within [0.25, 0.75] of the crop extent.
        """
        u0 = intrinsic[2]
        v0 = intrinsic[3]
        if self.crop_type == 'rand':
            h_min = 0
            h_max = h - self.crop_h
            w_min = 0
            w_max = w - self.crop_w
        elif self.crop_type == 'center':
            h_min = (h - self.crop_h) / 2
            h_max = (h - self.crop_h) / 2
            w_min = (w - self.crop_w) / 2
            w_max = (w - self.crop_w) / 2
        else: # rand in field
            h_min = min(max(0, v0 - 0.75*self.crop_h), h-self.crop_h)
            h_max = min(max(v0 - 0.25*self.crop_h, 0), h-self.crop_h)
            w_min = min(max(0, u0 - 0.75*self.crop_w), w-self.crop_w)
            w_max = min(max(u0 - 0.25*self.crop_w, 0), w-self.crop_w)

        h_off = random.randint(int(h_min), int(h_max))
        w_off = random.randint(int(w_min), int(w_max))
        return h_off, w_off

    def main_data_transform(self, image, label, intrinsic, cam_model,
        pad_h, pad_w, pad_h_half, pad_w_half, h_off, w_off):
        """Pad (if needed) then crop one sample; shift the principal point
        so the intrinsic stays consistent with the cropped view."""
        # padding if current size is not satisfied
        if pad_h > 0 or pad_w > 0:
            if self.padding is None:
                raise (RuntimeError("depthtransform.Crop() need padding while padding argument is None\n"))
            image = cv2.copyMakeBorder(image, pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half, cv2.BORDER_CONSTANT, value=self.padding)
            if label is not None:
                label = cv2.copyMakeBorder(label, pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half, cv2.BORDER_CONSTANT, value=self.ignore_label)
            if cam_model is not None:
                cam_model = cv2.copyMakeBorder(cam_model, pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half, cv2.BORDER_CONSTANT, value=self.ignore_label)

        # cropping
        image = image[h_off:h_off+self.crop_h, w_off:w_off+self.crop_w]
        if label is not None:
            label = label[h_off:h_off+self.crop_h, w_off:w_off+self.crop_w]
        if cam_model is not None:
            cam_model = cam_model[h_off:h_off+self.crop_h, w_off:w_off+self.crop_w]

        if intrinsic is not None:
            # principal point moves right/down with padding, left/up with cropping
            intrinsic[2] = intrinsic[2] + pad_w_half - w_off
            intrinsic[3] = intrinsic[3] + pad_h_half - h_off
        return image, label, intrinsic, cam_model

    def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
        # BUGFIX: guard transform_paras against None before the membership test.
        if transform_paras is not None and 'random_crop_size' in transform_paras \
            and transform_paras['random_crop_size'] is not None \
            and (transform_paras['random_crop_size'][0] + transform_paras['random_crop_size'][1] > 500):
            # random_crop_size entries are expected to be 0-d tensors/arrays (.item())
            self.crop_h = int(transform_paras['random_crop_size'][0].item())
            self.crop_w = int(transform_paras['random_crop_size'][1].item())
        target_img = images[0]
        target_h, target_w, _ = target_img.shape
        target_intrinsic = intrinsics[0]
        # one shared pad + crop window keeps every frame/label aligned
        pad_h, pad_w, pad_h_half, pad_w_half = self.cal_padding_paras(target_h, target_w)
        h_off, w_off = self.cal_cropping_paras(target_h+pad_h, target_w+pad_w, target_intrinsic)

        for i in range(len(images)):
            img = images[i]
            label = labels[i] if i < len(labels) else None
            intrinsic = intrinsics[i].copy() if i < len(intrinsics) else None
            cam_model = cam_models[i] if cam_models is not None and i < len(cam_models) else None
            img, label, intrinsic, cam_model = self.main_data_transform(
                img, label, intrinsic, cam_model,
                pad_h, pad_w, pad_h_half, pad_w_half, h_off, w_off)
            images[i] = img
            if label is not None:
                labels[i] = label
            if intrinsic is not None:
                intrinsics[i] = intrinsic
            if cam_model is not None:
                cam_models[i] = cam_model
        pad=[pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half]
        if normals is not None:
            for i, normal in enumerate(normals):
                # padding if current size is not satisfied
                normal = cv2.copyMakeBorder(normal, pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half, cv2.BORDER_CONSTANT, value=0)
                normals[i] = normal[h_off:h_off+self.crop_h, w_off:w_off+self.crop_w]
        if other_labels is not None:
            for i, other_lab in enumerate(other_labels):
                # padding if current size is not satisfied
                other_lab = cv2.copyMakeBorder(other_lab, pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half, cv2.BORDER_CONSTANT, value=self.ignore_label)
                other_labels[i] = other_lab[h_off:h_off+self.crop_h, w_off:w_off+self.crop_w]
        if transform_paras is not None:
            transform_paras.update(dict(pad=pad))
        return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras
|
| 764 |
+
|
| 765 |
+
|
| 766 |
+
class RandomResize(object):
    """
    Random resize the image. During this process, the camera model is held
    fixed, and thus the depth label is scaled instead (at fixed focal length,
    enlarging the image is equivalent to the scene being closer).

    Args:
        ratio_range (tuple): min/max random resize ratio.
        prob (float): probability of applying a random resize.
        is_lidar (bool): if True, use sparsity-preserving depth resizing.
    """
    def __init__(self, ratio_range=(0.85, 1.15), prob=0.5, is_lidar=True, **kwargs):
        self.ratio_range = ratio_range
        self.is_lidar = is_lidar
        self.prob = prob

    def random_resize(self, image, label, intrinsic, cam_model, to_random_ratio):
        """Resize one sample by to_random_ratio.

        Returns the transformed (image, label, intrinsic, cam_model) plus the
        factor by which depth labels were scaled.
        """
        ori_h, ori_w, _ = image.shape

        resize_ratio = to_random_ratio
        # depth scales inversely with the image size at constant focal length
        label_scale_ratio = 1.0 / resize_ratio
        reshape_h = int(ori_h * resize_ratio + 0.5)
        reshape_w = int(ori_w * resize_ratio + 0.5)

        image = cv2.resize(image, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_LINEAR)
        if intrinsic is not None:
            # fx/fy are kept; only the principal point scales with the image
            intrinsic = [intrinsic[0], intrinsic[1], intrinsic[2]*resize_ratio, intrinsic[3]*resize_ratio]
        if label is not None:
            if self.is_lidar:
                # sparse depth: plain interpolation would smear/duplicate points
                label = resize_depth_preserve(label, (reshape_h, reshape_w))
            else:
                label = cv2.resize(label, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_NEAREST)
            # scale the label
            label = label * label_scale_ratio

        if cam_model is not None:
            # Should not directly resize the cam_model.
            # Camera model should be resized in 'to canonical' stage, while it holds in 'random resizing' stage.
            cam_model = build_camera_model(reshape_h, reshape_w, intrinsic)

        return image, label, intrinsic, cam_model, label_scale_ratio

    def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
        assert len(images[0].shape) == 3 and len(labels[0].shape) == 2
        # BUGFIX: np.float was removed in NumPy 1.24; np.float64 is the same dtype.
        assert labels[0].dtype == np.float64
        prob = random.uniform(0, 1)
        if prob < self.prob:
            to_random_ratio = random.uniform(self.ratio_range[0], self.ratio_range[1])
        else:
            to_random_ratio = 1.0
        # NOTE: stays 0.0 only if images is empty (then no resize happened at all)
        label_scale_ratio = 0.0
        for i in range(len(images)):
            img = images[i]
            label = labels[i] if i < len(labels) else None
            intrinsic = intrinsics[i].copy() if i < len(intrinsics) else None
            cam_model = cam_models[i] if cam_models is not None and i < len(cam_models) else None
            img, label, intrinsic, cam_model, label_scale_ratio = self.random_resize(
                img, label, intrinsic, cam_model, to_random_ratio)

            images[i] = img
            if label is not None:
                labels[i] = label
            if intrinsic is not None:
                intrinsics[i] = intrinsic.copy()
            if cam_model is not None:
                cam_models[i] = cam_model

        if normals is not None:
            reshape_h, reshape_w, _ = images[0].shape
            for i, norm in enumerate(normals):
                normals[i] = cv2.resize(norm, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_NEAREST)


        if other_labels is not None:
            # other labels are like semantic segmentations, instance segmentations, instance planes segmentations...
            reshape_h, reshape_w, _ = images[0].shape
            for i, other_label_i in enumerate(other_labels):
                other_labels[i] = cv2.resize(other_label_i, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_NEAREST)

        if transform_paras is not None:
            # accumulate the label rescale factor across chained transforms
            if 'label_scale_factor' in transform_paras:
                transform_paras['label_scale_factor'] = transform_paras['label_scale_factor'] * label_scale_ratio
            else:
                transform_paras.update(label_scale_factor = label_scale_ratio)
        return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras
|
| 855 |
+
|
| 856 |
+
class RandomEdgeMask(object):
    """
    Randomly mask the borders of the inputs and labels with invalid values.

    Args:
        mask_maxsize (int): exclusive upper bound for each border band width.
        prob (float): probability of applying the edge mask.
        rgb_invalid (list): fill value for masked RGB pixels.
        label_invalid (int): fill value for masked label pixels.
    """
    def __init__(self, mask_maxsize=32, prob=0.5, rgb_invalid=[0,0,0], label_invalid=-1,**kwargs):
        self.mask_maxsize = mask_maxsize
        self.prob = prob
        self.rgb_invalid = rgb_invalid
        self.label_invalid = label_invalid

    def mask_edge(self, image, mask_edgesize, mask_value):
        """Set border bands of the array to mask_value.

        mask_edgesize is [up, down, left, right] band widths. Works for both
        H*W and H*W*C arrays; mutates `image` in place and returns it.
        """
        H, W = image.shape[0], image.shape[1]
        # up
        image[0:mask_edgesize[0], :, ...] = mask_value
        # down
        image[H-mask_edgesize[1]:H, :, ...] = mask_value
        # left
        image[:, 0:mask_edgesize[2], ...] = mask_value
        # right
        image[:, W-mask_edgesize[3]:W, ...] = mask_value

        return image

    def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
        assert len(images[0].shape) == 3 and len(labels[0].shape) == 2
        # BUGFIX: np.float was removed in NumPy 1.24; np.float64 is the same dtype.
        assert labels[0].dtype == np.float64

        prob = random.uniform(0, 1)
        if prob > self.prob:
            return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras

        # one shared set of band widths keeps all frames/labels aligned
        mask_edgesize = random.sample(range(self.mask_maxsize), 4) #[up, down, left, right]
        for i in range(len(images)):
            img = images[i]
            label = labels[i] if i < len(labels) else None
            img = self.mask_edge(img, mask_edgesize, self.rgb_invalid)

            images[i] = img
            if label is not None:
                label = self.mask_edge(label, mask_edgesize, self.label_invalid)
                labels[i] = label

        if normals is not None:
            for i, normal in enumerate(normals):
                normals[i] = self.mask_edge(normal, mask_edgesize, mask_value=0)

        if other_labels is not None:
            # other labels are like semantic segmentations, instance segmentations, instance planes segmentations...
            for i, other_label_i in enumerate(other_labels):
                other_labels[i] = self.mask_edge(other_label_i, mask_edgesize, self.label_invalid)

        if transform_paras is not None:
            # widen the recorded pad so losses ignore the masked border too
            pad = transform_paras['pad'] if 'pad' in transform_paras else [0,0,0,0]
            new_pad = [max(mask_edgesize[0], pad[0]), max(mask_edgesize[1], pad[1]), max(mask_edgesize[2], pad[2]), max(mask_edgesize[3], pad[3])]
            transform_paras.update(dict(pad=new_pad))
        return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras
| 916 |
+
|
| 917 |
+
|
| 918 |
+
class AdjustSize(object):
    """Pad images (and aligned labels / cam models / normals) so that both
    H and W become multiples of 32.

    Padding is split as evenly as possible between the two sides, and the
    principal point in the intrinsic [fx, fy, u0, v0] is shifted by the
    left/top padding so geometry stays consistent.

    Args:
        padding (list): per-channel fill value for padded RGB pixels.
        ignore_label (int): fill value for padded label/cam_model pixels.
    """
    def __init__(self, padding=None, ignore_label=-1, **kwargs):

        if padding is None:
            self.padding = padding
        elif isinstance(padding, list):
            if all(isinstance(v, numbers.Number) for v in padding):
                self.padding = padding
            else:
                raise (RuntimeError("padding in Crop() should be a number list\n"))
            if len(padding) != 3:
                raise (RuntimeError("padding channel is not equal with 3\n"))
        else:
            raise (RuntimeError("padding in Crop() should be a number list\n"))
        if not isinstance(ignore_label, int):
            raise (RuntimeError("ignore_label should be an integer number\n"))
        self.ignore_label = ignore_label

    def get_pad_paras(self, h, w):
        """Amount of padding needed to round (h, w) up to multiples of 32."""
        pad_h = (32 - h % 32) % 32
        pad_w = (32 - w % 32) % 32
        return pad_h, pad_w, pad_h // 2, pad_w // 2

    def main_data_transform(self, image, label, intrinsic, cam_model):
        """Pad one sample to a 32-multiple size; shift u0/v0 by the left/top pad."""
        h, w, _ = image.shape
        pad_h, pad_w, pad_h_half, pad_w_half = self.get_pad_paras(h=h, w=w)
        top, bottom = pad_h_half, pad_h - pad_h_half
        left, right = pad_w_half, pad_w - pad_w_half
        if pad_h > 0 or pad_w > 0:
            if self.padding is None:
                raise (RuntimeError("depthtransform.Crop() need padding while padding argument is None\n"))
            image = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=self.padding)
            if label is not None:
                label = cv2.copyMakeBorder(label, top, bottom, left, right, cv2.BORDER_CONSTANT, value=self.ignore_label)
            if cam_model is not None:
                cam_model = cv2.copyMakeBorder(cam_model, top, bottom, left, right, cv2.BORDER_CONSTANT, value=self.ignore_label)

        if intrinsic is not None:
            # principal point shifts by the left/top padding (no-op when pad is 0)
            intrinsic[2] = intrinsic[2] + left
            intrinsic[3] = intrinsic[3] + top
        pad = [top, bottom, left, right]
        return image, label, intrinsic, cam_model, pad

    def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
        target_h, target_w, _ = images[0].shape
        for idx in range(len(images)):
            lab = labels[idx] if idx < len(labels) else None
            intr = intrinsics[idx] if idx < len(intrinsics) else None
            cam = cam_models[idx] if cam_models is not None and idx < len(cam_models) else None
            img, lab, intr, cam, pad = self.main_data_transform(images[idx], lab, intr, cam)
            images[idx] = img
            if lab is not None:
                labels[idx] = lab
            if intr is not None:
                intrinsics[idx] = intr
            if cam is not None:
                cam_models[idx] = cam

        if transform_paras is not None:
            transform_paras.update(dict(pad=pad))

        if normals is not None:
            pad_h, pad_w, pad_h_half, pad_w_half = self.get_pad_paras(h=target_h, w=target_w)
            for idx, normal in enumerate(normals):
                normals[idx] = cv2.copyMakeBorder(normal, pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half, cv2.BORDER_CONSTANT, value=0)

        if other_labels is not None:
            pad_h, pad_w, pad_h_half, pad_w_half = self.get_pad_paras(h=target_h, w=target_w)
            for idx, other_lab in enumerate(other_labels):
                other_labels[idx] = cv2.copyMakeBorder(other_lab, pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half, cv2.BORDER_CONSTANT, value=self.ignore_label)
        return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras
|
| 997 |
+
|
| 998 |
+
|
| 999 |
+
class RandomHorizontalFlip(object):
    """Horizontally flip images / labels / intrinsics / cam models / normals together.

    NOTE(review): the flip fires when random.random() > prob, i.e. with
    probability (1 - prob); with the default prob=0.5 both readings coincide —
    confirm the intent if a non-default prob is ever used.
    """
    def __init__(self, prob=0.5, **kwargs):
        self.p = prob

    def main_data_transform(self, image, label, intrinsic, cam_model, rotate):
        """Flip one sample left-right when `rotate` is True; mirror the intrinsics."""
        if not rotate:
            return image, label, intrinsic, cam_model
        image = cv2.flip(image, 1)
        if label is not None:
            label = cv2.flip(label, 1)
        if intrinsic is not None:
            h, w, _ = image.shape
            # mirror the principal point.
            # NOTE(review): a horizontal flip normally leaves v0 unchanged;
            # intrinsic[3] = h - intrinsic[3] looks suspicious — confirm upstream.
            intrinsic[2] = w - intrinsic[2]
            intrinsic[3] = h - intrinsic[3]
        if cam_model is not None:
            cam_model = cv2.flip(cam_model, 1)
            # x-related channels change sign under a left-right mirror
            cam_model[:, :, 0] = cam_model[:, :, 0] * -1
            cam_model[:, :, 2] = cam_model[:, :, 2] * -1
        return image, label, intrinsic, cam_model

    def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
        do_flip = random.random() > self.p

        for idx in range(len(images)):
            lab = labels[idx] if idx < len(labels) else None
            intr = intrinsics[idx] if idx < len(intrinsics) else None
            cam = cam_models[idx] if cam_models is not None and idx < len(cam_models) else None
            img, lab, intr, cam = self.main_data_transform(
                images[idx], lab, intr, cam, do_flip)
            images[idx] = img
            if lab is not None:
                labels[idx] = lab
            if intr is not None:
                intrinsics[idx] = intr
            if cam is not None:
                cam_models[idx] = cam

        if normals is not None and do_flip:
            for idx, normal in enumerate(normals):
                normal = cv2.flip(normal, 1)
                # NOTE: x component flips sign; convention follows
                # https://github.com/baegwangbin/surface_normal_uncertainty
                normal[:, :, 0] = -normal[:, :, 0]
                normals[idx] = normal

        if other_labels is not None and do_flip:
            for idx, other_lab in enumerate(other_labels):
                other_labels[idx] = cv2.flip(other_lab, 1)
        return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras
|
| 1048 |
+
|
| 1049 |
+
class RandomBlur(object):
    """With probability `prob`, apply one randomly chosen blur to the images
    (average blur, motion blur, or zoom blur)."""
    def __init__(self,
                 aver_kernal=(2, 10),
                 motion_kernal=(5, 15),
                 angle=[-80, 80],
                 prob=0.3,
                 **kwargs):

        average_blur = iaa.AverageBlur(k=aver_kernal)
        motion_blur = iaa.MotionBlur(k=motion_kernal, angle=angle)
        zoom_blur = iaa.imgcorruptlike.ZoomBlur(severity=1)
        self.prob = prob
        # order matters: blur() selects by index into this list
        self.blurs = [average_blur, motion_blur, zoom_blur]

    def blur(self, imgs, id):
        """Apply the id-th blur augmenter to the image batch."""
        return self.blurs[id](images=imgs)

    def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
        draw = random.random()
        if draw < self.prob:
            choice = random.randint(0, len(self.blurs) - 1)
            images = self.blur(images, choice)
        return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras
|
| 1073 |
+
|
| 1074 |
+
class RGBCompresion(object):
    """With probability `prob`, add random JPEG compression artifacts to the images."""
    def __init__(self, prob=0.1, compression=(0, 50), **kwargs):
        self.rgb_compress = iaa.Sequential(
            [iaa.JpegCompression(compression=compression)],
            random_order=True,
        )
        self.prob = prob

    def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
        apply_compress = random.random() < self.prob
        if apply_compress:
            images = self.rgb_compress(images=images)
        return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras
|
| 1088 |
+
|
| 1089 |
+
|
| 1090 |
+
class RGB2BGR(object):
    """Swap channel order RGB -> BGR (for models initialized from Caffe weights)."""
    def __init__(self, **kwargs):
        return
    def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
        # in-place slice assignment mutates the caller's list, like the loop it replaces
        images[:] = [cv2.cvtColor(im, cv2.COLOR_RGB2BGR) for im in images]
        return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras
|
| 1098 |
+
|
| 1099 |
+
|
| 1100 |
+
class BGR2RGB(object):
    """Swap channel order BGR -> RGB (for models initialized from PyTorch weights)."""
    def __init__(self, **kwargs):
        return
    def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
        # in-place slice assignment mutates the caller's list, like the loop it replaces
        images[:] = [cv2.cvtColor(im, cv2.COLOR_BGR2RGB) for im in images]
        return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras
|
| 1108 |
+
|
| 1109 |
+
|
| 1110 |
+
class PhotoMetricDistortion(object):
|
| 1111 |
+
"""Apply photometric distortion to image sequentially, every transformation
|
| 1112 |
+
is applied with a probability of 0.5. The position of random contrast is in
|
| 1113 |
+
second or second to last.
|
| 1114 |
+
1. random brightness
|
| 1115 |
+
2. random contrast (mode 0)
|
| 1116 |
+
3. convert color from BGR to HSV
|
| 1117 |
+
4. random saturation
|
| 1118 |
+
5. random hue
|
| 1119 |
+
6. convert color from HSV to BGR
|
| 1120 |
+
7. random contrast (mode 1)
|
| 1121 |
+
Args:
|
| 1122 |
+
brightness_delta (int): delta of brightness.
|
| 1123 |
+
contrast_range (tuple): range of contrast.
|
| 1124 |
+
saturation_range (tuple): range of saturation.
|
| 1125 |
+
hue_delta (int): delta of hue.
|
| 1126 |
+
"""
|
| 1127 |
+
|
| 1128 |
+
def __init__(self,
|
| 1129 |
+
brightness_delta=32,
|
| 1130 |
+
contrast_range=(0.5, 1.5),
|
| 1131 |
+
saturation_range=(0.5, 1.5),
|
| 1132 |
+
hue_delta=18,
|
| 1133 |
+
to_gray_prob=0.3,
|
| 1134 |
+
distortion_prob=0.3,
|
| 1135 |
+
**kwargs):
|
| 1136 |
+
self.brightness_delta = brightness_delta
|
| 1137 |
+
self.contrast_lower, self.contrast_upper = contrast_range
|
| 1138 |
+
self.saturation_lower, self.saturation_upper = saturation_range
|
| 1139 |
+
self.hue_delta = hue_delta
|
| 1140 |
+
self.gray_aug = iaa.Grayscale(alpha=(0.8, 1.0))
|
| 1141 |
+
self.to_gray_prob = to_gray_prob
|
| 1142 |
+
self.distortion_prob = distortion_prob
|
| 1143 |
+
|
| 1144 |
+
def convert(self, img, alpha=1.0, beta=0.0):
|
| 1145 |
+
"""Multiple with alpha and add beat with clip."""
|
| 1146 |
+
img = img.astype(np.float32) * alpha + beta
|
| 1147 |
+
img = np.clip(img, 0, 255)
|
| 1148 |
+
return img.astype(np.uint8)
|
| 1149 |
+
|
| 1150 |
+
def brightness(self, img, beta, do):
|
| 1151 |
+
"""Brightness distortion."""
|
| 1152 |
+
if do:
|
| 1153 |
+
# beta = random.uniform(-self.brightness_delta,
|
| 1154 |
+
# self.brightness_delta)
|
| 1155 |
+
img = self.convert(
|
| 1156 |
+
img,
|
| 1157 |
+
beta=beta)
|
| 1158 |
+
return img
|
| 1159 |
+
|
| 1160 |
+
def contrast(self, img, alpha, do):
|
| 1161 |
+
"""Contrast distortion."""
|
| 1162 |
+
if do:
|
| 1163 |
+
#alpha = random.uniform(self.contrast_lower, self.contrast_upper)
|
| 1164 |
+
img = self.convert(
|
| 1165 |
+
img,
|
| 1166 |
+
alpha=alpha)
|
| 1167 |
+
return img
|
| 1168 |
+
|
| 1169 |
+
def saturation(self, img, alpha, do):
|
| 1170 |
+
"""Saturation distortion."""
|
| 1171 |
+
if do:
|
| 1172 |
+
# alpha = random.uniform(self.saturation_lower,
|
| 1173 |
+
# self.saturation_upper)
|
| 1174 |
+
img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
|
| 1175 |
+
img[:, :, 1] = self.convert(
|
| 1176 |
+
img[:, :, 1],
|
| 1177 |
+
alpha=alpha)
|
| 1178 |
+
img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
|
| 1179 |
+
return img
|
| 1180 |
+
|
| 1181 |
+
def hue(self, img, rand_hue, do):
|
| 1182 |
+
"""Hue distortion."""
|
| 1183 |
+
if do:
|
| 1184 |
+
# rand_hue = random.randint(-self.hue_delta, self.hue_delta)
|
| 1185 |
+
img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
|
| 1186 |
+
img[:, :, 0] = (img[:, :, 0].astype(int) + rand_hue) % 180
|
| 1187 |
+
img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
|
| 1188 |
+
return img
|
| 1189 |
+
|
| 1190 |
+
def rgb2gray(self, img):
|
| 1191 |
+
img = self.gray_aug(image=img)
|
| 1192 |
+
return img
|
| 1193 |
+
|
| 1194 |
+
def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
    """Apply photometric distortion (or random grayscale) to every image.

    All random parameters are sampled once up front, so each image in the
    sample receives the same distortion. Labels, intrinsics and the other
    inputs are passed through untouched.

    Returns:
        tuple: (images, labels, intrinsics, cam_models, normals,
        other_labels, transform_paras) with distorted images.
    """
    brightness_beta = random.uniform(-self.brightness_delta, self.brightness_delta)
    brightness_do = random.random() < self.distortion_prob

    contrast_alpha = random.uniform(self.contrast_lower, self.contrast_upper)
    contrast_do = random.random() < self.distortion_prob

    saturate_alpha = random.uniform(self.saturation_lower, self.saturation_upper)
    saturate_do = random.random() < self.distortion_prob

    rand_hue = random.randint(-self.hue_delta, self.hue_delta)
    rand_hue_do = random.random() < self.distortion_prob

    # mode == 1 --> apply contrast before saturation/hue
    # mode == 0 --> apply contrast after saturation/hue
    # FIX: the original sampled mode from {1, 2}, which made the
    # contrast-last (mode == 0) branch unreachable.
    mode = 1 if random.random() > 0.5 else 0
    for i, img in enumerate(images):
        if random.random() < self.to_gray_prob:
            img = self.rgb2gray(img)
        else:
            # random brightness
            img = self.brightness(img, brightness_beta, brightness_do)

            if mode == 1:
                img = self.contrast(img, contrast_alpha, contrast_do)

            # random saturation
            img = self.saturation(img, saturate_alpha, saturate_do)

            # random hue
            img = self.hue(img, rand_hue, rand_hue_do)

            # random contrast (contrast-last ordering)
            if mode == 0:
                img = self.contrast(img, contrast_alpha, contrast_do)
        images[i] = img
    return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras
|
| 1237 |
+
|
| 1238 |
+
class Weather(object):
    """Randomly apply one weather effect (snow, clouds, fog, snowflakes, rain).

    Args:
        prob (float): probability of applying any weather augmentation at all.
    """

    def __init__(self,
                 prob=0.3,
                 **kwargs):
        self.aug_list = [
            iaa.FastSnowyLandscape(
                lightness_threshold=[50, 100],
                lightness_multiplier=(1.2, 2)
            ),
            iaa.Clouds(),
            iaa.Fog(),
            iaa.Snowflakes(flake_size=(0.2, 0.4), speed=(0.001, 0.03)),
            iaa.Rain(speed=(0.1, 0.3), drop_size=(0.1, 0.3)),
        ]
        self.prob = prob

    def aug_with_weather(self, imgs, id):
        """Run the augmenter at index ``id`` over the image list."""
        augmenter = self.aug_list[id]
        # Indices below 5 are imgaug augmenters taking ``images=``; anything
        # appended later is assumed to be a plain callable.
        if id < 5:
            return augmenter(images=imgs)
        return augmenter(imgs)

    def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
        """With probability ``self.prob``, distort all images with one randomly chosen weather effect."""
        if random.random() < self.prob:
            pick = np.random.randint(0, high=len(self.aug_list))
            images = self.aug_with_weather(images, pick)
        return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras
|
| 1280 |
+
|
| 1281 |
+
|
| 1282 |
+
def resize_depth_preserve(depth, shape):
    """
    Resize a sparse depth map while preserving all valid depth pixels.

    Every valid (positive) sample is mapped to its nearest pixel in the output
    grid; several source samples may land on the same target pixel, in which
    case the last assignment wins. No interpolation is performed.

    Parameters
    ----------
    depth : np.array [h, w]
        Input depth map.
    shape : tuple (H, W)
        Output resolution.

    Returns
    -------
    np.array [H, W]
        Resized depth map (zero where no sample landed).
    """
    depth = np.squeeze(depth)
    src_h, src_w = depth.shape
    values = depth.reshape(-1)
    # One (row, col) coordinate per entry of ``values``.
    coords = np.mgrid[:src_h, :src_w].transpose(1, 2, 0).reshape(-1, 2)
    # Keep only valid (positive) samples.
    valid = values > 0
    coords, values = coords[valid], values[valid]
    # Round source coordinates into the target grid.
    coords[:, 0] = (coords[:, 0] * (shape[0] / src_h) + 0.5).astype(np.int32)
    coords[:, 1] = (coords[:, 1] * (shape[1] / src_w) + 0.5).astype(np.int32)
    # Drop samples whose rounding pushed them past the image border.
    inside = (coords[:, 0] < shape[0]) & (coords[:, 1] < shape[1])
    coords, values = coords[inside], values[inside]
    # Scatter the surviving samples into the output grid.
    resized = np.zeros(shape)
    resized[coords[:, 0], coords[:, 1]] = values
    return resized
|
| 1319 |
+
|
| 1320 |
+
|
| 1321 |
+
def gray_to_colormap(img, cmap='rainbow', max_value=None):
    """
    Convert a single-channel map to an RGB uint8 image via a matplotlib colormap.

    Args:
        img (np.array [h, w]): gray map. Negative values are clamped to 0
            (NOTE: this mutates the caller's array, as before) and near-zero
            values are rendered black (treated as invalid).
        cmap (str): matplotlib colormap name.
        max_value (float | None): normalization maximum; defaults to img.max().

    Returns:
        np.array [h, w, 3]: uint8 color image.
    """
    assert img.ndim == 2

    img[img < 0] = 0
    mask_invalid = img < 1e-10
    # FIX: compare with ``is None`` instead of ``== None``.
    if max_value is None:
        img = img / (img.max() + 1e-8)
    else:
        img = img / (max_value + 1e-8)
    norm = matplotlib.colors.Normalize(vmin=0, vmax=1.1)
    cmap_m = matplotlib.cm.get_cmap(cmap)
    # FIX: renamed from ``map`` to avoid shadowing the builtin.
    mapper = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap_m)
    colormap = (mapper.to_rgba(img)[:, :, :3] * 255).astype(np.uint8)
    colormap[mask_invalid] = 0
    return colormap
|
| 1339 |
+
|
| 1340 |
+
|
| 1341 |
+
class LiDarResizeCanonical(object):
    """
    Resize LiDAR-labeled samples to the canonical camera space, then apply a
    random sampled rescale.

    Stage 1 ("to canonical"): the image is rescaled so that its focal length
    matches ``focal_length``; metric distance is assumed to hold while the
    camera model varies.
    Stage 2 ("random resizing"): a random ratio simulates observing the scene
    from a different distance (camera moving along the optical axis), so the
    depth labels are divided by that ratio.
    """
    def __init__(self, **kwargs):
        self.ratio_range = kwargs['ratio_range']       # (lo, hi) of the random resize ratio
        self.canonical_focal = kwargs['focal_length']  # canonical-space focal length
        self.crop_size = kwargs['crop_size']           # (h, w) used by the crop-based variant

    def random_on_canonical_transform(self, image, label, intrinsic, cam_model, to_random_ratio):
        """Resize one sample by canonical_ratio * to_random_ratio.

        Returns (image, label, intrinsic, cam_model, to_scale_ratio), where
        the depth label has been divided by ``to_scale_ratio``
        (== ``to_random_ratio``) to simulate the distance change.
        """
        ori_h, ori_w, _ = image.shape
        ori_focal = (intrinsic[0] + intrinsic[1]) / 2.0

        to_canonical_ratio = self.canonical_focal / ori_focal
        to_scale_ratio = to_random_ratio
        resize_ratio = to_canonical_ratio * to_random_ratio
        reshape_h = int(ori_h * resize_ratio + 0.5)
        reshape_w = int(ori_w * resize_ratio + 0.5)

        image = cv2.resize(image, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_LINEAR)
        if intrinsic is not None:
            intrinsic = [self.canonical_focal, self.canonical_focal, intrinsic[2]*resize_ratio, intrinsic[3]*resize_ratio]
        if label is not None:
            # Sparse LiDAR depth must be resized without interpolation so that
            # valid samples are preserved rather than blended away.
            label = resize_depth_preserve(label, (reshape_h, reshape_w))
            # Only the random stage rescales the metric depth.
            label = label / to_scale_ratio

        if cam_model is not None:
            # Do not resize the cam_model map directly: the camera model
            # changes in the 'to canonical' stage but holds in the 'random
            # resizing' stage, so rebuild it from the updated intrinsics.
            cam_model = build_camera_model(reshape_h, reshape_w, intrinsic)
        return image, label, intrinsic, cam_model, to_scale_ratio

    def random_on_crop_transform(self, image, label, intrinsic, cam_model, to_random_ratio):
        """Like ``random_on_canonical_transform`` but the random size is
        proposed relative to ``self.crop_size`` and fit to the long edge."""
        ori_h, ori_w, _ = image.shape
        crop_h, crop_w = self.crop_size
        ori_focal = (intrinsic[0] + intrinsic[1]) / 2.0

        to_canonical_ratio = self.canonical_focal / ori_focal

        # Propose a random size from the last crop size, then fit the image's
        # aspect ratio by resizing based on the long edge.
        proposal_reshape_h = int(crop_h * to_random_ratio + 0.5)
        proposal_reshape_w = int(crop_w * to_random_ratio + 0.5)
        resize_ratio_h = proposal_reshape_h / ori_h
        resize_ratio_w = proposal_reshape_w / ori_w
        resize_ratio = min(resize_ratio_h, resize_ratio_w)
        reshape_h = int(ori_h * resize_ratio + 0.5)
        reshape_w = int(ori_w * resize_ratio + 0.5)

        # Depth rescale is the part of the resize not explained by the
        # canonical-focal adjustment.
        to_scale_ratio = resize_ratio / to_canonical_ratio

        image = cv2.resize(image, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_LINEAR)
        if intrinsic is not None:
            intrinsic = [self.canonical_focal, self.canonical_focal, intrinsic[2]*resize_ratio, intrinsic[3]*resize_ratio]
        if label is not None:
            # See random_on_canonical_transform: preserve sparse samples.
            label = resize_depth_preserve(label, (reshape_h, reshape_w))
            label = label / to_scale_ratio

        if cam_model is not None:
            # Rebuild instead of resizing (see random_on_canonical_transform).
            cam_model = build_camera_model(reshape_h, reshape_w, intrinsic)
        return image, label, intrinsic, cam_model, to_scale_ratio

    def __call__(self, images, labels, intrinsics, cam_models=None, normals=None, other_labels=None, transform_paras=None):
        assert len(images[0].shape) == 3 and len(labels[0].shape) == 2
        # FIX: ``np.float`` was removed in NumPy 1.24; accept any floating dtype.
        assert np.issubdtype(labels[0].dtype, np.floating)
        target_focal = (intrinsics[0][0] + intrinsics[0][1]) / 2.0
        target_to_canonical_ratio = self.canonical_focal / target_focal
        target_img_shape = images[0].shape
        # One random ratio shared by every image of the sample.
        to_random_ratio = random.uniform(self.ratio_range[0], self.ratio_range[1])
        to_scale_ratio = 0
        for i in range(len(images)):
            img = images[i]
            # Labels/intrinsics/cam_models may be shorter than images.
            label = labels[i] if i < len(labels) else None
            intrinsic = intrinsics[i] if i < len(intrinsics) else None
            cam_model = cam_models[i] if cam_models is not None and i < len(cam_models) else None
            img, label, intrinsic, cam_model, to_scale_ratio = self.random_on_canonical_transform(
                img, label, intrinsic, cam_model, to_random_ratio)

            images[i] = img
            if label is not None:
                labels[i] = label
            if intrinsic is not None:
                intrinsics[i] = intrinsic
            if cam_model is not None:
                cam_models[i] = cam_model
        if normals is not None:
            reshape_h, reshape_w, _ = images[0].shape
            for i, normal in enumerate(normals):
                normals[i] = cv2.resize(normal, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_NEAREST)

        if other_labels is not None:
            # Other labels (semantic / instance / plane segmentations) follow
            # the images with nearest-neighbor resizing.
            reshape_h, reshape_w, _ = images[0].shape
            for i, other_label_i in enumerate(other_labels):
                other_labels[i] = cv2.resize(other_label_i, dsize=(reshape_w, reshape_h), interpolation=cv2.INTER_NEAREST)

        if transform_paras is not None:
            transform_paras.update(label_scale_factor=1.0/to_scale_ratio)

        return images, labels, intrinsics, cam_models, normals, other_labels, transform_paras
|
| 1455 |
+
|
| 1456 |
+
|
| 1457 |
+
|
| 1458 |
+
def build_camera_model(H : int, W : int, intrinsics : list) -> np.array:
    """
    Encode camera intrinsics (focal length and principal point) as an
    [H, W, 4] map.

    Channels: normalized x offset from the principal point, normalized y
    offset, horizontal FoV angle, vertical FoV angle.
    """
    fx, fy, u0, v0 = intrinsics
    focal = (fx + fy) / 2.0

    # Normalized offsets from the principal point.
    col_offsets = (np.arange(0, W).astype(np.float32) - u0) / W
    row_offsets = (np.arange(0, H).astype(np.float32) - v0) / H
    x_center = np.tile(col_offsets, (H, 1))   # [H, W]
    y_center = np.tile(row_offsets, (W, 1)).T  # [H, W]

    # Per-pixel field-of-view angles.
    fov_x = np.arctan(x_center / (focal / W))
    fov_y = np.arctan(y_center / (focal / H))

    return np.stack([x_center, y_center, fov_x, fov_y], axis=2)
|
| 1479 |
+
|
| 1480 |
+
|
| 1481 |
+
if __name__ == '__main__':
    # Smoke test: repeatedly apply the Weather augmentation to one image and
    # dump the results for visual inspection.
    img = cv2.imread('/mnt/mldb/raw/62b3ed3455e805efcb28c74b/NuScenes/data_test/samples/CAM_FRONT/n008-2018-08-01-15-34-25-0400__CAM_FRONT__1533152214512404.jpg', -1)
    H, W, _ = img.shape
    label = img[:, :, 0]
    intrinsic = [1000, 1000, W//2, H//2]
    for i in range(20):
        weather_aug = Weather(prob=1.0)
        # FIX: Weather.__call__ returns 7 values; the original unpacked only
        # 6, raising ValueError on every run. Fresh names also keep the
        # original label/intrinsic intact for the next iteration.
        img_aug, labels_out, intrinsics_out, cam_models, normals, other_labels, transform_paras = \
            weather_aug([img, ], [label, ], [intrinsic, ])
        cv2.imwrite(f'test_aug_{i}.jpg', img_aug[0])

    print('Done')
|
external/Metric3D/training/mono/utils/unproj_pcd.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import torch
|
| 3 |
+
from plyfile import PlyData, PlyElement
|
| 4 |
+
import cv2
|
| 5 |
+
|
| 6 |
+
def get_pcd_base(H, W, u0, v0, focal_length):
    """Build the [H, W, 3] back-projection rays of a pinhole camera.

    Each pixel (u, v) maps to ((u - u0) / f, (v - v0) / f, 1); multiplying by
    its depth yields the 3D point in camera coordinates.
    """
    us = np.tile(np.arange(0, W), (H, 1)).astype(np.float32)
    vs = np.tile(np.arange(0, H), (W, 1)).T.astype(np.float32)
    x = (us - u0) / focal_length
    y = (vs - v0) / focal_length
    z = np.ones_like(x)
    return np.stack([x, y, z], 2)  # [h, w, 3]
|
| 22 |
+
|
| 23 |
+
def reconstruct_pcd(depth, focal_length, u0, v0, pcd_base=None, mask=None):
    """Back-project a depth map into an [H, W, 3] point cloud.

    Args:
        depth (np.ndarray | torch.Tensor [H, W]): depth map; tensors are moved
            to CPU and median-filtered first, as in the original pipeline.
        focal_length, u0, v0: pinhole intrinsics.
        pcd_base (np.ndarray [H, W, 3] | None): precomputed rays; built when None.
        mask (np.ndarray [H, W] bool | None): pixels to zero out in the result.

    Returns:
        np.ndarray [H, W, 3]: per-pixel 3D points.
    """
    # FIX: the original compared ``type(depth) == torch.__name__`` (a type
    # against the string 'torch'), which is always False, so tensors were
    # never converted or filtered.
    if isinstance(depth, torch.Tensor):
        depth = depth.cpu().numpy().squeeze()
        depth = cv2.medianBlur(depth, 5)
    if pcd_base is None:
        H, W = depth.shape
        pcd_base = get_pcd_base(H, W, u0, v0, focal_length)
    pcd = depth[:, :, None] * pcd_base
    # FIX: ``if mask:`` raises ValueError for multi-element arrays; test for
    # presence explicitly.
    if mask is not None:
        pcd[mask] = 0
    return pcd
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def save_point_cloud(pcd, rgb, filename, binary=True):
|
| 37 |
+
"""Save an RGB point cloud as a PLY file.
|
| 38 |
+
:paras
|
| 39 |
+
@pcd: Nx3 matrix, the XYZ coordinates
|
| 40 |
+
@rgb: NX3 matrix, the rgb colors for each 3D point
|
| 41 |
+
"""
|
| 42 |
+
assert pcd.shape[0] == rgb.shape[0]
|
| 43 |
+
|
| 44 |
+
if rgb is None:
|
| 45 |
+
gray_concat = np.tile(np.array([128], dtype=np.uint8), (pcd.shape[0], 3))
|
| 46 |
+
points_3d = np.hstack((pcd, gray_concat))
|
| 47 |
+
else:
|
| 48 |
+
points_3d = np.hstack((pcd, rgb))
|
| 49 |
+
python_types = (float, float, float, int, int, int)
|
| 50 |
+
npy_types = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'),
|
| 51 |
+
('blue', 'u1')]
|
| 52 |
+
if binary is True:
|
| 53 |
+
# Format into NumPy structured array
|
| 54 |
+
vertices = []
|
| 55 |
+
for row_idx in range(points_3d.shape[0]):
|
| 56 |
+
cur_point = points_3d[row_idx]
|
| 57 |
+
vertices.append(tuple(dtype(point) for dtype, point in zip(python_types, cur_point)))
|
| 58 |
+
vertices_array = np.array(vertices, dtype=npy_types)
|
| 59 |
+
el = PlyElement.describe(vertices_array, 'vertex')
|
| 60 |
+
|
| 61 |
+
# Write
|
| 62 |
+
PlyData([el]).write(filename)
|
| 63 |
+
else:
|
| 64 |
+
x = np.squeeze(points_3d[:, 0])
|
| 65 |
+
y = np.squeeze(points_3d[:, 1])
|
| 66 |
+
z = np.squeeze(points_3d[:, 2])
|
| 67 |
+
r = np.squeeze(points_3d[:, 3])
|
| 68 |
+
g = np.squeeze(points_3d[:, 4])
|
| 69 |
+
b = np.squeeze(points_3d[:, 5])
|
| 70 |
+
|
| 71 |
+
ply_head = 'ply\n' \
|
| 72 |
+
'format ascii 1.0\n' \
|
| 73 |
+
'element vertex %d\n' \
|
| 74 |
+
'property float x\n' \
|
| 75 |
+
'property float y\n' \
|
| 76 |
+
'property float z\n' \
|
| 77 |
+
'property uchar red\n' \
|
| 78 |
+
'property uchar green\n' \
|
| 79 |
+
'property uchar blue\n' \
|
| 80 |
+
'end_header' % r.shape[0]
|
| 81 |
+
# ---- Save ply data to disk
|
| 82 |
+
np.savetxt(filename, np.column_stack((x, y, z, r, g, b)), fmt="%d %d %d %d %d %d", header=ply_head, comments='')
|
external/Metric3D/training/mono/utils/visualization.py
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import matplotlib.pyplot as plt
|
| 2 |
+
import os, cv2
|
| 3 |
+
import numpy as np
|
| 4 |
+
from mono.utils.transform import gray_to_colormap
|
| 5 |
+
import shutil
|
| 6 |
+
import glob
|
| 7 |
+
from mono.utils.running import main_process
|
| 8 |
+
import torch
|
| 9 |
+
from html4vision import Col, imagetable
|
| 10 |
+
|
| 11 |
+
def save_raw_imgs(
    pred: torch.tensor,
    rgb: torch.tensor,
    filename: str,
    save_dir: str,
    scale: float=1000.0,
    target: torch.tensor=None,
    ):
    """
    Save the raw RGB image, the predicted depth, and (optionally) the GT depth.

    Depths are stored as uint16 PNGs scaled by ``scale``.
    """
    cv2.imwrite(os.path.join(save_dir, filename[:-4]+'_rgb.jpg'), rgb)
    # FIX: the prediction used to be written to '*_gt.png' and was then
    # overwritten by the target; save it under its own '_pred' suffix.
    cv2.imwrite(os.path.join(save_dir, filename[:-4]+'_pred.png'), (pred*scale).astype(np.uint16))
    if target is not None:
        cv2.imwrite(os.path.join(save_dir, filename[:-4]+'_gt.png'), (target*scale).astype(np.uint16))
|
| 26 |
+
|
| 27 |
+
def save_normal_val_imgs(
    iter: int,
    pred: torch.tensor,
    filename: str,
    save_dir: str,
    tb_logger=None,
    mask=None,
    targ: torch.tensor=None,
    rgb: torch.tensor=None,
    ):
    """
    Save a surface-normal prediction (plus GT and RGB when available) to disk
    and optionally to tensorboard.

    FIX: the original body referenced ``targ`` and ``rgb`` although those
    parameters were commented out of the signature, so every call raised
    NameError. They are restored as optional keyword arguments; when either is
    absent only the colored prediction is saved (the behavior sketched in the
    original commented-out block).
    """
    mean = np.array([123.675, 116.28, 103.53])[np.newaxis, np.newaxis, :]
    std = np.array([58.395, 57.12, 57.375])[np.newaxis, np.newaxis, :]
    pred = pred.squeeze()
    if pred.size(0) == 3:
        pred = pred.permute(1, 2, 0)
    pred_color = vis_surface_normal(pred, mask)

    if targ is None or rgb is None:
        # Prediction-only path: save the colored normal map alone.
        plt.imsave(os.path.join(save_dir, filename[:-4]+'.jpg'), pred_color)
        if tb_logger is not None:
            tb_logger.add_image(f'{filename[:-4]}.jpg', pred_color.transpose((2, 0, 1)), iter)
        return

    targ = targ.squeeze()
    rgb = rgb.squeeze()
    if targ.size(0) == 3:
        targ = targ.permute(1, 2, 0)
    if rgb.size(0) == 3:
        rgb = rgb.permute(1, 2, 0)

    targ_color = vis_surface_normal(targ, mask)
    rgb_color = ((rgb.cpu().numpy() * std) + mean).astype(np.uint8)

    try:
        cat_img = np.concatenate([rgb_color, pred_color, targ_color], axis=0)
    except ValueError:
        # Shapes can disagree after augmentation; resize to the RGB resolution.
        pred_color = cv2.resize(pred_color, (rgb.shape[1], rgb.shape[0]))
        targ_color = cv2.resize(targ_color, (rgb.shape[1], rgb.shape[0]))
        cat_img = np.concatenate([rgb_color, pred_color, targ_color], axis=0)

    plt.imsave(os.path.join(save_dir, filename[:-4]+'_merge.jpg'), cat_img)
    # save to tensorboard
    if tb_logger is not None:
        tb_logger.add_image(f'{filename[:-4]}_merge.jpg', cat_img.transpose((2, 0, 1)), iter)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def save_val_imgs(
    iter: int,
    pred: torch.tensor,
    target: torch.tensor,
    rgb: torch.tensor,
    filename: str,
    save_dir: str,
    tb_logger=None
    ):
    """
    Stack RGB, colored prediction, and colored GT vertically and save to disk
    (and optionally to tensorboard).

    Returns:
        float: the depth value used as the color-scale maximum.
    """
    rgb, pred_scale, target_scale, pred_color, target_color, max_scale = get_data_for_log(pred, target, rgb)
    rgb = rgb.transpose((1, 2, 0))
    merged = np.concatenate([rgb, pred_color, target_color], axis=0)
    plt.imsave(os.path.join(save_dir, filename[:-4]+'_merge.jpg'), merged)

    # save to tensorboard (expects CHW layout)
    if tb_logger is not None:
        tb_logger.add_image(f'{filename[:-4]}_merge.jpg', merged.transpose((2, 0, 1)), iter)
    return max_scale
|
| 108 |
+
|
| 109 |
+
def get_data_for_log(pred: torch.tensor, target: torch.tensor, rgb: torch.tensor):
    """
    Prepare depth prediction/GT/RGB tensors for logging.

    Returns the de-normalized RGB (uint8, CHW), uint16-scaled pred/GT maps,
    colorized pred/GT images resized to the RGB resolution, and the depth
    value used as the shared color-scale maximum.
    """
    mean = np.array([123.675, 116.28, 103.53])[:, np.newaxis, np.newaxis]
    std = np.array([58.395, 57.12, 57.375])[:, np.newaxis, np.newaxis]

    pred = pred.squeeze().cpu().numpy()
    target = target.squeeze().cpu().numpy()
    rgb = rgb.squeeze().cpu().numpy()

    pred[pred < 0] = 0
    target[target < 0] = 0
    # Shared color scale: clamp the prediction at twice the GT maximum.
    max_scale = min(2.0 * target.max(), pred.max())
    pred[pred > max_scale] = max_scale

    pred_scale = (pred / max_scale * 10000).astype(np.uint16)
    target_scale = (target / max_scale * 10000).astype(np.uint16)
    pred_color = gray_to_colormap(pred, max_value=max_scale)
    target_color = gray_to_colormap(target, max_value=max_scale)

    # Dilate the (sparse) GT colors so individual samples stay visible.
    dilate = True
    if dilate:
        kernel = np.ones((3, 3), np.uint8)
        target_color = cv2.dilate(target_color, kernel, iterations=1)

    pred_color = cv2.resize(pred_color, (rgb.shape[2], rgb.shape[1]))
    target_color = cv2.resize(target_color, (rgb.shape[2], rgb.shape[1]))

    rgb = ((rgb * std) + mean).astype(np.uint8)
    return rgb, pred_scale, target_scale, pred_color, target_color, max_scale
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def create_html(name2path, save_path='index.html', size=(256, 384)):
    """Write an HTML image table: one column per (title, image-paths) entry of ``name2path``."""
    columns = [Col('img', title, paths) for title, paths in name2path.items()]
    imagetable(columns, out_file=save_path, imsize=size)
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def visual_train_data(gt_depth, rgb, filename, wkdir, replace=False, pred=None):
    """
    Save a training sample (RGB + colored GT depth, plus the prediction when
    given) as one vertically stacked image under ``wkdir/test_train``.

    Args:
        gt_depth (torch.Tensor): GT depth map.
        rgb (torch.Tensor): normalized CHW RGB tensor.
        filename (str): output file name (may contain sub-directories).
        wkdir (str): work directory root.
        replace (bool): paint GT depth colors onto the RGB where GT is valid.
        pred (torch.Tensor | None): optional predicted depth.
    """
    gt_depth = gt_depth.cpu().squeeze().numpy()
    rgb = rgb.cpu().squeeze().numpy()

    mean = np.array([123.675, 116.28, 103.53])[:, np.newaxis, np.newaxis]
    std = np.array([58.395, 57.12, 57.375])[:, np.newaxis, np.newaxis]
    mask = gt_depth > 0

    rgb = ((rgb * std) + mean).astype(np.uint8).transpose((1, 2, 0))
    gt_vis = gray_to_colormap(gt_depth)
    if replace:
        rgb[mask] = gt_vis[mask]

    # FIX: ``pred_vis`` was concatenated unconditionally, raising NameError
    # whenever ``pred`` was None; only stack it when a prediction is supplied.
    panels = [rgb, gt_vis]
    if pred is not None:
        pred_depth = pred.detach().cpu().squeeze().numpy()
        panels.append(gray_to_colormap(pred_depth))

    merge = np.concatenate(panels, axis=0)

    save_path = os.path.join(wkdir, 'test_train', filename)
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    plt.imsave(save_path, merge)
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def create_dir_for_validate_meta(work_dir, iter_id):
    """
    Create (and return) ``work_dir/online_val/<iter_id>`` for validation
    output, pruning the oldest such directory (and its .html report) once more
    than 8 exist.
    """
    candidates = [d for d in glob.glob(work_dir + '/online_val/*0') if os.path.isdir(d)]
    if len(candidates) > 8:
        candidates.sort()
        oldest = candidates.pop(0)
        print(oldest)
        if main_process():
            # Only rank 0 touches the filesystem.
            if os.path.exists(oldest):
                shutil.rmtree(oldest)
            if os.path.exists(oldest + '.html'):
                os.remove(oldest + '.html')

    out_dir = os.path.join(work_dir, 'online_val', '%08d' % iter_id)
    os.makedirs(out_dir, exist_ok=True)
    return out_dir
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
def vis_surface_normal(normal: torch.tensor, mask: torch.tensor=None) -> np.array:
    """
    Visualize a surface-normal map by mapping unit vectors from [-1, 1] to [0, 255].

    Args:
        normal (torch.tensor, [h, w, 3]): surface normals.
        mask (torch.tensor, [h, w], optional): valid-pixel mask; invalid
            pixels are rendered black.

    Returns:
        np.array [h, w, 3]: uint8 visualization.
    """
    n = normal.cpu().numpy().squeeze()
    # Normalize to unit length before mapping into the byte range.
    length = np.sqrt(np.sum(n ** 2, axis=2, keepdims=True))
    unit = n / (length + 1e-8)
    vis = unit * 127
    vis += 128
    vis = vis.astype(np.uint8)
    if mask is not None:
        vis[~mask.cpu().numpy().squeeze()] = 0
    return vis
|
external/Metric3D/training/mono/utils/weather_aug_utils.py
ADDED
|
@@ -0,0 +1,872 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# import glob
|
| 3 |
+
import cv2 as cv2
|
| 4 |
+
import numpy as np
|
| 5 |
+
# import matplotlib.pyplot as plt
|
| 6 |
+
import random
|
| 7 |
+
import math
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
###################### HLS #############################
|
| 11 |
+
|
| 12 |
+
def hls(image, src='RGB'):
    """Convert an image (or a list of images) from *src* colour space to HLS.

    `src` names the source colour space (e.g. 'RGB', 'BGR'); the matching
    cv2.COLOR_<SRC>2HLS conversion code is looked up dynamically.
    """
    verify_image(image)
    # getattr replaces the original eval() string execution: same dynamic
    # lookup, no arbitrary-code-execution hazard, and a clearer AttributeError
    # when `src` does not name a known colour space.
    flag = getattr(cv2, 'COLOR_' + src.upper() + '2HLS')
    if is_list(image):
        return [cv2.cvtColor(img, flag) for img in image]
    return cv2.cvtColor(image, flag)
|
| 22 |
+
|
| 23 |
+
def hue(image, src='RGB'):
    """Return the hue channel (HLS index 0) of an image or list of images."""
    verify_image(image)
    if is_list(image):
        return [hls(img, src)[:, :, 0] for img in image]
    return hls(image, src)[:, :, 0]
|
| 33 |
+
|
| 34 |
+
def lightness(image, src='RGB'):
    """Return the lightness channel (HLS index 1) of an image or list of images."""
    verify_image(image)
    if is_list(image):
        return [hls(img, src)[:, :, 1] for img in image]
    return hls(image, src)[:, :, 1]
|
| 44 |
+
|
| 45 |
+
def saturation(image, src='RGB'):
    """Return the saturation channel (HLS index 2) of an image or list of images."""
    verify_image(image)
    if is_list(image):
        return [hls(img, src)[:, :, 2] for img in image]
    return hls(image, src)[:, :, 2]
|
| 55 |
+
|
| 56 |
+
###################### HSV #############################
|
| 57 |
+
|
| 58 |
+
def hsv(image, src='RGB'):
    """Convert an image (or a list of images) from *src* colour space to HSV."""
    verify_image(image)
    # getattr replaces the original eval() string execution (safer, clearer).
    flag = getattr(cv2, 'COLOR_' + src.upper() + '2HSV')
    if is_list(image):
        return [cv2.cvtColor(img, flag) for img in image]
    return cv2.cvtColor(image, flag)
|
| 68 |
+
|
| 69 |
+
def value(image, src='RGB'):
    """Return the value channel (HSV index 2) of an image or list of images."""
    verify_image(image)
    if is_list(image):
        return [hsv(img, src)[:, :, 2] for img in image]
    return hsv(image, src)[:, :, 2]
|
| 79 |
+
|
| 80 |
+
###################### BGR #############################
|
| 81 |
+
|
| 82 |
+
def bgr(image, src='RGB'):
    """Convert an image (or a list of images) from *src* colour space to BGR."""
    verify_image(image)
    # getattr replaces the original eval() string execution (safer, clearer).
    flag = getattr(cv2, 'COLOR_' + src.upper() + '2BGR')
    if is_list(image):
        return [cv2.cvtColor(img, flag) for img in image]
    return cv2.cvtColor(image, flag)
|
| 92 |
+
|
| 93 |
+
###################### RGB #############################
|
| 94 |
+
def rgb(image, src='BGR'):
    """Convert an image (or a list of images) from *src* colour space to RGB."""
    verify_image(image)
    # getattr replaces the original eval() string execution (safer, clearer).
    flag = getattr(cv2, 'COLOR_' + src.upper() + '2RGB')
    if is_list(image):
        return [cv2.cvtColor(img, flag) for img in image]
    return cv2.cvtColor(image, flag)
|
| 104 |
+
|
| 105 |
+
def red(image, src='BGR'):
    """Return the red channel of an image (or of each image in a list)."""
    verify_image(image)
    # getattr replaces the original eval() string execution (safer, clearer).
    flag = getattr(cv2, 'COLOR_' + src.upper() + '2RGB')
    if is_list(image):
        return [cv2.cvtColor(img, flag)[:, :, 0] for img in image]
    return cv2.cvtColor(image, flag)[:, :, 0]
|
| 116 |
+
|
| 117 |
+
def green(image, src='BGR'):
    """Return the green channel of an image (or of each image in a list)."""
    verify_image(image)
    # getattr replaces the original eval() string execution (safer, clearer).
    flag = getattr(cv2, 'COLOR_' + src.upper() + '2RGB')
    if is_list(image):
        return [cv2.cvtColor(img, flag)[:, :, 1] for img in image]
    return cv2.cvtColor(image, flag)[:, :, 1]
|
| 128 |
+
|
| 129 |
+
def blue(image, src='BGR'):
    """Return the blue channel of an image (or of each image in a list)."""
    verify_image(image)
    # getattr replaces the original eval() string execution (safer, clearer).
    flag = getattr(cv2, 'COLOR_' + src.upper() + '2RGB')
    if is_list(image):
        return [cv2.cvtColor(img, flag)[:, :, 2] for img in image]
    return cv2.cvtColor(image, flag)[:, :, 2]
|
| 140 |
+
|
| 141 |
+
# Validation error messages shared by the public augmentation helpers.
# (Several are unused in this file; presumably referenced by callers — verify.)
err_not_np_img= "not a numpy array or list of numpy array"
err_img_arr_empty="Image array is empty"
err_row_zero="No. of rows can't be <=0"
err_column_zero="No. of columns can't be <=0"
err_invalid_size="Not a valid size tuple (x,y)"
err_caption_array_count="Caption array length doesn't matches the image array length"
|
| 147 |
+
|
| 148 |
+
def is_numpy_array(x):
    """Return True when *x* is a numpy ndarray."""
    return isinstance(x, np.ndarray)
|
| 151 |
+
def is_tuple(x):
    """Return True when *x* is exactly a tuple (subclasses excluded)."""
    return type(x) is tuple
|
| 153 |
+
def is_list(x):
    """Return True when *x* is exactly a list (subclasses excluded)."""
    return type(x) is list
|
| 155 |
+
def is_numeric(x):
    """Return True only for exact ints (floats and bools are rejected)."""
    return type(x) is int
|
| 157 |
+
def is_numeric_list_or_tuple(x):
    """Return True when every element of the iterable *x* is an exact int."""
    for element in x:
        # inlined is_numeric(): exact-int check, bools/floats rejected
        if type(element) is not int:
            return False
    return True
|
| 162 |
+
|
| 163 |
+
# Coefficient-range error messages for brighten() / darken().
err_brightness_coeff="brightness coeff can only be between 0.0 to 1.0"
err_darkness_coeff="darkness coeff can only be between 0.0 to 1.0"
|
| 165 |
+
|
| 166 |
+
def change_light(image, coeff):
    """Scale the HLS lightness channel of an RGB image by *coeff*.

    coeff > 1 brightens, coeff < 1 darkens. Returns a new uint8 RGB image.
    """
    hls_img = np.array(cv2.cvtColor(image, cv2.COLOR_RGB2HLS), dtype=np.float64)
    hls_img[:, :, 1] = hls_img[:, :, 1] * coeff
    if coeff > 1:
        # clip overflow before the uint8 cast, which would otherwise wrap
        hls_img[:, :, 1][hls_img[:, :, 1] > 255] = 255
    else:
        hls_img[:, :, 1][hls_img[:, :, 1] < 0] = 0
    return cv2.cvtColor(np.array(hls_img, dtype=np.uint8), cv2.COLOR_HLS2RGB)
|
| 177 |
+
|
| 178 |
+
def verify_image(image):
    """Validate that *image* is an ndarray or a list of ndarrays.

    Returns None on success; raises Exception(err_not_np_img) otherwise.
    """
    if is_numpy_array(image):
        return
    if not is_list(image):
        raise Exception(err_not_np_img)
    for frame in image:
        if not is_numpy_array(frame):
            raise Exception(err_not_np_img)
|
| 188 |
+
|
| 189 |
+
def brighten(image, brightness_coeff=-1):
    """Brighten an image or a list of images.

    brightness_coeff in [0.0, 1.0] maps to a lightness scale of
    1.0 + coeff (i.e. 1.0-2.0); -1 draws the coefficient at random,
    independently per image, from the same range.
    (Original comments claimed 1.0-1.5 for the random case; the code
    has always produced 1.0-2.0.)
    """
    verify_image(image)
    if brightness_coeff != -1:
        if brightness_coeff < 0.0 or brightness_coeff > 1.0:
            raise Exception(err_brightness_coeff)

    def _scale():
        # final lightness multiplier in [1.0, 2.0)
        if brightness_coeff == -1:
            return 1 + random.uniform(0, 1)
        return 1 + brightness_coeff

    if is_list(image):
        return [change_light(img, _scale()) for img in image]
    return change_light(image, _scale())
|
| 210 |
+
|
| 211 |
+
def darken(image, darkness_coeff=-1):
    """Darken an image or a list of images.

    darkness_coeff in [0.0, 1.0] maps to a lightness scale of
    1.0 - coeff; -1 draws the coefficient at random, independently
    per image, from the same range.
    """
    verify_image(image)
    if darkness_coeff != -1:
        if darkness_coeff < 0.0 or darkness_coeff > 1.0:
            raise Exception(err_darkness_coeff)

    def _scale():
        # final lightness multiplier in (0.0, 1.0]
        if darkness_coeff == -1:
            return 1 - random.uniform(0, 1)
        return 1 - darkness_coeff

    if is_list(image):
        return [change_light(img, _scale()) for img in image]
    return change_light(image, _scale())
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
def random_brightness(image):
    """Apply a random lightness scale in [0.0, 2.0), drawn per image."""
    verify_image(image)
    if is_list(image):
        return [change_light(img, 2 * np.random.uniform(0, 1)) for img in image]
    return change_light(image, 2 * np.random.uniform(0, 1))
|
| 248 |
+
|
| 249 |
+
# Parameter-range error messages for add_shadow() (ROI message is shared
# with add_gravel()).
err_shadow_count="only 1-10 shadows can be introduced in an image"
err_invalid_rectangular_roi="Rectangular ROI dimensions are not valid"
err_shadow_dimension="polygons with dim<3 dont exist and >10 take time to plot"
|
| 252 |
+
|
| 253 |
+
def generate_shadow_coordinates(imshape, no_of_shadows, rectangular_roi, shadow_dimension):
    """Sample random polygon vertex arrays inside a rectangular ROI.

    Returns a list of `no_of_shadows` int32 arrays of shape
    (1, shadow_dimension, 2), each a polygon suitable for cv2.fillPoly.
    """
    x1, y1, x2, y2 = rectangular_roi
    polygons = []
    for _ in range(no_of_shadows):
        corners = [(random.randint(x1, x2), random.randint(y1, y2))
                   for _ in range(shadow_dimension)]
        polygons.append(np.array([corners], dtype=np.int32))
    return polygons
|
| 266 |
+
|
| 267 |
+
def shadow_process(image, no_of_shadows, x1, y1, x2, y2, shadow_dimension):
    """Darken random polygonal regions of an RGB image by halving their lightness."""
    image_HLS = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
    mask = np.zeros_like(image)
    polygons = generate_shadow_coordinates(image.shape, no_of_shadows,
                                           (x1, y1, x2, y2), shadow_dimension)
    for poly in polygons:
        # 255 in channel 0 of the mask marks shadow pixels
        cv2.fillPoly(mask, poly, 255)
    shadowed = mask[:, :, 0] == 255
    image_HLS[:, :, 1][shadowed] = image_HLS[:, :, 1][shadowed] * 0.5
    return cv2.cvtColor(image_HLS, cv2.COLOR_HLS2RGB)
|
| 277 |
+
|
| 278 |
+
def add_shadow(image,no_of_shadows=1,rectangular_roi=(-1,-1,-1,-1), shadow_dimension=5):## ROI:(top-left x1,y1, bottom-right x2,y2), shadow_dimension=no. of sides of polygon generated
    """Cast random polygonal shadows onto an image or a list of images.

    no_of_shadows: 1-10 polygons. rectangular_roi: (x1, y1, x2, y2) region
    the shadows are confined to; (-1,-1,-1,-1) defaults to the bottom half
    of the image. shadow_dimension: number of polygon sides, 3-10.
    Raises Exception on out-of-range parameters or an invalid ROI.
    """
    verify_image(image)
    if not(is_numeric(no_of_shadows) and no_of_shadows>=1 and no_of_shadows<=10):
        raise Exception(err_shadow_count)
    if not(is_numeric(shadow_dimension) and shadow_dimension>=3 and shadow_dimension<=10):
        raise Exception(err_shadow_dimension)
    # The ROI must always be a 4-tuple of ints, even when it is the sentinel.
    if is_tuple(rectangular_roi) and is_numeric_list_or_tuple(rectangular_roi) and len(rectangular_roi)==4:
        x1=rectangular_roi[0]
        y1=rectangular_roi[1]
        x2=rectangular_roi[2]
        y2=rectangular_roi[3]
    else:
        raise Exception(err_invalid_rectangular_roi)
    if rectangular_roi==(-1,-1,-1,-1):
        # Sentinel ROI: default to the bottom half of the (first) image.
        x1=0

        if(is_numpy_array(image)):
            y1=image.shape[0]//2
            x2=image.shape[1]
            y2=image.shape[0]
        else:
            y1=image[0].shape[0]//2
            x2=image[0].shape[1]
            y2=image[0].shape[0]

    elif x1==-1 or y1==-1 or x2==-1 or y2==-1 or x2<=x1 or y2<=y1:
        # Partially-default or degenerate ROIs are rejected.
        raise Exception(err_invalid_rectangular_roi)
    if(is_list(image)):
        image_RGB=[]
        image_list=image
        for img in image_list:
            output=shadow_process(img,no_of_shadows,x1,y1,x2,y2, shadow_dimension)
            image_RGB.append(output)
    else:
        output=shadow_process(image,no_of_shadows,x1,y1,x2,y2, shadow_dimension)
        image_RGB = output

    return image_RGB
|
| 316 |
+
|
| 317 |
+
# Range error message for add_snow().
err_snow_coeff="Snow coeff can only be between 0 and 1"
|
| 318 |
+
def snow_process(image, snow_coeff):
    """Whiten dark regions of an RGB image to fake snow cover.

    Pixels whose HLS lightness is below the *snow_coeff* threshold are
    brightened by a fixed factor of 2.5, then clipped to 255.
    """
    hls_img = np.array(cv2.cvtColor(image, cv2.COLOR_RGB2HLS), dtype=np.float64)
    brightness_coefficient = 2.5
    below = hls_img[:, :, 1] < snow_coeff  # raise snow_coeff for more snow
    hls_img[:, :, 1][below] = hls_img[:, :, 1][below] * brightness_coefficient
    hls_img[:, :, 1][hls_img[:, :, 1] > 255] = 255  # clip before the uint8 cast
    return cv2.cvtColor(np.array(hls_img, dtype=np.uint8), cv2.COLOR_HLS2RGB)
|
| 329 |
+
|
| 330 |
+
def add_snow(image, snow_coeff=-1):
    """Add a snow effect to an image or a list of images.

    snow_coeff in [0, 1] controls coverage; -1 picks it at random.
    The coefficient is mapped into an HLS lightness threshold before use.
    """
    verify_image(image)
    if snow_coeff != -1:
        if snow_coeff < 0.0 or snow_coeff > 1.0:
            raise Exception(err_snow_coeff)
    else:
        snow_coeff = random.uniform(0, 1)
    # map [0, 1] into a lightness threshold in [255/3, 255/3 + 255/2)
    snow_coeff = snow_coeff * (255 / 2) + 255 / 3
    if is_list(image):
        return [snow_process(img, snow_coeff) for img in image]
    return snow_process(image, snow_coeff)
|
| 350 |
+
|
| 351 |
+
# Parameter-range error messages for add_rain().
err_rain_slant="Numeric value between -20 and 20 is allowed"
err_rain_width="Width value between 1 and 5 is allowed"
err_rain_length="Length value between 0 and 100 is allowed"
|
| 354 |
+
def generate_random_lines(imshape, slant, drop_length, rain_type):
    """Generate random (x, y) anchor points for rain streaks.

    rain_type ('drizzle' / 'heavy' / 'torrential', case-insensitive)
    overrides drop density and drop_length. Returns (drops, drop_length).
    """
    area = imshape[0] * imshape[1]
    no_of_drops = area // 600
    kind = rain_type.lower()
    if kind == 'drizzle':
        no_of_drops = area // 770
        drop_length = 10
    elif kind == 'heavy':
        drop_length = 30
    elif kind == 'torrential':
        no_of_drops = area // 500
        drop_length = 60

    drops = []
    for _ in range(no_of_drops):
        # keep the full slanted streak inside the image horizontally
        if slant < 0:
            x = np.random.randint(slant, imshape[1])
        else:
            x = np.random.randint(0, imshape[1] - slant)
        y = np.random.randint(0, imshape[0] - drop_length)
        drops.append((x, y))
    return drops, drop_length
|
| 376 |
+
|
| 377 |
+
def rain_process(image, slant, drop_length, drop_color, drop_width, rain_drops):
    """Draw rain streaks onto a copy of the image, blur it, and dim it."""
    canvas = image.copy()
    for (x, y) in rain_drops:
        cv2.line(canvas, (x, y), (x + slant, y + drop_length), drop_color, drop_width)
    blurred = cv2.blur(canvas, (7, 7))  # rainy views are blurry
    image_HLS = hls(blurred)
    image_HLS[:, :, 1] = image_HLS[:, :, 1] * 0.7  # rainy days are usually shady
    return rgb(image_HLS, 'hls')
|
| 388 |
+
|
| 389 |
+
##rain_type='drizzle','heavy','torrential'
|
| 390 |
+
##rain_type='drizzle','heavy','torrential'
def add_rain(image, slant=-1, drop_length=20, drop_width=1, drop_color=(200, 200, 200), rain_type='None'):  ## (200,200,200) a shade of gray
    """Add rain streaks to an image or a list of images.

    slant: -20..20, or -1 for a random slant per call.
    drop_length: 0..100; drop_width: 1..5; rain_type overrides density/length.
    Raises Exception on out-of-range parameters.
    """
    verify_image(image)
    slant_extreme = slant
    if not (is_numeric(slant_extreme) and (slant_extreme >= -20 and slant_extreme <= 20) or slant_extreme == -1):
        raise Exception(err_rain_slant)
    if not (is_numeric(drop_width) and drop_width >= 1 and drop_width <= 5):
        raise Exception(err_rain_width)
    if not (is_numeric(drop_length) and drop_length >= 0 and drop_length <= 100):
        raise Exception(err_rain_length)

    if is_list(image):
        imshape = image[0].shape
        if slant_extreme == -1:
            slant = np.random.randint(-10, 10)  # random slant when none given
        rain_drops, drop_length = generate_random_lines(imshape, slant, drop_length, rain_type)
        # FIX: draw with `slant` (the resolved value), not `slant_extreme` —
        # the original passed the -1 sentinel through, so the random slant
        # positioned the drops but the streaks were drawn with slant -1.
        return [rain_process(img, slant, drop_length, drop_color, drop_width, rain_drops)
                for img in image]

    imshape = image.shape
    if slant_extreme == -1:
        slant = np.random.randint(-10, 10)
    rain_drops, drop_length = generate_random_lines(imshape, slant, drop_length, rain_type)
    return rain_process(image, slant, drop_length, drop_color, drop_width, rain_drops)
|
| 419 |
+
|
| 420 |
+
# Range error message for add_fog().
err_fog_coeff="Fog coeff can only be between 0 and 1"
|
| 421 |
+
def add_blur(image, x, y, hw, fog_coeff):
    """Blend a translucent white disc (a haze patch) into a copy of the image.

    The disc has diameter *hw* and is centred within the patch whose
    top-left corner is (x, y); opacity grows with *fog_coeff*.
    """
    overlay = image.copy()
    output = image.copy()
    alpha = 0.08 * fog_coeff
    centre = (x + hw // 2, y + hw // 2)
    cv2.circle(overlay, centre, int(hw // 2), (255, 255, 255), -1)
    cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)
    return output
|
| 430 |
+
|
| 431 |
+
def generate_random_blur_coordinates(imshape, hw):
    """Scatter centre points for haze patches, in rings widening outwards.

    Starts near the image centre and expands the sampling window each
    iteration, adding more points per ring, until the window passes -hw.
    """
    points = []
    midx = imshape[1] // 2 - 2 * hw
    midy = imshape[0] // 2 - hw
    ring = 1
    while midx > -hw or midy > -hw:
        for _ in range(hw // 10 * ring):
            x = np.random.randint(midx, imshape[1] - midx - hw)
            y = np.random.randint(midy, imshape[0] - midy - hw)
            points.append((x, y))
        # widen the window proportionally to the image's aspect
        midx -= 3 * hw * imshape[1] // sum(imshape)
        midy -= 3 * hw * imshape[0] // sum(imshape)
        ring += 1
    return points
|
| 445 |
+
|
| 446 |
+
def add_fog(image, fog_coeff=-1):
    """Add a fog/haze effect to an image or a list of images.

    fog_coeff in [0, 1] controls density; -1 picks a random density in
    [0.3, 1) independently per image.
    """
    verify_image(image)
    if fog_coeff != -1:
        if fog_coeff < 0.0 or fog_coeff > 1.0:
            raise Exception(err_fog_coeff)

    def _fog_one(img, imshape):
        # Resolve the per-image density, blend haze patches, then soften.
        coeff = random.uniform(0.3, 1) if fog_coeff == -1 else fog_coeff
        hw = int(imshape[1] // 3 * coeff)
        for hx, hy in generate_random_blur_coordinates(imshape, hw):
            img = add_blur(img, hx, hy, hw, coeff)
        # FIX: hw < 10 (narrow image / tiny coeff) previously produced a
        # (0, 0) kernel and made cv2.blur raise; clamp to at least 1.
        k = max(hw // 10, 1)
        return cv2.blur(img, (k, k))

    if is_list(image):
        # matches the original: all list entries use the first image's shape
        return [_fog_one(img, image[0].shape) for img in image]
    return _fog_one(image, image.shape)
|
| 482 |
+
|
| 483 |
+
def generate_gravel_patch(rectangular_roi):
    """Sample random pebble centres inside the (x1, y1, x2, y2) box.

    Produces one point per 10 units of box area.
    """
    x1, y1, x2, y2 = rectangular_roi
    area = abs((x2 - x1) * (y2 - y1))
    gravels = []
    for _ in range(int(area // 10)):
        gravels.append((np.random.randint(x1, x2), np.random.randint(y1, y2)))
    return gravels
|
| 495 |
+
|
| 496 |
+
def gravel_process(image,x1,x2,y1,y2,no_of_patches):
    """Paint random gravel speckles into `no_of_patches` sub-regions of the ROI.

    Each patch is a random rectangle inside (x1, y1)-(x2, y2); within it,
    individual pebbles get a random lightness value in the HLS image.
    """
    # x/y start as image width/height but are reused below as pebble coords.
    x=image.shape[1]
    y=image.shape[0]
    rectangular_roi_default=[]
    for i in range(no_of_patches):
        # random sub-rectangle, capped at 200x30 in size
        xx1=random.randint(x1, x2)
        xx2=random.randint(x1, xx1)
        yy1=random.randint(y1, y2)
        yy2=random.randint(y1, yy1)
        rectangular_roi_default.append((xx2,yy2,min(xx1,xx2+200),min(yy1,yy2+30)))
    img_hls=hls(image)
    for roi in rectangular_roi_default:
        gravels= generate_gravel_patch(roi)
        for gravel in gravels:
            x=gravel[0]
            y=gravel[1]
            r=random.randint(1, 4)   # pebble radius
            r1=random.randint(0, 255)  # pebble lightness
            # NOTE(review): min(y+r, y) always equals y and min(x+r, x)
            # always equals x, so only the top-left quadrant of each pebble
            # is painted — min(y+r, image_height) may have been intended;
            # confirm against upstream before changing.
            img_hls[max(y-r,0):min(y+r,y),max(x-r,0):min(x+r,x),1]=r1
    image_RGB= rgb(img_hls,'hls')
    return image_RGB
|
| 517 |
+
|
| 518 |
+
def add_gravel(image, rectangular_roi=(-1, -1, -1, -1), no_of_patches=8):
    """Scatter gravel speckles onto an image or a list of images.

    rectangular_roi: (x1, y1, x2, y2) region to place gravel in;
    (-1,-1,-1,-1) defaults to the bottom quarter of the image.
    Raises Exception(err_invalid_rectangular_roi) on a malformed ROI.
    """
    verify_image(image)
    if not (is_tuple(rectangular_roi) and is_numeric_list_or_tuple(rectangular_roi)
            and len(rectangular_roi) == 4):
        raise Exception(err_invalid_rectangular_roi)
    x1, y1, x2, y2 = rectangular_roi
    if rectangular_roi == (-1, -1, -1, -1):
        # sentinel ROI: bottom quarter of the (first) image
        shape = image.shape if is_numpy_array(image) else image[0].shape
        x1, y1, x2, y2 = 0, int(shape[0] * 3 / 4), shape[1], shape[0]
    elif x1 == -1 or y1 == -1 or x2 == -1 or y2 == -1 or x2 <= x1 or y2 <= y1:
        raise Exception(err_invalid_rectangular_roi)
    # (the original defined an unused local `color=[0,255]` here; removed)
    if is_list(image):
        return [gravel_process(img, x1, x2, y1, y2, no_of_patches) for img in image]
    return gravel_process(image, x1, x2, y1, y2, no_of_patches)
|
| 551 |
+
|
| 552 |
+
# Range error message for add_sun_flare().
err_flare_circle_count="Numeric value between 0 and 20 is allowed"
|
| 553 |
+
def flare_source(image, point, radius, src_color):
    """Paint a radial light source: concentric discs blended with falling opacity.

    Draws radius//10 rings; opacity follows a cubic fade so the centre is
    bright and the rim nearly transparent. radius < 10 draws nothing.
    """
    overlay = image.copy()
    output = image.copy()
    num_rings = radius // 10
    alphas = np.linspace(0.0, 1, num=num_rings)
    radii = np.linspace(1, radius, num=num_rings)
    for i in range(num_rings):
        cv2.circle(overlay, point, int(radii[i]), src_color, -1)
        a = alphas[num_rings - i - 1]
        alp = a * a * a  # cubic falloff toward the rim
        cv2.addWeighted(overlay, alp, output, 1 - alp, 0, output)
    return output
|
| 564 |
+
|
| 565 |
+
def add_sun_flare_line(flare_center, angle, imshape):
    """Sample points every 10px along the flare line through *flare_center*.

    The line has slope tan(angle); y values are mirrored vertically about
    the flare centre's y (matching the original's 2*cy - y reflection).
    Returns (xs, ys). (Also drops the original's unused `i = 0` local.)
    """
    xs = list(range(0, imshape[1], 10))
    slope = math.tan(angle)
    ys = [2 * flare_center[1] - (slope * (x - flare_center[0]) + flare_center[1])
          for x in xs]
    return xs, ys
|
| 574 |
+
|
| 575 |
+
def add_sun_process(image, no_of_flare_circles, flare_center, src_radius, x, y, src_color):
    """Draw random lens-flare blobs along the flare line, then the sun disc itself."""
    overlay = image.copy()
    output = image.copy()
    imshape = image.shape
    for _ in range(no_of_flare_circles):
        alpha = random.uniform(0.05, 0.2)
        r = random.randint(0, len(x) - 1)  # pick a point on the flare line
        rad = random.randint(1, imshape[0] // 100 - 2)
        blob_color = (random.randint(max(src_color[0] - 50, 0), src_color[0]),
                      random.randint(max(src_color[1] - 50, 0), src_color[1]),
                      random.randint(max(src_color[2] - 50, 0), src_color[2]))
        cv2.circle(overlay, (int(x[r]), int(y[r])), rad * rad * rad, blob_color, -1)
        cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)
    return flare_source(output, (int(flare_center[0]), int(flare_center[1])),
                        src_radius, src_color)
|
| 587 |
+
|
| 588 |
+
def add_sun_flare(image, flare_center=-1, angle=-1, no_of_flare_circles=8, src_radius=400, src_color=(255, 255, 255)):
    """Add a sun-flare effect to an image or a list of images.

    flare_center: (x, y) of the sun, or -1 for a random point in the top
    half. angle: flare-line angle in radians, or -1 for random per image.
    no_of_flare_circles: 0-20 extra flare blobs.
    (The list and single-image paths were duplicated in the original; the
    shared logic now lives in one helper — behavior unchanged.)
    """
    verify_image(image)
    if angle != -1:
        angle = angle % (2 * math.pi)
    if not (no_of_flare_circles >= 0 and no_of_flare_circles <= 20):
        raise Exception(err_flare_circle_count)

    def _flare_one(img, imshape):
        # resolve per-image angle and centre, then render
        if angle == -1:
            angle_t = random.uniform(0, 2 * math.pi)
            if angle_t == math.pi / 2:
                angle_t = 0  # avoid tan() blowing up on a vertical line
        else:
            angle_t = angle
        if flare_center == -1:
            center_t = (random.randint(0, imshape[1]),
                        random.randint(0, imshape[0] // 2))
        else:
            center_t = flare_center
        xs, ys = add_sun_flare_line(center_t, angle_t, imshape)
        return add_sun_process(img, no_of_flare_circles, center_t,
                               src_radius, xs, ys, src_color)

    if is_list(image):
        imshape = image[0].shape  # matches original: list uses first shape
        return [_flare_one(img, imshape) for img in image]
    return _flare_one(image, image.shape)
|
| 628 |
+
|
| 629 |
+
# Range error message for add_speed().
err_speed_coeff="Speed coeff can only be between 0 and 1"
|
| 630 |
+
def apply_motion_blur(image, count):
    """Apply horizontal motion blur, strongest toward the image edges.

    Repeatedly blurs the right strip from column *i* onward and the
    mirrored left strip, sliding *i* outwards; *count* controls how far
    from the centre the blur starts and how quickly the step shrinks.
    """
    image_t = image.copy()
    imshape = image_t.shape
    size = 15
    kernel_motion_blur = np.zeros((size, size))
    kernel_motion_blur[int((size - 1) / 2), :] = np.ones(size)  # horizontal line kernel
    kernel_motion_blur = kernel_motion_blur / size
    i = imshape[1] * 3 // 4 - 10 * count
    while i <= imshape[1]:
        image_t[:, i:, :] = cv2.filter2D(image_t[:, i:, :], -1, kernel_motion_blur)
        image_t[:, :imshape[1] - i, :] = cv2.filter2D(image_t[:, :imshape[1] - i, :], -1, kernel_motion_blur)
        step = imshape[1] // 25 - count
        if step <= 0:
            # FIX: the original kept looping with a non-positive step
            # (i never exceeded the width), hanging on narrow images /
            # large counts; terminate instead.
            break
        i += step
        count += 1
    return image_t
|
| 645 |
+
|
| 646 |
+
def add_speed(image, speed_coeff=-1):
    """Add a motion-blur "speed" effect to an image or a list of images.

    speed_coeff in [0, 1] scales the blur extent; -1 draws it at random,
    independently per image.
    """
    verify_image(image)
    if speed_coeff != -1:
        if speed_coeff < 0.0 or speed_coeff > 1.0:
            raise Exception(err_speed_coeff)

    def _count():
        # blur extent in [0, 15)
        if speed_coeff == -1:
            return int(15 * random.uniform(0, 1))
        return int(15 * speed_coeff)

    if is_list(image):
        return [apply_motion_blur(img, _count()) for img in image]
    return apply_motion_blur(image, _count())
|
| 670 |
+
|
| 671 |
+
|
| 672 |
+
|
| 673 |
+
def autumn_process(image):
    """Recolour green-ish image tiles to a random autumn hue.

    Works on 8x8 tiles: where the tile's mean hue is mid-range and the
    raw image's channel-1 mean is low, the hue is replaced by one of four
    fixed autumn hues and saturation is maxed out.
    """
    image_t=image.copy()
    imshape=image_t.shape
    image_hls= hls(image_t)
    step=8  # tile size in pixels
    aut_colors=[1,5,9,11]  # candidate autumn hue values (HLS hue channel)
    col= aut_colors[random.randint(0,3)]
    for i in range(0,imshape[1],step):
        for j in range(0,imshape[0],step):
            avg=np.average(image_hls[j:j+step,i:i+step,0])
            # NOTE(review): the second condition averages channel 1 of the
            # ORIGINAL image (green in RGB input, presumably) rather than
            # the HLS image — confirm this is intentional.
            if(avg >20 and avg< 100 and np.average(image[j:j+step,i:i+step,1])<100):
                image_hls[j:j+step,i:i+step,0]= col
                image_hls[j:j+step,i:i+step,2]=255
    return rgb(image_hls,'hls')
|
| 688 |
+
|
| 689 |
+
|
| 690 |
+
def add_autumn(image):
    """Apply the autumn recolouring effect to an image or a list of images."""
    verify_image(image)
    if is_list(image):
        return [autumn_process(img) for img in image]
    return autumn_process(image)
|
| 705 |
+
|
| 706 |
+
def fliph(image):
    """Mirror image(s) about the horizontal axis (cv2 flip code 0: top-bottom)."""
    verify_image(image)
    if is_list(image):
        return [cv2.flip(img, 0) for img in image]
    return cv2.flip(image, 0)
|
| 717 |
+
|
| 718 |
+
def flipv(image):
    """Flip image(s) around the vertical axis (cv2 flip code 1)."""
    verify_image(image)
    if not is_list(image):
        return cv2.flip(image, 1)
    return [cv2.flip(img, 1) for img in image]
|
| 729 |
+
|
| 730 |
+
def random_flip(image):
    """Randomly flip image(s) around the horizontal or vertical axis.

    Each image independently draws a uniform sample: above 0.5 it is
    flipped with cv2 flip code 0, otherwise with flip code 1.
    """
    verify_image(image)

    def _flip_once(img):
        # one coin toss per image, exactly as the original drew it
        axis = 0 if random.uniform(0, 1) > 0.5 else 1
        return cv2.flip(img, axis)

    if not is_list(image):
        return _flip_once(image)
    return [_flip_once(img) for img in image]
|
| 749 |
+
|
| 750 |
+
def manhole_process(image, center, height, width, src_color=(0, 0, 0)):
    """Paint a filled ellipse ("manhole") of *src_color* onto a copy of *image*.

    *height* and *width* are the ellipse semi-axes. The overlay is blended
    at full opacity (alpha 1), so the result is simply the input image with
    the ellipse drawn on it; the original image is left untouched.
    """
    painted = image.copy()
    result = image.copy()
    # filled ellipse: axes=(width, height), no rotation, full 0-360 arc
    cv2.ellipse(painted, center, (width, height), 0, 0, 360, src_color, -1)
    alpha = 1
    cv2.addWeighted(painted, alpha, result, 1 - alpha, 0, result)
    return result
|
| 759 |
+
|
| 760 |
+
err_invalid_center_manhole="center should be in the format (x,y)"
err_invalid_height_width_manhole="height and width should be positive integers."
def add_manhole(image,center=-1,color=(120,120,120),height=1,width=1, type='closed'):
    """Paint an elliptical manhole onto a single image or a list of images.

    Parameters
    ----------
    image : ndarray or list of ndarray
        Input image(s).
    center : tuple (x, y) or -1
        Ellipse centre; the sentinel -1 places it near the bottom centre
        of each image.
    color : BGR tuple
        Ellipse colour; the sentinel (120,120,120) is replaced by grey for
        type='closed' or black for type='open'.
    height, width : int
        Ellipse semi-axes; the sentinel 1 means "derive from image size".
    type : str
        'closed' or 'open'; only used to pick the default colour.

    Raises
    ------
    Exception
        If center is not an (x, y) tuple, or height/width are not
        positive numbers.
    """
    verify_image(image)

    if center != -1:
        if not (is_tuple(center) and is_numeric_list_or_tuple(center) and len(center) == 2):
            raise Exception(err_invalid_center_manhole)
    if not (is_numeric(height) and is_numeric(width) and height > 0 and width > 0):
        raise Exception(err_invalid_height_width_manhole)
    if color == (120, 120, 120):
        if type == 'closed':
            color = (67, 70, 75)
        elif type == 'open':
            color = (0, 0, 0)

    def _resolved(img):
        # Resolve the sentinel defaults (-1 / 1) against this image's shape.
        # NOTE: the width default intentionally scales off shape[0], as in
        # the original implementation.
        height_t = img.shape[0] // 25 if height == 1 else height
        width_t = int(img.shape[0] * 3 // 25) if width == 1 else width
        center_t = (img.shape[0] - 100, img.shape[1] // 2) if center == -1 else center
        return center_t, height_t, width_t

    if is_list(image):
        image_RGB = []
        for img in image:
            center_t, height_t, width_t = _resolved(img)
            image_RGB.append(manhole_process(img, center_t, height_t, width_t, color))
    else:
        # BUG FIX: the original assigned the default position to `center`
        # instead of `center_t`, so a single image with the default center
        # passed -1 through to manhole_process/cv2.ellipse and failed.
        center_t, height_t, width_t = _resolved(image)
        image_RGB = manhole_process(image, center_t, height_t, width_t, color)
    return image_RGB
|
| 802 |
+
|
| 803 |
+
def exposure_process(image):
    """Normalize the exposure of a BGR image via CLAHE on the luma channel.

    Over-bright luma pixels (Y > 150) are first damped by 15%, then CLAHE,
    global histogram equalization, and a second CLAHE pass are applied to
    the Y channel; the result is lightly denoised before being returned.
    """
    img = np.copy(image)
    yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4, 4))

    # damp over-bright luma before equalizing
    damp = np.ones(yuv[:, :, 0].shape)
    damp[yuv[:, :, 0] > 150] = 0.85
    yuv[:, :, 0] = yuv[:, :, 0] * damp

    yuv[:, :, 0] = clahe.apply(yuv[:, :, 0])
    yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])
    yuv[:, :, 0] = clahe.apply(yuv[:, :, 0])

    restored = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR)
    restored = cv2.fastNlMeansDenoisingColored(restored, None, 3, 3, 7, 21)
    return restored
|
| 818 |
+
|
| 819 |
+
def correct_exposure(image):
    """Run exposure correction on a single image or each image in a list."""
    verify_image(image)
    if not is_list(image):
        return exposure_process(image)
    return [exposure_process(img) for img in image]
|
| 829 |
+
|
| 830 |
+
err_aug_type='wrong augmentation function is defined'
err_aug_list_type='aug_types should be a list of string function names'
err_aug_volume='volume type can only be "same" or "expand"'
def augment_random(image, aug_types="", volume='expand' ):
    """Apply randomly chosen augmentations from this module.

    Parameters
    ----------
    image : ndarray or list of ndarray
        Input image(s).
    aug_types : list of str, optional
        Names of augmentation functions to draw from; "" (the default)
        means the full built-in set.
    volume : str
        'expand' applies EVERY requested augmentation and concatenates all
        results; 'same' applies ONE randomly selected augmentation per
        input image, keeping the output volume equal to the input.

    Raises
    ------
    Exception
        If aug_types is not a list, contains an unknown name, or volume is
        neither 'same' nor 'expand'.
    """
    aug_types_all=["random_brightness","add_shadow","add_snow","add_rain","add_fog","add_gravel","add_sun_flare","add_speed","add_autumn","random_flip","add_manhole"]
    if aug_types=="":
        aug_types=aug_types_all
    output=[]
    if not(is_list(aug_types)):
        raise Exception(err_aug_list_type)

    def _aug_fn(name):
        # Resolve the augmentation by name from this module's namespace.
        # Replaces the original eval(name + '(image)') dispatch, which was
        # fragile and unsafe for string-built code.
        if name not in aug_types_all:
            raise Exception(err_aug_type)
        return globals()[name]

    if volume=='expand':
        for aug_type in aug_types:
            result=_aug_fn(aug_type)(image)
            if(is_list(result)):
                output+=result
            else:
                output.append(result)
    elif volume=='same':
        verify_image(image)
        # validate every requested name up front, as the original did
        for aug_type in aug_types:
            if not(aug_type in aug_types_all):
                raise Exception(err_aug_type)
        if(is_list(image)):
            for img in image:
                selected_aug=aug_types[random.randint(0,len(aug_types)-1)]
                output.append(_aug_fn(selected_aug)(img))
        else:
            selected_aug=aug_types[random.randint(0,len(aug_types)-1)]
            output=_aug_fn(selected_aug)(image)
    else:
        raise Exception(err_aug_volume)

    return output
|