Add files using upload-large-folder tool
Browse files- external/Metric3D/training/mono/configs/test_configs_vit/ddad.vit.dpt.raft.py +94 -0
- external/Metric3D/training/mono/configs/test_configs_vit/diode.vit.dpt.raft.py +66 -0
- external/Metric3D/training/mono/configs/test_configs_vit/eth3d.vit.dpt.raft.py +70 -0
- external/Metric3D/training/mono/configs/test_configs_vit/ibims.vit.dpt.raft.py +71 -0
- external/Metric3D/training/mono/configs/test_configs_vit/kitti.vit.dpt.raft.py +82 -0
- external/Metric3D/training/mono/configs/test_configs_vit/nuscenes.vit.dpt.raft.py +93 -0
- external/Metric3D/training/mono/configs/test_configs_vit/nyu.vit.dpt.raft.py +64 -0
- external/Metric3D/training/mono/configs/test_configs_vit/replica.vit.dpt.raft.py +64 -0
- external/Metric3D/training/mono/configs/test_configs_vit/scannet.vit.dpt.raft.py +66 -0
- external/Metric3D/training/mono/model/__base_model__.py +288 -0
- external/Metric3D/training/mono/model/__init__.py +6 -0
- external/Metric3D/training/mono/model/backbones/ConvNeXt.py +271 -0
- external/Metric3D/training/mono/model/backbones/ViT_DINO.py +1504 -0
- external/Metric3D/training/mono/model/backbones/ViT_DINO_reg.py +1099 -0
- external/Metric3D/training/mono/model/backbones/__init__.py +8 -0
- external/Metric3D/training/mono/model/criterion.py +62 -0
- external/Metric3D/training/mono/model/decode_heads/RAFTDepthNormalDPTDecoder5.py +818 -0
- external/Metric3D/training/mono/model/decode_heads/__init__.py +4 -0
- external/Metric3D/training/mono/model/losses/AdabinsLoss.py +101 -0
- external/Metric3D/training/mono/model/losses/ConfidenceGuideLoss.py +54 -0
- external/Metric3D/training/mono/model/losses/ConfidenceLoss.py +22 -0
- external/Metric3D/training/mono/model/losses/GRUSequenceLoss.py +181 -0
- external/Metric3D/training/mono/model/losses/Gradient.py +121 -0
- external/Metric3D/training/mono/model/losses/HDNL.py +95 -0
- external/Metric3D/training/mono/model/losses/HDNL_random.py +104 -0
- external/Metric3D/training/mono/model/losses/HDSNL.py +82 -0
- external/Metric3D/training/mono/model/losses/HDSNL_random.py +230 -0
- external/Metric3D/training/mono/model/losses/L1.py +63 -0
- external/Metric3D/training/mono/model/losses/NormalBranchLoss.py +732 -0
- external/Metric3D/training/mono/model/losses/NormalRegression.py +418 -0
- external/Metric3D/training/mono/model/losses/PWN_Planes.py +291 -0
- external/Metric3D/training/mono/model/losses/Ranking.py +342 -0
- external/Metric3D/training/mono/model/losses/Regularization.py +18 -0
- external/Metric3D/training/mono/model/losses/SSIL.py +56 -0
- external/Metric3D/training/mono/model/losses/ScaleAlignLoss.py +57 -0
- external/Metric3D/training/mono/model/losses/ScaleInvL1.py +35 -0
- external/Metric3D/training/mono/model/losses/SiLog.py +38 -0
- external/Metric3D/training/mono/model/losses/SkyRegularization.py +79 -0
- external/Metric3D/training/mono/model/losses/VNL.py +260 -0
- external/Metric3D/training/mono/model/losses/WCEL.py +157 -0
- external/Metric3D/training/mono/model/losses/__init__.py +32 -0
- external/Metric3D/training/mono/model/losses/depth_to_normal.py +302 -0
- external/Metric3D/training/mono/model/losses/photometric_loss_functions.py +300 -0
- external/Metric3D/training/mono/model/model_pipelines/__init__.py +6 -0
- external/Metric3D/training/mono/model/model_pipelines/dense_pipeline.py +27 -0
- external/Metric3D/training/mono/model/model_pipelines/model_pipeline.py +34 -0
- external/Metric3D/training/mono/model/monodepth_model.py +45 -0
- external/Metric3D/training/mono/scripts/test_scripts/test_vit.sh +5 -0
- external/Metric3D/training/mono/scripts/train_scripts/train.sh +7 -0
- external/Metric3D/training/mono/scripts/train_scripts/train_kitti.sh +8 -0
external/Metric3D/training/mono/configs/test_configs_vit/ddad.vit.dpt.raft.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_base_=['../_base_/losses/all_losses.py',
|
| 2 |
+
'../_base_/models/encoder_decoder/dino_vit_large_reg.dpt_raft.py',
|
| 3 |
+
|
| 4 |
+
'../_base_/datasets/ddad.py',
|
| 5 |
+
'../_base_/datasets/_data_base_.py',
|
| 6 |
+
|
| 7 |
+
'../_base_/default_runtime.py',
|
| 8 |
+
'../_base_/schedules/schedule_1m.py'
|
| 9 |
+
]
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
model = dict(
|
| 14 |
+
decode_head=dict(
|
| 15 |
+
type='RAFTDepthNormalDPT5',
|
| 16 |
+
iters=8,
|
| 17 |
+
n_downsample=2,
|
| 18 |
+
detach=False,
|
| 19 |
+
)
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
# model settings
|
| 23 |
+
find_unused_parameters = True
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# data configs, some similar data are merged together
|
| 28 |
+
data_array = [
|
| 29 |
+
# group 1
|
| 30 |
+
[
|
| 31 |
+
dict(DDAD='DDAD_dataset'),
|
| 32 |
+
],
|
| 33 |
+
]
|
| 34 |
+
data_basic=dict(
|
| 35 |
+
canonical_space = dict(
|
| 36 |
+
# img_size=(540, 960),
|
| 37 |
+
focal_length=1000.0,
|
| 38 |
+
),
|
| 39 |
+
depth_range=(0,1),
|
| 40 |
+
depth_normalize=(0.1, 200),
|
| 41 |
+
crop_size = (1120, 2016),
|
| 42 |
+
clip_depth_range=(0.1, 200),
|
| 43 |
+
vit_size=(616,1064),
|
| 44 |
+
)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
test_metrics = ['abs_rel', 'rmse', 'silog', 'delta1', 'delta2', 'delta3']
|
| 48 |
+
DDAD_dataset=dict(
|
| 49 |
+
data = dict(
|
| 50 |
+
test=dict(
|
| 51 |
+
anno_path='DDAD/annotations/test_annotations.json',
|
| 52 |
+
pipeline=[dict(type='BGR2RGB'),
|
| 53 |
+
dict(type='LabelScaleCononical'),
|
| 54 |
+
dict(type='ResizeKeepRatio',
|
| 55 |
+
# resize_size=(1216, 1952), #(544, 992), #
|
| 56 |
+
# resize_size=(560, 1008),
|
| 57 |
+
# resize_size=(840, 1512),
|
| 58 |
+
resize_size=(616,1064),
|
| 59 |
+
ignore_label=-1,
|
| 60 |
+
padding=[0,0,0]),
|
| 61 |
+
# dict(type='ResizeKeepRatio',
|
| 62 |
+
# resize_size=(1120, 2016),
|
| 63 |
+
# ignore_label=-1,
|
| 64 |
+
# padding=[0,0,0],
|
| 65 |
+
# keep_gt=True),
|
| 66 |
+
# dict(type='RandomCrop',
|
| 67 |
+
# crop_size=(0,0),
|
| 68 |
+
# crop_type='center',
|
| 69 |
+
# ignore_label=-1,
|
| 70 |
+
# padding=[0,0,0]),
|
| 71 |
+
dict(type='ToTensor'),
|
| 72 |
+
dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
|
| 73 |
+
],
|
| 74 |
+
sample_ratio = 1.0,
|
| 75 |
+
sample_size = 500,
|
| 76 |
+
),
|
| 77 |
+
))
|
| 78 |
+
|
| 79 |
+
# DDAD_dataset=dict(
|
| 80 |
+
# data = dict(
|
| 81 |
+
# test=dict(
|
| 82 |
+
# anno_path='DDAD/annotations/test_annotations.json',
|
| 83 |
+
# pipeline=[dict(type='BGR2RGB'),
|
| 84 |
+
# dict(type='KeepResizeCanoSize',
|
| 85 |
+
# resize_size=(640, 1088), #(1216, 1952), #(512, 960), #
|
| 86 |
+
# ignore_label=-1,
|
| 87 |
+
# padding=[0, 0, 0]),
|
| 88 |
+
# dict(type='ToTensor'),
|
| 89 |
+
# dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
|
| 90 |
+
# ],
|
| 91 |
+
# sample_ratio = 1.0,
|
| 92 |
+
# sample_size = 80,
|
| 93 |
+
# ),
|
| 94 |
+
# ))
|
external/Metric3D/training/mono/configs/test_configs_vit/diode.vit.dpt.raft.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_base_=['../_base_/losses/all_losses.py',
|
| 2 |
+
'../_base_/models/encoder_decoder/dino_vit_large_reg.dpt_raft.py',
|
| 3 |
+
|
| 4 |
+
'../_base_/datasets/diode.py',
|
| 5 |
+
'../_base_/datasets/_data_base_.py',
|
| 6 |
+
|
| 7 |
+
'../_base_/default_runtime.py',
|
| 8 |
+
'../_base_/schedules/schedule_1m.py'
|
| 9 |
+
]
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
model=dict(
|
| 14 |
+
decode_head=dict(
|
| 15 |
+
type='RAFTDepthNormalDPT5',
|
| 16 |
+
iters=8,
|
| 17 |
+
n_downsample=2,
|
| 18 |
+
detach=False,
|
| 19 |
+
)
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
# model settings
|
| 23 |
+
find_unused_parameters = True
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# data configs, some similar data are merged together
|
| 28 |
+
data_array = [
|
| 29 |
+
# group 1
|
| 30 |
+
[
|
| 31 |
+
dict(DIODE='DIODE_dataset'),
|
| 32 |
+
#dict(DIODE_indoor='DIODE_dataset')
|
| 33 |
+
#dict(DIODE_outdoor='DIODE_dataset')
|
| 34 |
+
],
|
| 35 |
+
]
|
| 36 |
+
data_basic=dict(
|
| 37 |
+
canonical_space = dict(
|
| 38 |
+
img_size=(540, 960),
|
| 39 |
+
focal_length=1000.0,
|
| 40 |
+
),
|
| 41 |
+
depth_range=(0, 1),
|
| 42 |
+
depth_normalize=(0.1, 200),# (0.3, 160),
|
| 43 |
+
# crop_size = (512, 960),
|
| 44 |
+
clip_depth_range=(0.1, 150),
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
# indoor (544, 928), outdoor: (768, 1088)
|
| 50 |
+
test_metrics = ['abs_rel', 'rmse', 'silog', 'delta1', 'delta2', 'delta3', 'normal_median' , 'normal_mean', 'normal_rmse', 'normal_a1', 'normal_a2', 'normal_a3', 'normal_a4', 'normal_a5']
|
| 51 |
+
DIODE_dataset=dict(
|
| 52 |
+
data = dict(
|
| 53 |
+
test=dict(
|
| 54 |
+
pipeline=[dict(type='BGR2RGB'),
|
| 55 |
+
dict(type='LabelScaleCononical'),
|
| 56 |
+
dict(type='ResizeKeepRatio',
|
| 57 |
+
resize_size=(616, 1064), #(544, 992), #(768, 1088), #(768, 1120), # (768, 1216), #(768, 1024), # (768, 1216), #(768, 1312), #
|
| 58 |
+
ignore_label=-1,
|
| 59 |
+
padding=[0,0,0]),
|
| 60 |
+
dict(type='ToTensor'),
|
| 61 |
+
dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
|
| 62 |
+
],
|
| 63 |
+
sample_ratio = 1.0,
|
| 64 |
+
sample_size = -1,
|
| 65 |
+
),
|
| 66 |
+
))
|
external/Metric3D/training/mono/configs/test_configs_vit/eth3d.vit.dpt.raft.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_base_=['../_base_/losses/all_losses.py',
|
| 2 |
+
'../_base_/models/encoder_decoder/dino_vit_large_reg.dpt_raft.py',
|
| 3 |
+
|
| 4 |
+
'../_base_/datasets/eth3d.py',
|
| 5 |
+
'../_base_/datasets/_data_base_.py',
|
| 6 |
+
|
| 7 |
+
'../_base_/default_runtime.py',
|
| 8 |
+
'../_base_/schedules/schedule_1m.py'
|
| 9 |
+
]
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
model = dict(
|
| 14 |
+
decode_head=dict(
|
| 15 |
+
type='RAFTDepthNormalDPT5',
|
| 16 |
+
iters=8,
|
| 17 |
+
n_downsample=2,
|
| 18 |
+
detach=False,
|
| 19 |
+
)
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
# model settings
|
| 23 |
+
find_unused_parameters = True
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# data configs, some similar data are merged together
|
| 28 |
+
data_array = [
|
| 29 |
+
# group 1
|
| 30 |
+
[
|
| 31 |
+
dict(ETH3D='ETH3D_dataset'), #447.2w
|
| 32 |
+
],
|
| 33 |
+
]
|
| 34 |
+
data_basic=dict(
|
| 35 |
+
canonical_space = dict(
|
| 36 |
+
# img_size=(540, 960),
|
| 37 |
+
focal_length=1000.0,
|
| 38 |
+
),
|
| 39 |
+
depth_range=(0, 1),
|
| 40 |
+
depth_normalize=(0.1, 200),# (0.3, 160),
|
| 41 |
+
crop_size = (1120, 2016),
|
| 42 |
+
clip_depth_range=(0.1, 200),
|
| 43 |
+
vit_size=(616,1064),
|
| 44 |
+
)
|
| 45 |
+
|
| 46 |
+
# indoor (544, 928), outdoor: (768, 1088)
|
| 47 |
+
test_metrics = ['abs_rel', 'rmse', 'silog', 'delta1', 'delta2', 'delta3', 'normal_mean', 'normal_rmse', 'normal_a1']
|
| 48 |
+
ETH3D_dataset=dict(
|
| 49 |
+
data = dict(
|
| 50 |
+
test=dict(
|
| 51 |
+
pipeline=[dict(type='BGR2RGB'),
|
| 52 |
+
dict(type='LabelScaleCononical'),
|
| 53 |
+
dict(type='ResizeKeepRatio',
|
| 54 |
+
# resize_size=(512, 512), #(768, 1088), #(768, 1120), # (768, 1216), #(768, 1024), # (768, 1216), #(768, 1312), # (512, 512)
|
| 55 |
+
resize_size=(616,1064),
|
| 56 |
+
# resize_size=(1120, 2016),
|
| 57 |
+
ignore_label=-1,
|
| 58 |
+
padding=[0,0,0]),
|
| 59 |
+
# dict(type='RandomCrop',
|
| 60 |
+
# crop_size=(0,0),
|
| 61 |
+
# crop_type='center',
|
| 62 |
+
# ignore_label=-1,
|
| 63 |
+
# padding=[0,0,0]),
|
| 64 |
+
dict(type='ToTensor'),
|
| 65 |
+
dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
|
| 66 |
+
],
|
| 67 |
+
sample_ratio = 1.0,
|
| 68 |
+
sample_size = -1,
|
| 69 |
+
),
|
| 70 |
+
))
|
external/Metric3D/training/mono/configs/test_configs_vit/ibims.vit.dpt.raft.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_base_=['../_base_/losses/all_losses.py',
|
| 2 |
+
'../_base_/models/encoder_decoder/dino_vit_large_reg.dpt_raft.py',
|
| 3 |
+
|
| 4 |
+
'../_base_/datasets/ibims.py',
|
| 5 |
+
'../_base_/datasets/_data_base_.py',
|
| 6 |
+
|
| 7 |
+
'../_base_/default_runtime.py',
|
| 8 |
+
'../_base_/schedules/schedule_1m.py'
|
| 9 |
+
]
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
model = dict(
|
| 14 |
+
decode_head=dict(
|
| 15 |
+
type='RAFTDepthNormalDPT5',
|
| 16 |
+
iters=8,
|
| 17 |
+
n_downsample=2,
|
| 18 |
+
detach=False,
|
| 19 |
+
)
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
# model settings
|
| 23 |
+
find_unused_parameters = True
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# data configs, some similar data are merged together
|
| 28 |
+
data_array = [
|
| 29 |
+
# group 1
|
| 30 |
+
[
|
| 31 |
+
dict(IBIMS='IBIMS_dataset'), #447.2w
|
| 32 |
+
],
|
| 33 |
+
]
|
| 34 |
+
data_basic=dict(
|
| 35 |
+
canonical_space = dict(
|
| 36 |
+
# img_size=(540, 960),
|
| 37 |
+
focal_length=1000.0,
|
| 38 |
+
),
|
| 39 |
+
depth_range=(0, 1),
|
| 40 |
+
depth_normalize=(0.1, 200),# (0.3, 160),
|
| 41 |
+
crop_size = (1120, 2016),
|
| 42 |
+
clip_depth_range=(0.1, 10),
|
| 43 |
+
vit_size=(616,1064),
|
| 44 |
+
)
|
| 45 |
+
clip_depth = True
|
| 46 |
+
|
| 47 |
+
# indoor (544, 928), outdoor: (768, 1088)
|
| 48 |
+
test_metrics = ['abs_rel', 'rmse', 'silog', 'delta1', 'delta2', 'delta3', 'normal_mean', 'normal_rmse', 'normal_a3', 'normal_a4', 'normal_a5', 'normal_median']
|
| 49 |
+
IBIMS_dataset=dict(
|
| 50 |
+
data = dict(
|
| 51 |
+
test=dict(
|
| 52 |
+
pipeline=[dict(type='BGR2RGB'),
|
| 53 |
+
dict(type='LabelScaleCononical'),
|
| 54 |
+
dict(type='ResizeKeepRatio',
|
| 55 |
+
# resize_size=(512, 512), #(768, 1088), #(768, 1120), # (768, 1216), #(768, 1024), # (768, 1216), #(768, 1312), # (512, 512)
|
| 56 |
+
resize_size=(616,1064),
|
| 57 |
+
# resize_size=(1120, 2016),
|
| 58 |
+
ignore_label=-1,
|
| 59 |
+
padding=[0,0,0]),
|
| 60 |
+
# dict(type='RandomCrop',
|
| 61 |
+
# crop_size=(0,0),
|
| 62 |
+
# crop_type='center',
|
| 63 |
+
# ignore_label=-1,
|
| 64 |
+
# padding=[0,0,0]),
|
| 65 |
+
dict(type='ToTensor'),
|
| 66 |
+
dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
|
| 67 |
+
],
|
| 68 |
+
sample_ratio = 1.0,
|
| 69 |
+
sample_size = -1,
|
| 70 |
+
),
|
| 71 |
+
))
|
external/Metric3D/training/mono/configs/test_configs_vit/kitti.vit.dpt.raft.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_base_=['../_base_/losses/all_losses.py',
|
| 2 |
+
'../_base_/models/encoder_decoder/dino_vit_large_reg.dpt_raft.py',
|
| 3 |
+
|
| 4 |
+
'../_base_/datasets/kitti.py',
|
| 5 |
+
'../_base_/datasets/_data_base_.py',
|
| 6 |
+
|
| 7 |
+
'../_base_/default_runtime.py',
|
| 8 |
+
'../_base_/schedules/schedule_1m.py'
|
| 9 |
+
]
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
model = dict(
|
| 14 |
+
decode_head=dict(
|
| 15 |
+
type='RAFTDepthNormalDPT5',
|
| 16 |
+
iters=8,
|
| 17 |
+
n_downsample=2,
|
| 18 |
+
detach=False,
|
| 19 |
+
)
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
# model settings
|
| 23 |
+
find_unused_parameters = True
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# data configs, some similar data are merged together
|
| 28 |
+
data_array = [
|
| 29 |
+
# group 1
|
| 30 |
+
[
|
| 31 |
+
dict(KITTI='KITTI_dataset'),
|
| 32 |
+
],
|
| 33 |
+
]
|
| 34 |
+
data_basic=dict(
|
| 35 |
+
canonical_space = dict(
|
| 36 |
+
# img_size=(540, 960),
|
| 37 |
+
focal_length=1000.0,
|
| 38 |
+
),
|
| 39 |
+
depth_range=(0,1),
|
| 40 |
+
depth_normalize=(0.1, 200),
|
| 41 |
+
crop_size = (1120, 2016),
|
| 42 |
+
clip_depth_range=(0.1, 80),
|
| 43 |
+
vit_size=(616,1064),
|
| 44 |
+
)
|
| 45 |
+
|
| 46 |
+
clip_depth = True
|
| 47 |
+
|
| 48 |
+
test_metrics = ['abs_rel', 'rmse', 'silog', 'delta1', 'delta2', 'delta3', 'rmse_log',
|
| 49 |
+
'log10']
|
| 50 |
+
KITTI_dataset=dict(
|
| 51 |
+
data = dict(
|
| 52 |
+
test=dict(
|
| 53 |
+
pipeline=[dict(type='BGR2RGB'),
|
| 54 |
+
dict(type='LabelScaleCononical'),
|
| 55 |
+
dict(type='ResizeKeepRatio',
|
| 56 |
+
resize_size=(616, 1064), #(416, 1248), #(480, 1216), #(512, 1088), #(512, 1312), #(480, 1248), # #
|
| 57 |
+
ignore_label=-1,
|
| 58 |
+
padding=[0,0,0]),
|
| 59 |
+
dict(type='ToTensor'),
|
| 60 |
+
dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
|
| 61 |
+
],
|
| 62 |
+
sample_ratio = 1.0,
|
| 63 |
+
sample_size = -1,
|
| 64 |
+
),
|
| 65 |
+
))
|
| 66 |
+
|
| 67 |
+
# DDAD_dataset=dict(
|
| 68 |
+
# data = dict(
|
| 69 |
+
# test=dict(
|
| 70 |
+
# anno_path='DDAD/annotations/test_annotations.json',
|
| 71 |
+
# pipeline=[dict(type='BGR2RGB'),
|
| 72 |
+
# dict(type='KeepResizeCanoSize',
|
| 73 |
+
# resize_size=(640, 1088), #(1216, 1952), #(512, 960), #
|
| 74 |
+
# ignore_label=-1,
|
| 75 |
+
# padding=[0, 0, 0]),
|
| 76 |
+
# dict(type='ToTensor'),
|
| 77 |
+
# dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
|
| 78 |
+
# ],
|
| 79 |
+
# sample_ratio = 1.0,
|
| 80 |
+
# sample_size = 80,
|
| 81 |
+
# ),
|
| 82 |
+
# ))
|
external/Metric3D/training/mono/configs/test_configs_vit/nuscenes.vit.dpt.raft.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_base_=['../_base_/losses/all_losses.py',
|
| 2 |
+
'../_base_/models/encoder_decoder/dino_vit_large_reg.dpt_raft.py',
|
| 3 |
+
|
| 4 |
+
'../_base_/datasets/nuscenes.py',
|
| 5 |
+
'../_base_/datasets/_data_base_.py',
|
| 6 |
+
|
| 7 |
+
'../_base_/default_runtime.py',
|
| 8 |
+
'../_base_/schedules/schedule_1m.py'
|
| 9 |
+
]
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
model = dict(
|
| 14 |
+
decode_head=dict(
|
| 15 |
+
type='RAFTDepthNormalDPT5',
|
| 16 |
+
iters=8,
|
| 17 |
+
n_downsample=2,
|
| 18 |
+
detach=False,
|
| 19 |
+
)
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
# model settings
|
| 23 |
+
find_unused_parameters = True
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# data configs, some similar data are merged together
|
| 28 |
+
data_array = [
|
| 29 |
+
# group 1
|
| 30 |
+
[
|
| 31 |
+
dict(NuScenes='NuScenes_dataset'),
|
| 32 |
+
],
|
| 33 |
+
]
|
| 34 |
+
data_basic=dict(
|
| 35 |
+
canonical_space = dict(
|
| 36 |
+
# img_size=(540, 960),
|
| 37 |
+
focal_length=1000.0,
|
| 38 |
+
),
|
| 39 |
+
depth_range=(0,1),
|
| 40 |
+
depth_normalize=(0.1, 200),
|
| 41 |
+
crop_size = (1120, 2016),
|
| 42 |
+
clip_depth_range=(0.1, 200),
|
| 43 |
+
vit_size=(616,1064),
|
| 44 |
+
)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
test_metrics = ['abs_rel', 'rmse', 'silog', 'delta1', 'delta2', 'delta3']
|
| 48 |
+
NuScenes_dataset=dict(
|
| 49 |
+
data = dict(
|
| 50 |
+
test=dict(
|
| 51 |
+
pipeline=[dict(type='BGR2RGB'),
|
| 52 |
+
dict(type='LabelScaleCononical'),
|
| 53 |
+
dict(type='ResizeKeepRatio',
|
| 54 |
+
# resize_size=(1216, 1952), #(544, 992), #
|
| 55 |
+
# resize_size=(560, 1008),
|
| 56 |
+
# resize_size=(840, 1512),
|
| 57 |
+
resize_size=(616,1064),
|
| 58 |
+
ignore_label=-1,
|
| 59 |
+
padding=[0,0,0]),
|
| 60 |
+
# dict(type='ResizeKeepRatio',
|
| 61 |
+
# resize_size=(1120, 2016),
|
| 62 |
+
# ignore_label=-1,
|
| 63 |
+
# padding=[0,0,0],
|
| 64 |
+
# keep_gt=True),
|
| 65 |
+
# dict(type='RandomCrop',
|
| 66 |
+
# crop_size=(0,0),
|
| 67 |
+
# crop_type='center',
|
| 68 |
+
# ignore_label=-1,
|
| 69 |
+
# padding=[0,0,0]),
|
| 70 |
+
dict(type='ToTensor'),
|
| 71 |
+
dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
|
| 72 |
+
],
|
| 73 |
+
sample_ratio = 1.0,
|
| 74 |
+
sample_size = 500,
|
| 75 |
+
),
|
| 76 |
+
))
|
| 77 |
+
|
| 78 |
+
# DDAD_dataset=dict(
|
| 79 |
+
# data = dict(
|
| 80 |
+
# test=dict(
|
| 81 |
+
# anno_path='DDAD/annotations/test_annotations.json',
|
| 82 |
+
# pipeline=[dict(type='BGR2RGB'),
|
| 83 |
+
# dict(type='KeepResizeCanoSize',
|
| 84 |
+
# resize_size=(640, 1088), #(1216, 1952), #(512, 960), #
|
| 85 |
+
# ignore_label=-1,
|
| 86 |
+
# padding=[0, 0, 0]),
|
| 87 |
+
# dict(type='ToTensor'),
|
| 88 |
+
# dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
|
| 89 |
+
# ],
|
| 90 |
+
# sample_ratio = 1.0,
|
| 91 |
+
# sample_size = 80,
|
| 92 |
+
# ),
|
| 93 |
+
# ))
|
external/Metric3D/training/mono/configs/test_configs_vit/nyu.vit.dpt.raft.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_base_=['../_base_/losses/all_losses.py',
|
| 2 |
+
'../_base_/models/encoder_decoder/dino_vit_large_reg.dpt_raft.py',
|
| 3 |
+
|
| 4 |
+
'../_base_/datasets/nyu.py',
|
| 5 |
+
'../_base_/datasets/_data_base_.py',
|
| 6 |
+
|
| 7 |
+
'../_base_/default_runtime.py',
|
| 8 |
+
'../_base_/schedules/schedule_1m.py'
|
| 9 |
+
]
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
model = dict(
|
| 14 |
+
decode_head=dict(
|
| 15 |
+
type='RAFTDepthNormalDPT5',
|
| 16 |
+
iters=8,
|
| 17 |
+
n_downsample=2,
|
| 18 |
+
detach=False,
|
| 19 |
+
)
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
# model settings
|
| 23 |
+
find_unused_parameters = True
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# data configs, some similar data are merged together
|
| 28 |
+
data_array = [
|
| 29 |
+
# group 1
|
| 30 |
+
[
|
| 31 |
+
dict(NYU='NYU_dataset'),
|
| 32 |
+
],
|
| 33 |
+
]
|
| 34 |
+
data_basic=dict(
|
| 35 |
+
canonical_space = dict(
|
| 36 |
+
# img_size=(540, 960),
|
| 37 |
+
focal_length=1000.0,
|
| 38 |
+
),
|
| 39 |
+
depth_range=(0,1),
|
| 40 |
+
depth_normalize=(0.1, 200),
|
| 41 |
+
crop_size = (1120, 2016),
|
| 42 |
+
clip_depth_range=(0.1, 10),
|
| 43 |
+
vit_size=(616,1064),
|
| 44 |
+
)
|
| 45 |
+
clip_depth = True
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
test_metrics = ['abs_rel', 'rmse', 'silog', 'delta1', 'delta2', 'delta3', 'rmse_log', 'log10', 'normal_mean', 'normal_rmse', 'normal_median', 'normal_a3', 'normal_a4', 'normal_a5']
|
| 49 |
+
NYU_dataset=dict(
|
| 50 |
+
data = dict(
|
| 51 |
+
test=dict(
|
| 52 |
+
pipeline=[dict(type='BGR2RGB'),
|
| 53 |
+
dict(type='LabelScaleCononical'),
|
| 54 |
+
dict(type='ResizeKeepRatio',
|
| 55 |
+
resize_size=(616, 1064), #(544, 992), #(480, 1216), #(480, 640), #
|
| 56 |
+
ignore_label=-1,
|
| 57 |
+
padding=[0,0,0]),
|
| 58 |
+
dict(type='ToTensor'),
|
| 59 |
+
dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
|
| 60 |
+
],
|
| 61 |
+
sample_ratio = 1.0,
|
| 62 |
+
sample_size = -1,
|
| 63 |
+
),
|
| 64 |
+
))
|
external/Metric3D/training/mono/configs/test_configs_vit/replica.vit.dpt.raft.py
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_base_=['../_base_/losses/all_losses.py',
|
| 2 |
+
'../_base_/models/encoder_decoder/dino_vit_large_reg.dpt_raft.py',
|
| 3 |
+
|
| 4 |
+
'../_base_/datasets/replica.py',
|
| 5 |
+
'../_base_/datasets/_data_base_.py',
|
| 6 |
+
|
| 7 |
+
'../_base_/default_runtime.py',
|
| 8 |
+
'../_base_/schedules/schedule_1m.py'
|
| 9 |
+
]
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
|
| 13 |
+
model=dict(
|
| 14 |
+
decode_head=dict(
|
| 15 |
+
type='RAFTDepthNormalDPT5',
|
| 16 |
+
iters=8,
|
| 17 |
+
n_downsample=2,
|
| 18 |
+
detach=False,
|
| 19 |
+
)
|
| 20 |
+
)
|
| 21 |
+
|
| 22 |
+
# model settings
|
| 23 |
+
find_unused_parameters = True
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# data configs, some similar data are merged together
|
| 28 |
+
data_array = [
|
| 29 |
+
# group 1
|
| 30 |
+
[
|
| 31 |
+
dict(Replica='Replica_dataset'), # 5.6w
|
| 32 |
+
],
|
| 33 |
+
]
|
| 34 |
+
data_basic=dict(
|
| 35 |
+
canonical_space = dict(
|
| 36 |
+
img_size=(540, 960),
|
| 37 |
+
focal_length=1000.0,
|
| 38 |
+
),
|
| 39 |
+
depth_range=(0, 1),
|
| 40 |
+
depth_normalize=(0.1, 200),# (0.3, 160),
|
| 41 |
+
# crop_size = (512, 960),
|
| 42 |
+
clip_depth_range=(0.1, 200),
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
# indoor (544, 928), outdoor: (768, 1088)
|
| 48 |
+
test_metrics = ['abs_rel', 'rmse', 'silog', 'delta1', 'delta2', 'delta3', 'normal_median' , 'normal_mean', 'normal_rmse', 'normal_a1', 'normal_a2', 'normal_a3', 'normal_a4', 'normal_a5']
|
| 49 |
+
Replica_dataset=dict(
|
| 50 |
+
data = dict(
|
| 51 |
+
test=dict(
|
| 52 |
+
pipeline=[dict(type='BGR2RGB'),
|
| 53 |
+
dict(type='LabelScaleCononical'),
|
| 54 |
+
dict(type='ResizeKeepRatio',
|
| 55 |
+
resize_size=(616, 1064), #(544, 992), #(768, 1088), #(768, 1120), # (768, 1216), #(768, 1024), # (768, 1216), #(768, 1312), #
|
| 56 |
+
ignore_label=-1,
|
| 57 |
+
padding=[0,0,0]),
|
| 58 |
+
dict(type='ToTensor'),
|
| 59 |
+
dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
|
| 60 |
+
],
|
| 61 |
+
sample_ratio = 1.0,
|
| 62 |
+
sample_size = -1,
|
| 63 |
+
),
|
| 64 |
+
))
|
external/Metric3D/training/mono/configs/test_configs_vit/scannet.vit.dpt.raft.py
ADDED
|
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_base_=['../_base_/losses/all_losses.py',
|
| 2 |
+
'../_base_/models/encoder_decoder/dino_vit_large_reg.dpt_raft.py',
|
| 3 |
+
|
| 4 |
+
'../_base_/datasets/scannet.py',
|
| 5 |
+
'../_base_/datasets/scannet_all.py',
|
| 6 |
+
#'../_base_/datasets/_data_base_.py',
|
| 7 |
+
|
| 8 |
+
'../_base_/default_runtime.py',
|
| 9 |
+
'../_base_/schedules/schedule_1m.py'
|
| 10 |
+
]
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
|
| 14 |
+
model = dict(
|
| 15 |
+
decode_head=dict(
|
| 16 |
+
type='RAFTDepthNormalDPT5',
|
| 17 |
+
iters=8,
|
| 18 |
+
n_downsample=2,
|
| 19 |
+
detach=False,
|
| 20 |
+
)
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
# model settings
|
| 24 |
+
find_unused_parameters = True
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# data configs, some similar data are merged together
|
| 29 |
+
data_array = [
|
| 30 |
+
# group 1
|
| 31 |
+
[
|
| 32 |
+
#dict(ScanNet='ScanNet_dataset'),
|
| 33 |
+
dict(ScanNetAll='ScanNetAll_dataset')
|
| 34 |
+
],
|
| 35 |
+
]
|
| 36 |
+
data_basic=dict(
|
| 37 |
+
canonical_space = dict(
|
| 38 |
+
# img_size=(540, 960),
|
| 39 |
+
focal_length=1000.0,
|
| 40 |
+
),
|
| 41 |
+
depth_range=(0,1),
|
| 42 |
+
depth_normalize=(0.1, 200),
|
| 43 |
+
crop_size = (1120, 2016),
|
| 44 |
+
clip_depth_range=(0.1, 200),
|
| 45 |
+
vit_size=(616,1064),
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
test_metrics = ['abs_rel', 'rmse', 'silog', 'delta1', 'delta2', 'delta3', 'rmse_log', 'log10', 'normal_mean', 'normal_rmse', 'normal_median', 'normal_a3', 'normal_a4', 'normal_a5']
|
| 50 |
+
ScanNetAll_dataset=dict(
|
| 51 |
+
#ScanNet_dataset=dict(
|
| 52 |
+
data = dict(
|
| 53 |
+
test=dict(
|
| 54 |
+
pipeline=[dict(type='BGR2RGB'),
|
| 55 |
+
dict(type='LabelScaleCononical'),
|
| 56 |
+
dict(type='ResizeKeepRatio',
|
| 57 |
+
resize_size=(616, 1064), #(544, 992), #(480, 1216), #(480, 640), #
|
| 58 |
+
ignore_label=-1,
|
| 59 |
+
padding=[0,0,0]),
|
| 60 |
+
dict(type='ToTensor'),
|
| 61 |
+
dict(type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375]),
|
| 62 |
+
],
|
| 63 |
+
sample_ratio = 1.0,
|
| 64 |
+
sample_size = 500,
|
| 65 |
+
),
|
| 66 |
+
))
|
external/Metric3D/training/mono/model/__base_model__.py
ADDED
|
@@ -0,0 +1,288 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from mono.utils.comm import get_func
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch.nn.functional as F
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class BaseDepthModel(nn.Module):
    """Top-level wrapper that couples a depth-prediction pipeline with its
    per-branch training losses.

    Args:
        cfg: config object. ``cfg.model.type`` names the pipeline class under
            ``mono.model.model_pipelines``; ``cfg.prediction_downsample`` is an
            optional integer factor used to downsample labels before the
            forward pass and upsample predictions afterwards.
        criterions (dict | None): maps branch names ('decoder_losses',
            'auxi_losses', 'pose_losses', 'gru_losses') to iterables of loss
            modules. Missing branches are treated as disabled.
    """

    def __init__(self, cfg, criterions, **kwards):
        super(BaseDepthModel, self).__init__()
        model_type = cfg.model.type
        # Resolve and instantiate the concrete pipeline class by name.
        self.depth_model = get_func('mono.model.model_pipelines.' + model_type)(cfg)

        # Per-branch loss collections; absent branches become None.
        criterions = criterions if criterions is not None else {}
        self.criterions_main = criterions.get('decoder_losses')
        self.criterions_auxi = criterions.get('auxi_losses')
        self.criterions_pose = criterions.get('pose_losses')
        self.criterions_gru = criterions.get('gru_losses')

        # Optional prediction downsample factor; most configs omit it.
        # (getattr replaces the original bare try/except around the access.)
        self.downsample = getattr(cfg, 'prediction_downsample', None)

        self.training = True

    def forward(self, data):
        """Run the pipeline on a batch dict.

        Returns:
            (prediction, losses_dict, confidence); losses_dict is empty when
            not in training mode (apart from being absent entirely).
        """
        if self.downsample is not None:
            self.label_downsample(self.downsample, data)

        output = self.depth_model(**data)

        losses_dict = {}
        if self.training:
            # Losses consume both the network outputs and the raw batch.
            output.update(data)
            losses_dict = self.get_loss(output)

        if self.downsample is not None:
            self.pred_upsample(self.downsample, output)

        return output['prediction'], losses_dict, output['confidence']

    def inference(self, data):
        """Run the pipeline without gradients; tags the result as 'wild' data."""
        with torch.no_grad():
            output = self.depth_model(**data)
            output.update(data)

        if self.downsample is not None:
            self.pred_upsample(self.downsample, output)

        output['dataset'] = 'wild'
        return output

    def get_loss(self, paras):
        """Aggregate losses from all active branches into one dict.

        Adds ``'total_loss'`` = sum of every branch loss (0 when no branch
        produced a loss, e.g. outside training).
        """
        losses_dict = {}
        if self.training:
            # decode branch
            losses_dict.update(self.compute_decoder_loss(paras))
            # auxiliary branch
            losses_dict.update(self.compute_auxi_loss(paras))
            # pose branch
            losses_dict.update(self.compute_pose_loss(paras))
            # GRU sequence branch
            losses_dict.update(self.compute_gru_loss(paras))

        total_loss = sum(losses_dict.values())
        losses_dict['total_loss'] = total_loss
        return losses_dict

    def compute_gru_loss(self, paras_):
        """Loss over intermediate GRU/RAFT iterations.

        Later iterations are weighted more via the RAFT-Stereo gamma
        schedule; the final iteration is skipped (it is covered by the
        decoder-branch loss).
        """
        losses_dict = {}
        if self.criterions_gru is None or len(self.criterions_gru) == 0:
            return losses_dict
        # Drop the final-prediction keys; each iteration supplies its own.
        paras = {k: v for k, v in paras_.items()
                 if k != 'prediction' and k != 'prediction_normal'}
        n_predictions = len(paras['predictions_list'])
        if n_predictions < 2:
            # Only the final prediction exists; nothing for this branch
            # (matches the original loop, which broke immediately).
            return losses_dict
        # We adjust the loss_gamma so it is consistent for any number of
        # RAFT-Stereo iterations (hoisted out of the loop: loop-invariant).
        adjusted_loss_gamma = 0.9 ** (15 / (n_predictions - 1))
        for i, pre in enumerate(paras['predictions_list']):
            if i == n_predictions - 1:
                break
            if 'normal_out_list' in paras.keys():
                pre_normal = paras['normal_out_list'][i]
            else:
                pre_normal = None
            iter_dict = self.branch_loss(
                prediction=pre,
                prediction_normal=pre_normal,
                criterions=self.criterions_gru,
                branch=f'gru_{i}',
                **paras
            )
            i_weight = adjusted_loss_gamma ** (n_predictions - i - 1)
            iter_dict = {k: v * i_weight for k, v in iter_dict.items()}
            losses_dict.update(iter_dict)
        return losses_dict

    def compute_decoder_loss(self, paras):
        """Losses for the main decoder branch."""
        decode_losses_dict = self.branch_loss(
            criterions=self.criterions_main,
            branch='decode',
            **paras
        )
        return decode_losses_dict

    def compute_auxi_loss(self, paras):
        """Losses for each auxiliary prediction head."""
        losses_dict = {}
        # Guard against None as well as an empty collection; the original
        # only handled the empty-list case and would crash when the branch
        # was absent.
        if not self.criterions_auxi:
            return losses_dict
        args = dict(
            target=paras['target'],
            data_type=paras['data_type'],
            sem_mask=paras['sem_mask'],
        )
        for i, auxi_logit in enumerate(paras['auxi_logit_list']):
            auxi_losses_dict = self.branch_loss(
                prediction=paras['auxi_pred'][i],
                criterions=self.criterions_auxi,
                pred_logit=auxi_logit,
                branch=f'auxi_{i}',
                **args
            )
            losses_dict.update(auxi_losses_dict)
        return losses_dict

    def compute_pose_loss(self, paras):
        """Losses for the pose branch; each criterion consumes the full batch."""
        losses_dict = {}
        if self.criterions_pose is None or len(self.criterions_pose) == 0:
            return losses_dict
        for loss_method in self.criterions_pose:
            loss_tmp = loss_method(**paras)
            losses_dict['pose_' + loss_method._get_name()] = loss_tmp
        return losses_dict

    def branch_loss(self, prediction, pred_logit, criterions, branch='decode', **kwargs):
        """Evaluate every criterion of one branch.

        Args:
            prediction: (B, C, H, W) depth prediction tensor.
            pred_logit: raw logits forwarded to criterions that need them.
            criterions: iterable of loss modules; each declares ``.data_type``.
            branch: prefix used for the loss names in the returned dict.
            **kwargs: must include 'target' (popped here) and 'data_type'.

        Returns:
            dict mapping '<branch>_<LossName>' -> loss tensor.
        """
        B, _, _, _ = prediction.shape
        losses_dict = {}
        args = dict(pred_logit=pred_logit)

        target = kwargs.pop('target')
        args.update(kwargs)

        # data type for each sample in the batch
        batches_data_type = np.array(kwargs['data_type'])

        # Valid-depth mask; zero/negative depth marks missing ground truth.
        mask = target > 1e-8
        for loss_method in criterions:
            # sample batches which satisfy the loss requirement for data types
            new_mask = self.create_mask_as_loss(loss_method, mask, batches_data_type)

            loss_tmp = loss_method(
                prediction=prediction,
                target=target,
                mask=new_mask,
                **args)
            losses_dict[branch + '_' + loss_method._get_name()] = loss_tmp
        return losses_dict

    def create_mask_as_loss(self, loss_method, mask, batches_data_type):
        """Zero out batch samples whose data type the loss does not support."""
        data_type_req = np.array(loss_method.data_type)[:, None]
        # Use the mask's own device instead of the original hard-coded
        # 'cuda' so the model also runs on CPU-only setups.
        batch_mask = torch.tensor(
            np.any(data_type_req == batches_data_type, axis=0),
            device=mask.device)
        new_mask = mask * batch_mask[:, None, None, None]
        return new_mask

    def label_downsample(self, downsample_factor, data_dict):
        """Downsample GT depth maps in place by ``downsample_factor``.

        F.interpolate's default (nearest-neighbour) is used, which preserves
        exact depth values and invalid-pixel holes.
        """
        scale_factor = float(1.0 / downsample_factor)
        data_dict['target'] = F.interpolate(
            data_dict['target'], scale_factor=scale_factor)
        data_dict['stereo_depth'] = F.interpolate(
            data_dict['stereo_depth'], scale_factor=scale_factor)
        return data_dict

    def pred_upsample(self, downsample_factor, data_dict):
        """Upsample prediction/confidence back to label resolution (detached)."""
        scale_factor = float(downsample_factor)
        data_dict['prediction'] = F.interpolate(
            data_dict['prediction'], scale_factor=scale_factor).detach()
        data_dict['confidence'] = F.interpolate(
            data_dict['confidence'], scale_factor=scale_factor).detach()
        return data_dict
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
# def mask_batches(self, prediction, target, mask, batches_data_names, data_type_req):
|
| 207 |
+
# """
|
| 208 |
+
# Mask the data samples that satify the loss requirement.
|
| 209 |
+
# Args:
|
| 210 |
+
# data_type_req (str): the data type required by a loss.
|
| 211 |
+
# batches_data_names (list): the list of data types in a batch.
|
| 212 |
+
# """
|
| 213 |
+
# batch_mask = np.any(data_type_req == batches_data_names, axis=0)
|
| 214 |
+
# prediction = prediction[batch_mask]
|
| 215 |
+
# target = target[batch_mask]
|
| 216 |
+
# mask = mask[batch_mask]
|
| 217 |
+
# return prediction, target, mask, batch_mask
|
| 218 |
+
|
| 219 |
+
# def update_mask_g8(self, target, mask, prediction, batches_data_names, absRel=0.5):
|
| 220 |
+
# data_type_req=np.array(['Golf8_others'])[:, None]
|
| 221 |
+
|
| 222 |
+
# pred, target, mask_sample, batch_mask = self.mask_batches(prediction, target, mask, batches_data_names, data_type_req)
|
| 223 |
+
# if pred.numel() == 0:
|
| 224 |
+
# return mask
|
| 225 |
+
# scale_batch = []
|
| 226 |
+
# for i in range(mask_sample.shape[0]):
|
| 227 |
+
# scale = torch.median(target[mask_sample]) / (torch.median(pred[mask_sample]) + 1e-8)
|
| 228 |
+
# abs_rel = torch.abs(pred[i:i+1, ...] * scale - target[i:i+1, ...]) / (pred[i:i+1, ...] * scale + 1e-6)
|
| 229 |
+
# if target[i, ...][target[i, ...]>0].min() < 0.041:
|
| 230 |
+
# mask_valid_i = ((abs_rel < absRel) | ((target[i:i+1, ...]<0.02) & (target[i:i+1, ...]>1e-6))) & mask_sample[i:i+1, ...]
|
| 231 |
+
# else:
|
| 232 |
+
# mask_valid_i = mask_sample[i:i+1, ...]
|
| 233 |
+
# mask_sample[i:i+1, ...] = mask_valid_i
|
| 234 |
+
# # print(target.max(), target[target>0].min())
|
| 235 |
+
# # self.visual_g8(target, mask_valid_i)
|
| 236 |
+
# mask[batch_mask] = mask_sample
|
| 237 |
+
# return mask
|
| 238 |
+
|
| 239 |
+
# def update_mask_g8_v2(self, target, mask, prediction, batches_data_names,):
|
| 240 |
+
# data_type_req=np.array(['Golf8_others'])[:, None]
|
| 241 |
+
|
| 242 |
+
# pred, target, mask_sample, batch_mask = self.mask_batches(prediction, target, mask, batches_data_names, data_type_req)
|
| 243 |
+
# if pred.numel() == 0:
|
| 244 |
+
# return mask
|
| 245 |
+
|
| 246 |
+
# raw_invalid_mask = target < 1e-8
|
| 247 |
+
# target[raw_invalid_mask] = 1e8
|
| 248 |
+
# kernal = 31
|
| 249 |
+
# pool = min_pool2d(target, kernal)
|
| 250 |
+
# diff = target- pool
|
| 251 |
+
# valid_mask = (diff < 0.02) & mask_sample & (target<0.3)
|
| 252 |
+
# target_min = target.view(target.shape[0], -1).min(dim=1)[0]
|
| 253 |
+
# w_close = target_min < 0.04
|
| 254 |
+
# valid_mask[~w_close] = mask_sample[~w_close]
|
| 255 |
+
# mask[batch_mask]= valid_mask
|
| 256 |
+
|
| 257 |
+
# target[raw_invalid_mask] = -1
|
| 258 |
+
# #self.visual_g8(target, mask[batch_mask])
|
| 259 |
+
# return mask
|
| 260 |
+
|
| 261 |
+
# def visual_g8(self, gt, mask):
|
| 262 |
+
# import matplotlib.pyplot as plt
|
| 263 |
+
# from mono.utils.transform import gray_to_colormap
|
| 264 |
+
# gt = gt.cpu().numpy().squeeze()
|
| 265 |
+
# mask = mask.cpu().numpy().squeeze()
|
| 266 |
+
# if gt.ndim >2:
|
| 267 |
+
# gt = gt[0, ...]
|
| 268 |
+
# mask = mask[0, ...]
|
| 269 |
+
# name = np.random.randint(1000000)
|
| 270 |
+
# print(gt.max(), gt[gt>0].min(), name)
|
| 271 |
+
# gt_filter = gt.copy()
|
| 272 |
+
# gt_filter[~mask] = 0
|
| 273 |
+
# out = np.concatenate([gt, gt_filter], axis=0)
|
| 274 |
+
# out[out<0] = 0
|
| 275 |
+
# o = gray_to_colormap(out)
|
| 276 |
+
# o[out<1e-8]=0
|
| 277 |
+
|
| 278 |
+
# plt.imsave(f'./tmp/{name}.png', o)
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
def min_pool2d(tensor, kernel, stride=1):
    """Min-pooling implemented as max-pooling on the negated tensor.

    Uses padding = kernel // 2, so with stride=1 the spatial size is kept.
    """
    negated = -tensor
    pooled = F.max_pool2d(negated, kernel, padding=kernel // 2, stride=stride)
    return -pooled
|
external/Metric3D/training/mono/model/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .monodepth_model import DepthModel
|
| 2 |
+
from .criterion import build_criterions
|
| 3 |
+
from .__base_model__ import BaseDepthModel
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
__all__ = ['DepthModel', 'BaseDepthModel']
|
external/Metric3D/training/mono/model/backbones/ConvNeXt.py
ADDED
|
@@ -0,0 +1,271 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
from timm.models.layers import trunc_normal_, DropPath
|
| 5 |
+
from timm.models.registry import register_model
|
| 6 |
+
|
| 7 |
+
class Block(nn.Module):
    r""" ConvNeXt Block. There are two equivalent implementations:
    (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
    (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
    We use (2) as we find it slightly faster in PyTorch

    Args:
        dim (int): Number of input channels.
        drop_path (float): Stochastic depth rate. Default: 0.0
        layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
    """
    def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6):
        super().__init__()
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv
        self.norm = LayerNorm(dim, eps=1e-6)
        # pointwise/1x1 convs, implemented with linear layers on (N, H, W, C)
        self.pwconv1 = nn.Linear(dim, 4 * dim)
        self.act = nn.GELU()
        self.pwconv2 = nn.Linear(4 * dim, dim)
        # Learnable per-channel layer scale; disabled when the init value is <= 0.
        self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)),
                                    requires_grad=True) if layer_scale_init_value > 0 else None
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        # `residual` renamed from `input`, which shadowed the Python builtin.
        residual = x
        x = self.dwconv(x)
        x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
        x = self.norm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        if self.gamma is not None:
            x = self.gamma * x
        x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)

        x = residual + self.drop_path(x)
        return x
|
| 43 |
+
|
| 44 |
+
class ConvNeXt(nn.Module):
    r""" ConvNeXt backbone (feature-extractor variant).

    A PyTorch impl of : `A ConvNet for the 2020s` -
    https://arxiv.org/pdf/2201.03545.pdf

    The classification head of the reference implementation is removed; forward
    returns the four per-stage feature maps instead of logits.

    Args:
        in_chans (int): Number of input image channels. Default: 3
        num_classes (int): Kept for interface compatibility; unused here since
            the classification head is removed.
        depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
        dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
        drop_path_rate (float): Stochastic depth rate. Default: 0.
        layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
        head_init_scale (float): Kept for interface compatibility; unused here.
    """
    def __init__(self, in_chans=3, num_classes=1000,
                 depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], drop_path_rate=0.,
                 layer_scale_init_value=1e-6, head_init_scale=1.,
                 **kwargs,):
        super().__init__()

        # Stem plus three inter-stage downsampling convolutions.
        self.downsample_layers = nn.ModuleList()
        self.downsample_layers.append(nn.Sequential(
            nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
            LayerNorm(dims[0], eps=1e-6, data_format="channels_first"),
        ))
        for stage_idx in range(3):
            self.downsample_layers.append(nn.Sequential(
                LayerNorm(dims[stage_idx], eps=1e-6, data_format="channels_first"),
                nn.Conv2d(dims[stage_idx], dims[stage_idx + 1], kernel_size=2, stride=2),
            ))

        # Four residual stages; drop-path rate grows linearly over all blocks.
        self.stages = nn.ModuleList()
        block_drop_rates = [r.item() for r in torch.linspace(0, drop_path_rate, sum(depths))]
        offset = 0
        for stage_idx in range(4):
            blocks = [
                Block(dim=dims[stage_idx],
                      drop_path=block_drop_rates[offset + block_idx],
                      layer_scale_init_value=layer_scale_init_value)
                for block_idx in range(depths[stage_idx])
            ]
            self.stages.append(nn.Sequential(*blocks))
            offset += depths[stage_idx]

        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated-normal weights and zero bias for every conv/linear layer.
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            trunc_normal_(m.weight, std=.02)
            nn.init.constant_(m.bias, 0)

    def forward_features(self, x):
        """Return the list of four per-stage feature maps."""
        features = []
        for downsample, stage in zip(self.downsample_layers, self.stages):
            x = stage(downsample(x))
            features.append(x)
        return features

    def forward(self, x):
        return self.forward_features(x)
|
| 112 |
+
|
| 113 |
+
class LayerNorm(nn.Module):
    r""" LayerNorm that supports two data formats: channels_last (default) or
    channels_first. channels_last expects inputs shaped
    (batch_size, height, width, channels); channels_first expects
    (batch_size, channels, height, width).
    """
    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        if data_format not in ("channels_last", "channels_first"):
            raise NotImplementedError
        self.weight = nn.Parameter(torch.ones(normalized_shape))
        self.bias = nn.Parameter(torch.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        self.normalized_shape = (normalized_shape,)

    def forward(self, x):
        if self.data_format == "channels_last":
            # Delegate to the fused functional implementation.
            return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
        # channels_first: normalize over the channel dimension (dim=1) by hand.
        mean = x.mean(1, keepdim=True)
        var = (x - mean).pow(2).mean(1, keepdim=True)
        x_hat = (x - mean) / torch.sqrt(var + self.eps)
        return self.weight[:, None, None] * x_hat + self.bias[:, None, None]
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
# Official ConvNeXt checkpoint URLs (ImageNet-1k and ImageNet-22k weights).
# NOTE(review): the loader functions below read a local 'checkpoint' path from
# kwargs and keep the URL-based download commented out, so this table is
# currently reference-only — confirm before removing.
model_urls = {
    "convnext_tiny_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth",
    "convnext_small_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth",
    "convnext_base_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth",
    "convnext_large_1k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth",
    "convnext_tiny_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_224.pth",
    "convnext_small_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_224.pth",
    "convnext_base_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth",
    "convnext_large_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth",
    "convnext_xlarge_22k": "https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth",
}
|
| 151 |
+
|
| 152 |
+
def _load_pretrained_weights(model, checkpoint_path):
    """Load all name-matching parameters from a local checkpoint into ``model``.

    The checkpoint is expected to hold its state dict under the 'model' key.
    Parameters whose names do not exist in ``model`` are reported and skipped.
    This extracts the loading logic that was duplicated verbatim across all
    five convnext_* factory functions.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    model_dict = model.state_dict()
    pretrained_dict = {}
    unmatched_pretrained_dict = {}
    for k, v in checkpoint['model'].items():
        if k in model_dict:
            pretrained_dict[k] = v
        else:
            unmatched_pretrained_dict[k] = v
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    print(
        'Successfully loaded pretrained %d paras, and %d paras are unmatched.'
        %(len(pretrained_dict.keys()), len(unmatched_pretrained_dict.keys())))
    print('Unmatched pretrained paras are:', unmatched_pretrained_dict.keys())
    return model

def convnext_tiny(pretrained=True, in_22k=False, **kwargs):
    """ConvNeXt-T backbone. When ``pretrained``, kwargs must carry a local
    'checkpoint' path; ``in_22k`` is kept for interface compatibility."""
    model = ConvNeXt(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)
    if pretrained:
        _load_pretrained_weights(model, kwargs['checkpoint'])
    return model

def convnext_small(pretrained=True, in_22k=False, **kwargs):
    """ConvNeXt-S backbone; see convnext_tiny for the loading contract."""
    model = ConvNeXt(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], **kwargs)
    if pretrained:
        _load_pretrained_weights(model, kwargs['checkpoint'])
    return model

def convnext_base(pretrained=True, in_22k=False, **kwargs):
    """ConvNeXt-B backbone; see convnext_tiny for the loading contract."""
    model = ConvNeXt(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
    if pretrained:
        _load_pretrained_weights(model, kwargs['checkpoint'])
    return model

def convnext_large(pretrained=True, in_22k=False, **kwargs):
    """ConvNeXt-L backbone; see convnext_tiny for the loading contract."""
    model = ConvNeXt(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
    if pretrained:
        _load_pretrained_weights(model, kwargs['checkpoint'])
    return model

def convnext_xlarge(pretrained=True, in_22k=False, **kwargs):
    """ConvNeXt-XL backbone; pretrained weights exist only for ImageNet-22k."""
    model = ConvNeXt(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs)
    if pretrained:
        assert in_22k, "only ImageNet-22K pre-trained ConvNeXt-XL is available; please set in_22k=True"
        _load_pretrained_weights(model, kwargs['checkpoint'])
    return model
|
| 262 |
+
|
| 263 |
+
if __name__ == '__main__':
    # Smoke test: build a ConvNeXt-B on GPU and print the four stage outputs.
    # NOTE(review): convnext_base is called with pretrained=True but no
    # 'checkpoint' kwarg — kwargs['checkpoint'] would raise KeyError; confirm
    # whether pretrained=False was intended here.
    import torch
    model = convnext_base(True, in_22k=False).cuda()

    rgb = torch.rand((2, 3, 256, 256)).cuda()
    out = model(rgb)
    print(len(out))
    for i, ft in enumerate(out):
        print(i, ft.shape)
|
external/Metric3D/training/mono/model/backbones/ViT_DINO.py
ADDED
|
@@ -0,0 +1,1504 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
# References:
|
| 8 |
+
# https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
|
| 9 |
+
# https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py
|
| 10 |
+
|
| 11 |
+
from functools import partial
|
| 12 |
+
import math
|
| 13 |
+
import logging
|
| 14 |
+
from typing import Sequence, Tuple, Union, Callable, Optional, Dict, Any, List
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
import torch.nn as nn
|
| 18 |
+
from torch import Tensor
|
| 19 |
+
import torch.utils.checkpoint
|
| 20 |
+
from torch.nn.init import trunc_normal_
|
| 21 |
+
|
| 22 |
+
#from dinov2.layers import Mlp, PatchEmbed, SwiGLUFFNFused, MemEffAttention, NestedTensorBlock as Block
|
| 23 |
+
|
| 24 |
+
logger = logging.getLogger("dinov2")
|
| 25 |
+
|
| 26 |
+
class ConvBlock(nn.Module):
|
| 27 |
+
def __init__(self, channels):
|
| 28 |
+
super(ConvBlock, self).__init__()
|
| 29 |
+
|
| 30 |
+
self.act = nn.ReLU(inplace=True)
|
| 31 |
+
self.conv1 = nn.Conv2d(
|
| 32 |
+
channels,
|
| 33 |
+
channels,
|
| 34 |
+
kernel_size=3,
|
| 35 |
+
stride=1,
|
| 36 |
+
padding=1
|
| 37 |
+
)
|
| 38 |
+
self.norm1 = nn.BatchNorm2d(channels)
|
| 39 |
+
self.conv2 = nn.Conv2d(
|
| 40 |
+
channels,
|
| 41 |
+
channels,
|
| 42 |
+
kernel_size=3,
|
| 43 |
+
stride=1,
|
| 44 |
+
padding=1
|
| 45 |
+
)
|
| 46 |
+
self.norm2 = nn.BatchNorm2d(channels)
|
| 47 |
+
|
| 48 |
+
def forward(self, x):
|
| 49 |
+
|
| 50 |
+
out = self.norm1(x)
|
| 51 |
+
out = self.act(out)
|
| 52 |
+
out = self.conv1(out)
|
| 53 |
+
out = self.norm2(out)
|
| 54 |
+
out = self.act(out)
|
| 55 |
+
out = self.conv2(out)
|
| 56 |
+
return x + out
|
| 57 |
+
|
| 58 |
+
def make_2tuple(x):
    """Normalize an int or length-2 tuple into a (h, w) tuple."""
    if isinstance(x, int):
        return (x, x)
    assert isinstance(x, tuple) and len(x) == 2
    return x
|
| 65 |
+
|
| 66 |
+
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
    """Stochastic depth: randomly zero whole samples and rescale survivors.

    A no-op in eval mode or when drop_prob is 0. Otherwise each sample in the
    batch is kept with probability (1 - drop_prob); kept samples are divided
    by the keep probability so the expectation is unchanged.
    """
    if not training or drop_prob == 0.0:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast across all remaining dims.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = x.new_empty(mask_shape).bernoulli_(keep_prob)
    if keep_prob > 0.0:
        mask.div_(keep_prob)
    return x * mask
|
| 76 |
+
|
| 77 |
+
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        # Probability of dropping the residual branch for a given sample.
        self.drop_prob = drop_prob

    def forward(self, x):
        # self.training toggles between identity (eval) and stochastic drop (train).
        return drop_path(x, drop_prob=self.drop_prob, training=self.training)
|
| 86 |
+
|
| 87 |
+
class LayerScale(nn.Module):
    """Learnable per-channel scaling of a residual branch (LayerScale).

    Args:
        dim: number of channels (last dimension of the input).
        init_values: initial value for every channel's scale.
        inplace: multiply the input tensor in place when True.
    """

    def __init__(
        self,
        dim: int,
        init_values: Union[float, Tensor] = 1e-5,
        inplace: bool = False,
    ) -> None:
        super().__init__()
        self.inplace = inplace
        # One learnable scale per channel, initialised to a small constant.
        self.gamma = nn.Parameter(init_values * torch.ones(dim))

    def forward(self, x: Tensor) -> Tensor:
        if self.inplace:
            return x.mul_(self.gamma)
        return x * self.gamma
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
class PatchEmbed(nn.Module):
    """Split an image into non-overlapping patches and linearly embed them.

    Maps (B, C, H, W) -> (B, N, D), or (B, H', W', D) when
    ``flatten_embedding`` is False.

    Args:
        img_size: nominal input image size (int or (H, W)).
        patch_size: patch token size (int or (H, W)).
        in_chans: number of input image channels.
        embed_dim: dimension of each patch embedding.
        norm_layer: optional normalization applied to the embeddings.
        flatten_embedding: return tokens flattened to (B, N, D) when True.
    """

    def __init__(
        self,
        img_size: Union[int, Tuple[int, int]] = 224,
        patch_size: Union[int, Tuple[int, int]] = 16,
        in_chans: int = 3,
        embed_dim: int = 768,
        norm_layer: Optional[Callable] = None,
        flatten_embedding: bool = True,
    ) -> None:
        super().__init__()

        image_HW = make_2tuple(img_size)
        patch_HW = make_2tuple(patch_size)
        grid_h = image_HW[0] // patch_HW[0]
        grid_w = image_HW[1] // patch_HW[1]

        self.img_size = image_HW
        self.patch_size = patch_HW
        self.patches_resolution = (grid_h, grid_w)
        self.num_patches = grid_h * grid_w
        self.in_chans = in_chans
        self.embed_dim = embed_dim
        self.flatten_embedding = flatten_embedding

        # Patch embedding == strided convolution with kernel == stride == patch.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_HW, stride=patch_HW)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x: Tensor) -> Tensor:
        _, _, H, W = x.shape
        patch_H, patch_W = self.patch_size

        assert H % patch_H == 0, f"Input image height {H} is not a multiple of patch height {patch_H}"
        assert W % patch_W == 0, f"Input image width {W} is not a multiple of patch width: {patch_W}"

        x = self.proj(x)  # (B, D, H', W')
        H, W = x.size(2), x.size(3)
        x = x.flatten(2).transpose(1, 2)  # (B, H'*W', D)
        x = self.norm(x)
        if not self.flatten_embedding:
            x = x.reshape(-1, H, W, self.embed_dim)  # (B, H', W', D)
        return x

    def flops(self) -> float:
        """Approximate FLOP count of the projection (plus the optional norm)."""
        Ho, Wo = self.patches_resolution
        flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
        if self.norm is not None:
            flops += Ho * Wo * self.embed_dim
        return flops
|
| 166 |
+
|
| 167 |
+
class Mlp(nn.Module):
|
| 168 |
+
def __init__(
|
| 169 |
+
self,
|
| 170 |
+
in_features: int,
|
| 171 |
+
hidden_features: Optional[int] = None,
|
| 172 |
+
out_features: Optional[int] = None,
|
| 173 |
+
act_layer: Callable[..., nn.Module] = nn.GELU,
|
| 174 |
+
drop: float = 0.0,
|
| 175 |
+
bias: bool = True,
|
| 176 |
+
) -> None:
|
| 177 |
+
super().__init__()
|
| 178 |
+
out_features = out_features or in_features
|
| 179 |
+
hidden_features = hidden_features or in_features
|
| 180 |
+
self.fc1 = nn.Linear(in_features, hidden_features, bias=bias)
|
| 181 |
+
self.act = act_layer()
|
| 182 |
+
self.fc2 = nn.Linear(hidden_features, out_features, bias=bias)
|
| 183 |
+
self.drop = nn.Dropout(drop)
|
| 184 |
+
|
| 185 |
+
def forward(self, x: Tensor) -> Tensor:
|
| 186 |
+
x = self.fc1(x)
|
| 187 |
+
x = self.act(x)
|
| 188 |
+
x = self.drop(x)
|
| 189 |
+
x = self.fc2(x)
|
| 190 |
+
x = self.drop(x)
|
| 191 |
+
return x
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
class SwiGLUFFN(nn.Module):
    """SwiGLU feed-forward: fused linear producing gate + value, SiLU gating, projection.

    Args:
        in_features: input dimension.
        hidden_features: gated hidden dimension (defaults to in_features).
        out_features: output dimension (defaults to in_features).
        act_layer: unused; kept for signature compatibility with Mlp.
        drop: unused; kept for signature compatibility with Mlp.
        bias: whether the linear layers carry a bias.
    """

    def __init__(
        self,
        in_features: int,
        hidden_features: Optional[int] = None,
        out_features: Optional[int] = None,
        act_layer: Callable[..., nn.Module] = None,
        drop: float = 0.0,
        bias: bool = True,
    ) -> None:
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        # w12 computes both the gate (x1) and the value (x2) in one matmul.
        self.w12 = nn.Linear(in_features, 2 * hidden_features, bias=bias)
        self.w3 = nn.Linear(hidden_features, out_features, bias=bias)

    def forward(self, x: Tensor) -> Tensor:
        x12 = self.w12(x)
        x1, x2 = x12.chunk(2, dim=-1)
        # BUG FIX: the original referenced `F.silu`, but this file never imports
        # torch.nn.functional as F, so the call raised NameError at runtime.
        # Route through the already-imported `nn` namespace instead.
        hidden = nn.functional.silu(x1) * x2
        return self.w3(hidden)
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
try:
|
| 218 |
+
from xformers.ops import SwiGLU
|
| 219 |
+
#import numpy.bool
|
| 220 |
+
XFORMERS_AVAILABLE = True
|
| 221 |
+
except ImportError:
|
| 222 |
+
SwiGLU = SwiGLUFFN
|
| 223 |
+
XFORMERS_AVAILABLE = False
|
| 224 |
+
|
| 225 |
+
class SwiGLUFFNFused(SwiGLU):
    """SwiGLU FFN with a rescaled, hardware-friendly hidden size.

    The nominal hidden size is multiplied by 2/3 (keeping parameter count
    comparable to a plain MLP of the same nominal width) and rounded up to
    a multiple of 8 before delegating to the SwiGLU base class.
    """

    def __init__(
        self,
        in_features: int,
        hidden_features: Optional[int] = None,
        out_features: Optional[int] = None,
        act_layer: Callable[..., nn.Module] = None,
        drop: float = 0.0,
        bias: bool = True,
    ) -> None:
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        # Equivalent to ceil((hidden * 2 / 3) / 8) * 8.
        hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
        super().__init__(
            in_features=in_features,
            hidden_features=hidden_features,
            out_features=out_features,
            bias=bias,
        )
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
try:
|
| 247 |
+
from xformers.ops import memory_efficient_attention, unbind, fmha
|
| 248 |
+
from xformers.components.attention import ScaledDotProduct
|
| 249 |
+
from xformers.components import MultiHeadDispatch
|
| 250 |
+
#import numpy.bool
|
| 251 |
+
XFORMERS_AVAILABLE = True
|
| 252 |
+
except ImportError:
|
| 253 |
+
logger.warning("xFormers not available")
|
| 254 |
+
XFORMERS_AVAILABLE = False
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
class Attention(nn.Module):
|
| 258 |
+
def __init__(
|
| 259 |
+
self,
|
| 260 |
+
dim: int,
|
| 261 |
+
num_heads: int = 8,
|
| 262 |
+
qkv_bias: bool = False,
|
| 263 |
+
proj_bias: bool = True,
|
| 264 |
+
attn_drop: float = 0.0,
|
| 265 |
+
proj_drop: float = 0.0,
|
| 266 |
+
window_size: int = 0,
|
| 267 |
+
) -> None:
|
| 268 |
+
super().__init__()
|
| 269 |
+
self.num_heads = num_heads
|
| 270 |
+
head_dim = dim // num_heads
|
| 271 |
+
self.scale = head_dim**-0.5
|
| 272 |
+
|
| 273 |
+
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
|
| 274 |
+
self.attn_drop = nn.Dropout(attn_drop)
|
| 275 |
+
self.proj = nn.Linear(dim, dim, bias=proj_bias)
|
| 276 |
+
self.proj_drop = nn.Dropout(proj_drop)
|
| 277 |
+
|
| 278 |
+
#if not self.training:
|
| 279 |
+
#
|
| 280 |
+
# self.attn = ScaledDotProduct()
|
| 281 |
+
#self.attn = MultiHeadDispatch(dim_model=EMB, residual_dropout=DROPOUT, num_heads=HEADS, attention=attn)
|
| 282 |
+
|
| 283 |
+
def forward(self, x: Tensor, attn_bias=None) -> Tensor:
|
| 284 |
+
B, N, C = x.shape
|
| 285 |
+
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
|
| 286 |
+
|
| 287 |
+
q, k, v = qkv[0] * self.scale, qkv[1], qkv[2]
|
| 288 |
+
attn = q @ k.transpose(-2, -1)
|
| 289 |
+
|
| 290 |
+
if attn_bias is not None:
|
| 291 |
+
attn = attn + attn_bias[:, :, :N]
|
| 292 |
+
|
| 293 |
+
attn = attn.softmax(dim=-1)
|
| 294 |
+
attn = self.attn_drop(attn)
|
| 295 |
+
|
| 296 |
+
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
|
| 297 |
+
x = self.proj(x)
|
| 298 |
+
x = self.proj_drop(x)
|
| 299 |
+
return x
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
class MemEffAttention(Attention):
    # Attention variant that dispatches to xFormers' memory-efficient kernel
    # when available, and otherwise falls back to the plain implementation.
    def forward(self, x: Tensor, attn_bias=None) -> Tensor:
        if not XFORMERS_AVAILABLE:
            #if True:
            # The fallback dense path cannot consume xFormers-style
            # block-diagonal biases built for nested-tensor batches.
            assert attn_bias is None, "xFormers is required for nested tensors usage"
            return super().forward(x, attn_bias)

        B, N, C = x.shape
        # (B, N, 3, heads, head_dim) layout expected by memory_efficient_attention.
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)

        q, k, v = unbind(qkv, 2)
        if attn_bias is not None:
            # Bias is cropped to N along the last axis, mirroring Attention.forward.
            x = memory_efficient_attention(q, k, v, attn_bias=attn_bias[:, :, :N])
        else:
            x = memory_efficient_attention(q, k, v)
        x = x.reshape([B, N, C])

        x = self.proj(x)
        x = self.proj_drop(x)
        return x
|
| 322 |
+
|
| 323 |
+
try:
|
| 324 |
+
from xformers.ops import fmha
|
| 325 |
+
from xformers.ops import scaled_index_add, index_select_cat
|
| 326 |
+
#import numpy.bool
|
| 327 |
+
XFORMERS_AVAILABLE = True
|
| 328 |
+
except ImportError:
|
| 329 |
+
logger.warning("xFormers not available")
|
| 330 |
+
XFORMERS_AVAILABLE = False
|
| 331 |
+
|
| 332 |
+
class Block(nn.Module):
    """Transformer block: pre-norm attention and FFN residual branches.

    Each branch is optionally LayerScaled (``init_values``) and subject to
    stochastic depth (``drop_path``). During training with a drop-path rate
    above 0.1, whole samples are removed from the batch before running a
    branch (via ``drop_add_residual_stochastic_depth``) so the skipped
    computation is actually saved rather than merely masked.
    """

    def __init__(
        self,
        dim: int,
        num_heads: int,
        mlp_ratio: float = 4.0,
        qkv_bias: bool = False,
        proj_bias: bool = True,
        ffn_bias: bool = True,
        drop: float = 0.0,
        attn_drop: float = 0.0,
        init_values=None,
        drop_path: float = 0.0,
        act_layer: Callable[..., nn.Module] = nn.GELU,
        norm_layer: Callable[..., nn.Module] = nn.LayerNorm,
        attn_class: Callable[..., nn.Module] = Attention,
        ffn_layer: Callable[..., nn.Module] = Mlp,
    ) -> None:
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = attn_class(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            proj_bias=proj_bias,
            attn_drop=attn_drop,
            proj_drop=drop,
        )
        # init_values None or 0 disables LayerScale entirely.
        self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
        self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = ffn_layer(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=act_layer,
            drop=drop,
            bias=ffn_bias,
        )
        self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
        self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

        self.sample_drop_ratio = drop_path

    def forward(self, x: Tensor, attn_bias=None) -> Tensor:
        def attn_residual_func(x: Tensor, attn_bias) -> Tensor:
            return self.ls1(self.attn(self.norm1(x), attn_bias))

        # BUG FIX: drop_add_residual_stochastic_depth invokes
        # residual_func(x_subset, attn_bias) with two positional arguments; the
        # original one-argument ffn_residual_func raised TypeError on the
        # sample-dropping path below. Accept (and ignore) attn_bias.
        def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
            return self.ls2(self.mlp(self.norm2(x)))

        if self.training and self.sample_drop_ratio > 0.1:
            # the overhead is compensated only for a drop path rate larger than 0.1
            x = drop_add_residual_stochastic_depth(
                x,
                residual_func=attn_residual_func,
                sample_drop_ratio=self.sample_drop_ratio,
                attn_bias=attn_bias,
            )
            x = drop_add_residual_stochastic_depth(
                x,
                residual_func=ffn_residual_func,
                sample_drop_ratio=self.sample_drop_ratio,
            )
        elif self.training and self.sample_drop_ratio > 0.0:
            x = x + self.drop_path1(attn_residual_func(x, attn_bias))
            # BUG FIX: the FFN branch previously reused drop_path1 (noted by an
            # upstream FIXME). Both modules share the same drop probability, so
            # the sampling distribution is unchanged, but the FFN branch now
            # goes through its own drop_path2 as intended.
            x = x + self.drop_path2(ffn_residual_func(x))
        else:
            x = x + attn_residual_func(x, attn_bias)
            x = x + ffn_residual_func(x)
        return x
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
def drop_add_residual_stochastic_depth(
    x: Tensor,
    residual_func: Callable[[Tensor], Tensor],
    sample_drop_ratio: float = 0.0,
    attn_bias=None,
) -> Tensor:
    """Batch-level stochastic depth.

    Runs the residual branch on a random subset of samples only, then
    scatter-adds the residual back into the full batch, upscaled so the
    expectation matches running the branch on every sample.
    """
    batch = x.shape[0]
    # 1) choose which samples actually get computed.
    subset_size = max(int(batch * (1 - sample_drop_ratio)), 1)
    brange = torch.randperm(batch, device=x.device)[:subset_size]

    # 2) evaluate the branch on the kept subset only.
    residual = residual_func(x[brange], attn_bias)

    # 3) scatter the rescaled residual back into the full batch.
    scale = batch / subset_size
    result_flat = torch.index_add(
        x.flatten(1), 0, brange, residual.flatten(1).to(dtype=x.dtype), alpha=scale
    )
    return result_flat.view_as(x)
|
| 429 |
+
|
| 430 |
+
|
| 431 |
+
def get_branges_scales(x, sample_drop_ratio=0.0):
    """Sample the kept batch indices and the matching residual rescale factor."""
    batch = x.shape[0]
    keep = max(int(batch * (1 - sample_drop_ratio)), 1)
    # Random subset of samples whose residual branch will be evaluated.
    brange = torch.randperm(batch, device=x.device)[:keep]
    return brange, batch / keep
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
def add_residual(x, brange, residual, residual_scale_factor, scaling_vector=None):
    # Scatter-add `residual` (computed on the subset `brange`) back into `x`,
    # rescaled by `residual_scale_factor` to keep the expectation unchanged.
    # NOTE: the scaling_vector=None branch returns a tensor flattened to 2D,
    # while the fused branch preserves x's shape; callers apply .view_as(x).
    if scaling_vector is None:
        x_flat = x.flatten(1)
        residual = residual.flatten(1)
        x_plus_residual = torch.index_add(x_flat, 0, brange, residual.to(dtype=x.dtype), alpha=residual_scale_factor)
    else:
        # Fused xFormers kernel that also applies the LayerScale vector.
        x_plus_residual = scaled_index_add(
            x, brange, residual.to(dtype=x.dtype), scaling=scaling_vector, alpha=residual_scale_factor
        )
    return x_plus_residual
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
attn_bias_cache: Dict[Tuple, Any] = {}
|
| 452 |
+
|
| 453 |
+
|
| 454 |
+
def get_attn_bias_and_cat(x_list, branges=None):
    """
    this will perform the index select, cat the tensors, and provide the attn_bias from cache
    """
    # Effective per-tensor batch sizes: subset sizes when branges is given.
    batch_sizes = [b.shape[0] for b in branges] if branges is not None else [x.shape[0] for x in x_list]
    all_shapes = tuple((b, x.shape[1]) for b, x in zip(batch_sizes, x_list))
    if all_shapes not in attn_bias_cache.keys():
        # The block-diagonal mask depends only on (batch, seqlen) shapes, so it
        # is built once per shape combination and cached at module level.
        seqlens = []
        for b, x in zip(batch_sizes, x_list):
            for _ in range(b):
                seqlens.append(x.shape[1])
        attn_bias = fmha.BlockDiagonalMask.from_seqlens(seqlens)
        attn_bias._batch_sizes = batch_sizes
        attn_bias_cache[all_shapes] = attn_bias
    if branges is not None:
        # Fused xFormers index-select + concat: everything is packed into one
        # batch-1 "nested" sequence matching the block-diagonal mask.
        cat_tensors = index_select_cat([x.flatten(1) for x in x_list], branges).view(1, -1, x_list[0].shape[-1])
    else:
        tensors_bs1 = tuple(x.reshape([1, -1, *x.shape[2:]]) for x in x_list)
        cat_tensors = torch.cat(tensors_bs1, dim=1)
    return attn_bias_cache[all_shapes], cat_tensors
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
def drop_add_residual_stochastic_depth_list(
    x_list: List[Tensor],
    residual_func: Callable[[Tensor, Any], Tensor],
    sample_drop_ratio: float = 0.0,
    scaling_vector=None,
) -> List[Tensor]:
    """Stochastic-depth residual update applied jointly to a list of tensors.

    Subsets each tensor's batch, concatenates the subsets into a single nested
    batch with a block-diagonal attention bias, runs the residual branch once
    over the concatenation, then scatters each piece back into its source
    tensor (rescaled to preserve the expectation).
    """
    # 1) generate random set of indices for dropping samples in the batch
    branges_scales = [get_branges_scales(x, sample_drop_ratio=sample_drop_ratio) for x in x_list]
    branges = [s[0] for s in branges_scales]
    residual_scale_factors = [s[1] for s in branges_scales]

    # 2) get attention bias and index+concat the tensors
    attn_bias, x_cat = get_attn_bias_and_cat(x_list, branges)

    # 3) apply residual_func to get residual, and split the result
    residual_list = attn_bias.split(residual_func(x_cat, attn_bias=attn_bias))  # type: ignore

    outputs = []
    for x, brange, residual, residual_scale_factor in zip(x_list, branges, residual_list, residual_scale_factors):
        outputs.append(add_residual(x, brange, residual, residual_scale_factor, scaling_vector).view_as(x))
    return outputs
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
class NestedTensorBlock(Block):
    # Block variant that can process a list of tensors as one "nested" batch
    # via xFormers block-diagonal attention.
    def forward_nested(self, x_list: List[Tensor]) -> List[Tensor]:
        """
        x_list contains a list of tensors to nest together and run
        """
        assert isinstance(self.attn, MemEffAttention)

        if self.training and self.sample_drop_ratio > 0.0:

            def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
                return self.attn(self.norm1(x), attn_bias=attn_bias)

            def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
                return self.mlp(self.norm2(x))

            # On this path LayerScale is not applied inside the residual funcs;
            # it is folded into the fused scatter-add via scaling_vector.
            x_list = drop_add_residual_stochastic_depth_list(
                x_list,
                residual_func=attn_residual_func,
                sample_drop_ratio=self.sample_drop_ratio,
                scaling_vector=self.ls1.gamma if isinstance(self.ls1, LayerScale) else None,
            )
            x_list = drop_add_residual_stochastic_depth_list(
                x_list,
                residual_func=ffn_residual_func,
                sample_drop_ratio=self.sample_drop_ratio,
                # NOTE(review): guards on self.ls1 but passes self.ls2.gamma;
                # ls1/ls2 are constructed together in Block.__init__, so they
                # should not diverge — confirm.
                scaling_vector=self.ls2.gamma if isinstance(self.ls1, LayerScale) else None,
            )
            return x_list
        else:

            def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
                return self.ls1(self.attn(self.norm1(x), attn_bias=attn_bias))

            def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
                return self.ls2(self.mlp(self.norm2(x)))

            # Concatenate the list into one batch-1 sequence with a
            # block-diagonal bias, run both branches once, then split back.
            attn_bias, x = get_attn_bias_and_cat(x_list)
            x = x + attn_residual_func(x, attn_bias=attn_bias)
            x = x + ffn_residual_func(x)
            return attn_bias.split(x)

    def forward(self, x_or_x_list, attn_bias=None):
        # Plain tensors use the standard Block path; lists require xFormers.
        if isinstance(x_or_x_list, Tensor):
            return super().forward(x_or_x_list, attn_bias)
        elif isinstance(x_or_x_list, list):
            assert XFORMERS_AVAILABLE, "Please install xFormers for nested tensors usage"
            return self.forward_nested(x_or_x_list)
        else:
            raise AssertionError
|
| 550 |
+
|
| 551 |
+
|
| 552 |
+
def named_apply(fn: Callable, module: nn.Module, name="", depth_first=True, include_root=False) -> nn.Module:
    """Recursively apply ``fn(module=..., name=...)`` over a module tree.

    Children are always visited (with dotted qualified names); the node itself
    is visited only when ``include_root`` is True. ``depth_first`` controls
    whether a node is visited after (True) or before (False) its children.
    Returns the module to allow chaining.
    """
    if not depth_first and include_root:
        fn(module=module, name=name)
    for child_name, child in module.named_children():
        qualified = f"{name}.{child_name}" if name else child_name
        named_apply(fn=fn, module=child, name=qualified, depth_first=depth_first, include_root=True)
    if depth_first and include_root:
        fn(module=module, name=name)
    return module
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
class BlockChunk(nn.ModuleList):
    """A chunk of transformer blocks applied sequentially.

    Used to group blocks for FSDP wrapping; leading positions may hold
    nn.Identity placeholders to keep block indices stable across chunks.
    """

    def forward(self, x, others=None):
        for blk in self:
            # IDIOM FIX: use an identity check rather than `others == None` —
            # `==` against None is non-idiomatic and, for tensor-like values,
            # invokes __eq__ instead of a plain None test.
            if others is None:
                x = blk(x)
            else:
                x = blk(x, others)
        return x
|
| 571 |
+
|
| 572 |
+
|
| 573 |
+
class DinoVisionTransformer(nn.Module):
|
| 574 |
+
def __init__(
|
| 575 |
+
self,
|
| 576 |
+
img_size=224,
|
| 577 |
+
patch_size=16,
|
| 578 |
+
in_chans=3,
|
| 579 |
+
embed_dim=768,
|
| 580 |
+
depth=12,
|
| 581 |
+
num_heads=12,
|
| 582 |
+
mlp_ratio=4.0,
|
| 583 |
+
qkv_bias=True,
|
| 584 |
+
ffn_bias=True,
|
| 585 |
+
proj_bias=True,
|
| 586 |
+
drop_path_rate=0.0,
|
| 587 |
+
drop_path_uniform=False,
|
| 588 |
+
#init_values=None, # for layerscale: None or 0 => no layerscale
|
| 589 |
+
init_values=1e-5, # for layerscale: None or 0 => no layerscale
|
| 590 |
+
embed_layer=PatchEmbed,
|
| 591 |
+
act_layer=nn.GELU,
|
| 592 |
+
block_fn=NestedTensorBlock,
|
| 593 |
+
ffn_layer="mlp",
|
| 594 |
+
block_chunks=1,
|
| 595 |
+
window_size=37,
|
| 596 |
+
**kwargs
|
| 597 |
+
):
|
| 598 |
+
"""
|
| 599 |
+
Args:
|
| 600 |
+
img_size (int, tuple): input image size
|
| 601 |
+
patch_size (int, tuple): patch size
|
| 602 |
+
in_chans (int): number of input channels
|
| 603 |
+
embed_dim (int): embedding dimension
|
| 604 |
+
depth (int): depth of transformer
|
| 605 |
+
num_heads (int): number of attention heads
|
| 606 |
+
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
|
| 607 |
+
qkv_bias (bool): enable bias for qkv if True
|
| 608 |
+
proj_bias (bool): enable bias for proj in attn if True
|
| 609 |
+
ffn_bias (bool): enable bias for ffn if True
|
| 610 |
+
drop_path_rate (float): stochastic depth rate
|
| 611 |
+
drop_path_uniform (bool): apply uniform drop rate across blocks
|
| 612 |
+
weight_init (str): weight init scheme
|
| 613 |
+
init_values (float): layer-scale init values
|
| 614 |
+
embed_layer (nn.Module): patch embedding layer
|
| 615 |
+
act_layer (nn.Module): MLP activation layer
|
| 616 |
+
block_fn (nn.Module): transformer block class
|
| 617 |
+
ffn_layer (str): "mlp", "swiglu", "swiglufused" or "identity"
|
| 618 |
+
block_chunks: (int) split block sequence into block_chunks units for FSDP wrap
|
| 619 |
+
"""
|
| 620 |
+
super().__init__()
|
| 621 |
+
norm_layer = partial(nn.LayerNorm, eps=1e-6)
|
| 622 |
+
|
| 623 |
+
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
|
| 624 |
+
self.num_tokens = 1
|
| 625 |
+
self.n_blocks = depth
|
| 626 |
+
self.num_heads = num_heads
|
| 627 |
+
self.patch_size = patch_size
|
| 628 |
+
self.window_size = window_size
|
| 629 |
+
|
| 630 |
+
self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
|
| 631 |
+
num_patches = self.patch_embed.num_patches
|
| 632 |
+
|
| 633 |
+
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
|
| 634 |
+
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
|
| 635 |
+
|
| 636 |
+
if drop_path_uniform is True:
|
| 637 |
+
dpr = [drop_path_rate] * depth
|
| 638 |
+
else:
|
| 639 |
+
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
|
| 640 |
+
|
| 641 |
+
if ffn_layer == "mlp":
|
| 642 |
+
logger.info("using MLP layer as FFN")
|
| 643 |
+
ffn_layer = Mlp
|
| 644 |
+
elif ffn_layer == "swiglufused" or ffn_layer == "swiglu":
|
| 645 |
+
logger.info("using SwiGLU layer as FFN")
|
| 646 |
+
ffn_layer = SwiGLUFFNFused
|
| 647 |
+
elif ffn_layer == "identity":
|
| 648 |
+
logger.info("using Identity layer as FFN")
|
| 649 |
+
|
| 650 |
+
def f(*args, **kwargs):
|
| 651 |
+
return nn.Identity()
|
| 652 |
+
|
| 653 |
+
ffn_layer = f
|
| 654 |
+
else:
|
| 655 |
+
raise NotImplementedError
|
| 656 |
+
|
| 657 |
+
blocks_list = [
|
| 658 |
+
block_fn(
|
| 659 |
+
dim=embed_dim,
|
| 660 |
+
num_heads=num_heads,
|
| 661 |
+
mlp_ratio=mlp_ratio,
|
| 662 |
+
qkv_bias=qkv_bias,
|
| 663 |
+
proj_bias=proj_bias,
|
| 664 |
+
ffn_bias=ffn_bias,
|
| 665 |
+
drop_path=dpr[i],
|
| 666 |
+
norm_layer=norm_layer,
|
| 667 |
+
act_layer=act_layer,
|
| 668 |
+
ffn_layer=ffn_layer,
|
| 669 |
+
init_values=init_values,
|
| 670 |
+
)
|
| 671 |
+
for i in range(depth)
|
| 672 |
+
]
|
| 673 |
+
if block_chunks > 0:
|
| 674 |
+
self.chunked_blocks = True
|
| 675 |
+
chunked_blocks = []
|
| 676 |
+
chunksize = depth // block_chunks
|
| 677 |
+
for i in range(0, depth, chunksize):
|
| 678 |
+
# this is to keep the block index consistent if we chunk the block list
|
| 679 |
+
chunked_blocks.append([nn.Identity()] * i + blocks_list[i : i + chunksize])
|
| 680 |
+
self.blocks = nn.ModuleList([BlockChunk(p) for p in chunked_blocks])
|
| 681 |
+
else:
|
| 682 |
+
self.chunked_blocks = False
|
| 683 |
+
self.blocks = nn.ModuleList(blocks_list)
|
| 684 |
+
|
| 685 |
+
self.norm = norm_layer(embed_dim)
|
| 686 |
+
self.head = nn.Identity()
|
| 687 |
+
|
| 688 |
+
self.mask_token = nn.Parameter(torch.zeros(1, embed_dim))
|
| 689 |
+
|
| 690 |
+
self.init_weights()
|
| 691 |
+
|
| 692 |
+
def init_weights(self):
    """Initialize the learned position embedding, cls token and all Linear layers."""
    # Truncated-normal init for the absolute position embedding (timm convention).
    trunc_normal_(self.pos_embed, std=0.02)
    # Very small normal init for the cls token.
    nn.init.normal_(self.cls_token, std=1e-6)
    # Recursively apply timm-style init (trunc-normal weight, zero bias) to nn.Linear modules.
    named_apply(init_weights_vit_timm, self)
|
| 696 |
+
|
| 697 |
+
def interpolate_pos_encoding(self, x, w, h):
    """Resample the learned position embedding to the current token grid.

    x: (B, 1 + npatch, dim) token tensor with the cls token first.
    w, h: input image width/height in pixels.
    Returns a (1, 1 + w0*h0, dim) embedding in x's original dtype.
    Assumes the stored embedding was trained on a square grid (sqrt(N) x sqrt(N)).
    """
    previous_dtype = x.dtype
    npatch = x.shape[1] - 1
    N = self.pos_embed.shape[1] - 1
    # Fast path: token count matches the pretrained grid and the image is square.
    if npatch == N and w == h:
        return self.pos_embed
    # Interpolate in float32 for numerical stability, cast back at the end.
    pos_embed = self.pos_embed.float()
    class_pos_embed = pos_embed[:, 0]
    patch_pos_embed = pos_embed[:, 1:]
    dim = x.shape[-1]
    w0 = w // self.patch_size
    h0 = h // self.patch_size
    # we add a small number to avoid floating point error in the interpolation
    # see discussion at https://github.com/facebookresearch/dino/issues/8
    w0, h0 = w0 + 0.1, h0 + 0.1

    patch_pos_embed = nn.functional.interpolate(
        patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
        scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
        mode="bicubic",
    )

    # The +0.1 trick must still land on the exact integer target grid.
    assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
    patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
    return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1).to(previous_dtype)
|
| 722 |
+
|
| 723 |
+
def prepare_tokens_with_masks(self, x, masks=None):
    """Patch-embed *x*, optionally replace masked tokens, prepend the cls token
    and add the (resampled) position embedding.

    x: (B, C, w, h) image batch; masks: optional (B, npatch) boolean mask.
    Returns a (B, 1 + npatch, dim) token tensor.
    """
    _, _, img_w, img_h = x.shape
    tokens = self.patch_embed(x)
    if masks is not None:
        # Masked patch positions are overwritten with the learned mask token.
        mask_tok = self.mask_token.to(tokens.dtype).unsqueeze(0)
        tokens = torch.where(masks.unsqueeze(-1), mask_tok, tokens)

    cls_tok = self.cls_token.expand(tokens.shape[0], -1, -1)
    tokens = torch.cat((cls_tok, tokens), dim=1)
    return tokens + self.interpolate_pos_encoding(tokens, img_w, img_h)
|
| 733 |
+
|
| 734 |
+
def forward_features_list(self, x_list, masks_list):
    """Run the backbone over a list of (image batch, mask) pairs.

    Returns one dict per input batch with normalized cls/patch tokens,
    the pre-norm tokens, and the mask that was applied.
    """
    feats = [
        self.prepare_tokens_with_masks(img, m)
        for img, m in zip(x_list, masks_list)
    ]
    for blk in self.blocks:
        feats = blk(feats)

    results = []
    for raw, m in zip(feats, masks_list):
        normed = self.norm(raw)
        entry = {
            "x_norm_clstoken": normed[:, 0],
            "x_norm_patchtokens": normed[:, 1:],
            "x_prenorm": raw,
            "masks": m,
        }
        results.append(entry)
    return results
|
| 752 |
+
|
| 753 |
+
def forward_features(self, x, masks=None):
    """Backbone forward pass.

    x: (B, C, H, W) image batch, or a list of batches (delegated to
    forward_features_list). masks: optional token mask.
    Returns [features, (B, nh, nw, H, W)] where features is a list of four
    references to the final normalized token map and (nh, nw) is the token
    grid size — the 5-tuple lets the decoder recover the original H, W.
    """
    if isinstance(x, list):
        return self.forward_features_list(x, masks)

    B, C, H, W = x.size()
    # Amount needed to round H/W up to the next multiple of the patch size
    # (0 when already divisible).
    pad_h = (self.patch_size - H % self.patch_size) % self.patch_size
    pad_w = (self.patch_size - W % self.patch_size) % self.patch_size
    # Resize (rather than zero-pad) so the token grid divides exactly.
    if pad_h + pad_w > 0:
        x = torch.nn.functional.interpolate(x, (H + pad_h, W + pad_w), mode='bilinear')

    x = self.prepare_tokens_with_masks(x, masks)

    for blk in self.blocks:
        x = blk(x)

    x_norm = self.norm(x)
    # Four references to the same normalized map: downstream decoders expect
    # a 4-level feature list and reuse the final map at every level.
    features = [x_norm] * 4
    return [features, (B, (H + pad_h) // self.patch_size, (W + pad_w) // self.patch_size, H, W)]
|
| 793 |
+
|
| 794 |
+
def _get_intermediate_layers_not_chunked(self, x, n=1):
    """Collect outputs of selected transformer blocks (flat block list).

    n: int -> take the last n blocks; sequence -> take those block indices.
    Returns the raw (pre-norm) token tensors of the selected blocks.
    """
    x = self.prepare_tokens_with_masks(x)
    # If n is an int, take the n last blocks. If it's a list, take them
    output, total_block_len = [], len(self.blocks)
    blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
    for i, blk in enumerate(self.blocks):
        x = blk(x)
        if i in blocks_to_take:
            output.append(x)
    assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
    return output
|
| 805 |
+
|
| 806 |
+
def _get_intermediate_layers_chunked(self, x, n=1):
    """Collect outputs of selected blocks when blocks are wrapped in chunks.

    Each chunk i is padded at the front with nn.Identity() placeholders so
    the running counter `i` below is a *global* block index; the slice
    `block_chunk[i:]` skips those placeholders.
    """
    x = self.prepare_tokens_with_masks(x)
    # len(self.blocks[-1]) equals the total depth because of the Identity padding.
    output, i, total_block_len = [], 0, len(self.blocks[-1])
    # If n is an int, take the n last blocks. If it's a list, take them
    blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
    for block_chunk in self.blocks:
        for blk in block_chunk[i:]:  # Passing the nn.Identity()
            x = blk(x)
            if i in blocks_to_take:
                output.append(x)
            i += 1
    assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
    return output
|
| 819 |
+
|
| 820 |
+
def get_intermediate_layers(
    self,
    x: torch.Tensor,
    n: Union[int, Sequence] = 1,  # Layers or n last layers to take
    reshape: bool = False,
    return_class_token: bool = False,
    norm=True,
) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]:
    """Return patch-token outputs of selected blocks (DINOv2-style API).

    reshape: if True, reshape patch tokens to (B, dim, w0, h0) feature maps.
    return_class_token: if True, return (patch_tokens, cls_token) pairs.
    norm: apply the final LayerNorm to each collected output.
    """
    if self.chunked_blocks:
        outputs = self._get_intermediate_layers_chunked(x, n)
    else:
        outputs = self._get_intermediate_layers_not_chunked(x, n)
    if norm:
        outputs = [self.norm(out) for out in outputs]
    # Token 0 is the cls token; the rest are patch tokens.
    class_tokens = [out[:, 0] for out in outputs]
    outputs = [out[:, 1:] for out in outputs]
    if reshape:
        B, _, w, h = x.shape
        outputs = [
            out.reshape(B, w // self.patch_size, h // self.patch_size, -1).permute(0, 3, 1, 2).contiguous()
            for out in outputs
        ]
    if return_class_token:
        return tuple(zip(outputs, class_tokens))
    return tuple(outputs)
|
| 845 |
+
|
| 846 |
+
def forward(self, *args, is_training=False, **kwargs):
    """Thin wrapper: delegate straight to forward_features.

    `is_training` is accepted for API compatibility but ignored — the raw
    feature output is always returned.
    """
    return self.forward_features(*args, **kwargs)
|
| 853 |
+
|
| 854 |
+
|
| 855 |
+
class PosConv(nn.Module):
    """Conditional positional encoding generator (PEG).

    See https://arxiv.org/abs/2102.10882. A single large depthwise
    convolution is applied over the token grid; with stride 1 the result is
    added residually to the input tokens.
    """

    def __init__(self, in_chans, embed_dim=768, stride=1):
        super(PosConv, self).__init__()
        # Wrapped in a Sequential so checkpoint keys stay 'proj.0.*'.
        # groups=embed_dim makes this a depthwise conv (kernel 37, pad 18
        # keeps the spatial size at stride 1).
        depthwise = nn.Conv2d(in_chans, embed_dim, 37, stride, 18, bias=True, groups=embed_dim)
        self.proj = nn.Sequential(depthwise)
        self.stride = stride

    def forward(self, x, size):
        """x: (B, N, C) tokens; size: (h, w) token grid with h*w == N."""
        batch, _, channels = x.shape
        grid = x.transpose(1, 2).view(batch, channels, *size)
        out = self.proj(grid)
        if self.stride == 1:
            # Residual connection only when the spatial size is preserved.
            out += grid
        return out.flatten(2).transpose(1, 2)
|
| 875 |
+
|
| 876 |
+
class DinoWindowVisionTransformer(nn.Module):
    """DINOv2 ViT variant with optional windowed attention and a convolutional
    positional encoding (PEG) blended in over training.

    Differences from DinoVisionTransformer:
      * no cls token — ``pos_embed`` covers patch tokens only,
      * a ``PosConv`` generates position information at the actual grid size,
      * local-window attention is implemented as an additive log-mask bias
        passed to every block (requires xformers).
    """

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        ffn_bias=True,
        proj_bias=True,
        drop_path_rate=0.0,
        drop_path_uniform=False,
        init_values=1e-5,  # for layerscale: None or 0 => no layerscale
        embed_layer=PatchEmbed,
        act_layer=nn.GELU,
        block_fn=NestedTensorBlock,
        ffn_layer="mlp",
        block_chunks=1,
        window_size=7,
        **kwargs
    ):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            proj_bias (bool): enable bias for proj in attn if True
            ffn_bias (bool): enable bias for ffn if True
            drop_path_rate (float): stochastic depth rate
            drop_path_uniform (bool): apply uniform drop rate across blocks
            init_values (float): layer-scale init values
            embed_layer (nn.Module): patch embedding layer
            act_layer (nn.Module): MLP activation layer
            block_fn (nn.Module): transformer block class
            ffn_layer (str): "mlp", "swiglu", "swiglufused" or "identity"
            block_chunks: (int) split block sequence into block_chunks units for FSDP wrap
            window_size (int): attention window size; <= 0 disables windowing
        """
        super().__init__()
        norm_layer = partial(nn.LayerNorm, eps=1e-6)

        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.num_tokens = 1
        self.n_blocks = depth
        self.num_heads = num_heads
        self.patch_size = patch_size

        self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        # No cls token: the absolute embedding covers patch tokens only.
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))

        # Convolutional positional encoding that progressively replaces
        # the absolute embedding (see the blend schedule below).
        self.pos_conv = PosConv(self.embed_dim, self.embed_dim)

        self.window_size = window_size

        if drop_path_uniform is True:
            dpr = [drop_path_rate] * depth
        else:
            dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule

        if ffn_layer == "mlp":
            logger.info("using MLP layer as FFN")
            ffn_layer = Mlp
        elif ffn_layer == "swiglufused" or ffn_layer == "swiglu":
            logger.info("using SwiGLU layer as FFN")
            ffn_layer = SwiGLUFFNFused
        elif ffn_layer == "identity":
            logger.info("using Identity layer as FFN")

            def f(*args, **kwargs):
                return nn.Identity()

            ffn_layer = f
        else:
            raise NotImplementedError

        blocks_list = [
            block_fn(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                proj_bias=proj_bias,
                ffn_bias=ffn_bias,
                drop_path=dpr[i],
                norm_layer=norm_layer,
                act_layer=act_layer,
                ffn_layer=ffn_layer,
                init_values=init_values,
            )
            for i in range(depth)
        ]
        if block_chunks > 0:
            self.chunked_blocks = True
            chunked_blocks = []
            chunksize = depth // block_chunks
            for i in range(0, depth, chunksize):
                # this is to keep the block index consistent if we chunk the block list
                chunked_blocks.append([nn.Identity()] * i + blocks_list[i : i + chunksize])
            self.blocks = nn.ModuleList([BlockChunk(p) for p in chunked_blocks])
        else:
            self.chunked_blocks = False
            self.blocks = nn.ModuleList(blocks_list)

        self.norm = norm_layer(embed_dim)
        self.head = nn.Identity()

        self.mask_token = nn.Parameter(torch.zeros(1, embed_dim))

        # Cached token-grid size for the precomputed attention bias.
        self.nh = -1
        self.nw = -1
        try:
            # NOTE(review): `cfg` is not defined in this scope; this branch
            # only succeeds when a global `cfg` happens to exist. Otherwise
            # the NameError is swallowed and the bias is built lazily in
            # forward_features.
            H = cfg.data_basic['crop_size'][0]
            W = cfg.data_basic['crop_size'][1]
            pad_h = (self.patch_size - H % self.patch_size) % self.patch_size
            pad_w = (self.patch_size - W % self.patch_size) % self.patch_size
            self.nh = (H + pad_h) // self.patch_size
            self.nw = (W + pad_w) // self.patch_size
            self.prepare_attn_bias((self.nh, self.nw))
        except Exception:
            pass
        self.init_weights()

        # Schedule for blending the absolute PE into the conv PE (GPE).
        # current_step > total_step means the conv PE is fully active.
        self.total_step = 10000  # For PE -> GPE transfer
        self.start_step = 2000
        self.current_step = 20000

    def init_weights(self):
        """Apply timm-style init; zero the optional conv blocks' second conv."""
        named_apply(init_weights_vit_timm, self)
        for i in range(4):
            try:
                # conv_block is optional (its construction is disabled above).
                nn.init.constant_(self.conv_block[i].conv2.weight, 0.0)
            except Exception:
                pass

    def interpolate_pos_encoding(self, x, w, h):
        """Resample the (cls-token-free) position embedding to the current grid.

        Assumes the stored embedding comes from a square sqrt(N) x sqrt(N) grid.
        """
        previous_dtype = x.dtype
        npatch = x.shape[1]
        N = self.pos_embed.shape[1]
        if npatch == N and w == h:
            return self.pos_embed
        pos_embed = self.pos_embed.float()
        patch_pos_embed = pos_embed
        dim = x.shape[-1]
        w0 = w // self.patch_size
        h0 = h // self.patch_size
        # we add a small number to avoid floating point error in the interpolation
        # see discussion at https://github.com/facebookresearch/dino/issues/8
        w0, h0 = w0 + 0.1, h0 + 0.1

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
            scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
            mode="bicubic",
        )

        assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return patch_pos_embed.to(previous_dtype)

    def window_partition(self, x: torch.Tensor, window_size: int, hw: Tuple[int, int], conv_feature=False) -> Tuple[torch.Tensor, Tuple[int, int]]:
        """
        Partition into non-overlapping windows.

        Args:
            x (tensor): (B, N, C) tokens, or (B, C, H, W) when conv_feature.
            window_size (int): window size.
            hw (Tuple): token grid (H, W); must be divisible by window_size.

        Returns:
            windows: (B * num_windows, window_size * window_size, C).
        """
        if conv_feature == False:
            B, N, C = x.shape
            H, W = hw[0], hw[1]

            x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)

            windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size * window_size, C)
        else:
            B, C, H, W = x.shape

            x = x.view(B, C, H // window_size, window_size, W // window_size, window_size)

            windows = x.permute(0, 2, 4, 3, 5, 1).contiguous().view(-1, window_size * window_size, C)

        return windows

    def window_unpartition(self,
        windows: torch.Tensor, window_size: int, hw: Tuple[int, int], conv_feature=False
    ) -> torch.Tensor:
        """
        Reverse window_partition.

        Args:
            windows (tensor): (B * num_windows, window_size * window_size, C).
            window_size (int): window size.
            hw (Tuple): token grid (H, W).

        Returns:
            (B, H * W, C) tokens, or (B, C, H, W) when conv_feature.
        """
        H, W = hw

        B = windows.shape[0] // (H * W // window_size // window_size)
        x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)

        if conv_feature == False:
            # Fixed: original referenced undefined Hp/Wp here (NameError);
            # the flat token count for an unpadded grid is H * W.
            x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H * W, -1)
        else:
            C = windows.shape[-1]
            x = x.permute(0, 5, 1, 3, 2, 4).contiguous().view(B, C, H, W)

        return x

    def prepare_tokens_with_masks(self, x, masks=None, step=-1):
        """Patch-embed, optionally mask, and add the blended position encoding.

        The positional signal is (1 - coef) * interpolated absolute PE +
        coef * PosConv output, with coef ramping 0 -> 1 between start_step
        and total_step of training.
        """
        B, nc, w, h = x.shape
        x = self.patch_embed(x)
        if masks is not None:
            x = torch.where(masks.unsqueeze(-1), self.mask_token.to(x.dtype).unsqueeze(0), x)

        if step == -1:
            step = self.current_step
        else:
            self.current_step = step

        if step < self.start_step:
            coef = 0.0
        elif step < self.total_step:
            coef = (step - self.start_step) / (self.total_step - self.start_step)
        else:
            coef = 1.0

        x = x + (1 - coef) * self.interpolate_pos_encoding(x, w, h) + coef * self.pos_conv(x, (self.nh, self.nw))

        return x

    def prepare_attn_bias(self, shape):
        """Build the additive local-window attention bias for grid `shape`.

        Returns a (nh*nw, nh*nw + pad) tensor of 0 / -inf (log of a boolean
        mask), padded so the last dim is a multiple of 8 as xformers requires.
        Requires a CUDA device and xformers.
        """
        window_size = self.window_size
        if window_size <= 0:
            return

        import xformers.components.attention.attention_patterns as AP

        nh, nw = shape
        radius = (window_size - 1) // 2
        mask_ori = AP.local_2d_pattern(nh, nw, distance=radius + 0.1, p=torch.inf).cuda()

        pad = (8 - (nh * nw) % 8)
        if pad == 8:
            pad = 0
        mask_pad = nn.functional.pad(mask_ori, (0, pad)).contiguous()
        # `mask` is a view into mask_pad's storage: the in-place edits below
        # intentionally propagate into mask_pad before taking its log.
        if pad > 0:
            mask = mask_pad[:, :-pad].view(nh, nw, nh, nw)
        else:
            mask = mask_pad[:, :].view(nh, nw, nh, nw)

        # Corners: give border tokens a full window anchored at the corner.
        mask[:radius + 1, :radius + 1, :window_size, :window_size] = True
        mask[:radius + 1, -radius - 1:, :window_size, -window_size:] = True
        mask[-radius - 1:, :radius + 1, -window_size:, :window_size] = True
        mask[-radius - 1:, -radius - 1:, -window_size:, -window_size:] = True

        # Edges: replicate the innermost valid row/column outward.
        mask[radius + 1:-radius - 1, :radius + 1, :, :] = mask[radius + 1:-radius - 1, radius:radius + 1, :, :]
        mask[radius + 1:-radius - 1, -radius - 1:, :, :] = mask[radius + 1:-radius - 1, -radius - 1:-radius, :, :]
        mask[:radius + 1, radius + 1:-radius - 1, :, :] = mask[radius:radius + 1, radius + 1:-radius - 1, :, :]
        mask[-radius - 1:, radius + 1:-radius - 1, :, :] = mask[-radius - 1:-radius, radius + 1:-radius - 1, :, :]

        # log(True) = 0, log(False) = -inf: additive attention bias.
        bias_pad = torch.log(mask_pad)
        self.register_buffer('attn_bias', bias_pad)

        return bias_pad

    def forward_features_list(self, x_list, masks_list):
        """Run the backbone over a list of (image batch, mask) pairs."""
        x = [self.prepare_tokens_with_masks(x, masks) for x, masks in zip(x_list, masks_list)]
        for blk in self.blocks:
            x = blk(x)

        all_x = x
        output = []
        for x, masks in zip(all_x, masks_list):
            x_norm = self.norm(x)
            # NOTE(review): this class has no cls token, yet token 0 is
            # reported as the "clstoken" — inherited from the cls-token
            # variant; verify before relying on these keys.
            output.append(
                {
                    "x_norm_clstoken": x_norm[:, 0],
                    "x_norm_patchtokens": x_norm[:, 1:],
                    "x_prenorm": x,
                    "masks": masks,
                }
            )
        return output

    def forward_features(self, x, masks=None, **kwargs):
        """Backbone forward pass with optional windowed-attention bias.

        Returns [features, (B, nh, nw, H, W)] like DinoVisionTransformer.
        """
        if isinstance(x, list):
            return self.forward_features_list(x, masks)

        B, C, H, W = x.size()
        pad_h = (self.patch_size - H % self.patch_size) % self.patch_size
        pad_w = (self.patch_size - W % self.patch_size) % self.patch_size
        # Resize (rather than zero-pad) so the token grid divides exactly.
        if pad_h + pad_w > 0:
            x = torch.nn.functional.interpolate(x, (H + pad_h, W + pad_w), mode='bilinear')

        nh = (H + pad_h) // self.patch_size
        nw = (W + pad_w) // self.patch_size

        if self.window_size > 0:
            if nh == self.nh and nw == self.nw:
                # Reuse the cached bias when the grid size is unchanged.
                attn_bias = self.attn_bias
            else:
                attn_bias = self.prepare_attn_bias((nh, nw))
                self.nh = nh
                self.nw = nw
            attn_bias = attn_bias.unsqueeze(0).repeat(B * self.num_heads, 1, 1)
        else:
            attn_bias = None

        x = self.prepare_tokens_with_masks(x, masks)

        for blk in self.blocks:
            x = blk(x, attn_bias)

        x_norm = self.norm(x)
        # Four references to the same normalized map: the decoder expects a
        # 4-level feature list and reuses the final map at every level.
        features = [x_norm] * 4
        return [features, (B, (H + pad_h) // self.patch_size, (W + pad_w) // self.patch_size, H, W)]

    def _get_intermediate_layers_not_chunked(self, x, n=1):
        """Collect outputs of selected blocks (flat block list)."""
        x = self.prepare_tokens_with_masks(x)
        # If n is an int, take the n last blocks. If it's a list, take them
        output, total_block_len = [], len(self.blocks)
        blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
        for i, blk in enumerate(self.blocks):
            x = blk(x)
            if i in blocks_to_take:
                output.append(x)
        assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
        return output

    def _get_intermediate_layers_chunked(self, x, n=1):
        """Collect outputs of selected blocks when blocks are chunk-wrapped."""
        x = self.prepare_tokens_with_masks(x)
        output, i, total_block_len = [], 0, len(self.blocks[-1])
        # If n is an int, take the n last blocks. If it's a list, take them
        blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
        for block_chunk in self.blocks:
            for blk in block_chunk[i:]:  # Passing the nn.Identity()
                x = blk(x)
                if i in blocks_to_take:
                    output.append(x)
                i += 1
        assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
        return output

    def get_intermediate_layers(
        self,
        x: torch.Tensor,
        n: Union[int, Sequence] = 1,  # Layers or n last layers to take
        reshape: bool = False,
        return_class_token: bool = False,
        norm=True,
    ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]:
        """DINOv2-style intermediate-layer API.

        NOTE(review): token 0 is split off as a "class token" although this
        variant has no cls token — verify before using return_class_token.
        """
        if self.chunked_blocks:
            outputs = self._get_intermediate_layers_chunked(x, n)
        else:
            outputs = self._get_intermediate_layers_not_chunked(x, n)
        if norm:
            outputs = [self.norm(out) for out in outputs]
        class_tokens = [out[:, 0] for out in outputs]
        outputs = [out[:, 1:] for out in outputs]
        if reshape:
            B, _, w, h = x.shape
            outputs = [
                out.reshape(B, w // self.patch_size, h // self.patch_size, -1).permute(0, 3, 1, 2).contiguous()
                for out in outputs
            ]
        if return_class_token:
            return tuple(zip(outputs, class_tokens))
        return tuple(outputs)

    def forward(self, *args, is_training=False, **kwargs):
        """Delegate to forward_features; `is_training` is ignored."""
        ret = self.forward_features(*args, **kwargs)
        return ret
|
| 1326 |
+
|
| 1327 |
+
|
| 1328 |
+
|
| 1329 |
+
|
| 1330 |
+
def init_weights_vit_timm(module: nn.Module, name: str = ""):
    """ViT weight initialization, matching the original timm implementation."""
    if not isinstance(module, nn.Linear):
        return
    trunc_normal_(module.weight, std=0.02)
    if module.bias is not None:
        nn.init.zeros_(module.bias)
|
| 1336 |
+
|
| 1337 |
+
|
| 1338 |
+
def vit_small(patch_size=14, **kwargs):
    """Build a ViT-S backbone (embed 384, 12 blocks, 6 heads)."""
    return DinoVisionTransformer(
        patch_size=patch_size,
        embed_dim=384,
        depth=12,
        num_heads=6,
        mlp_ratio=4,
        block_fn=partial(NestedTensorBlock, attn_class=MemEffAttention),
        **kwargs,
    )
|
| 1349 |
+
|
| 1350 |
+
|
| 1351 |
+
def vit_base(patch_size=14, **kwargs):
    """Build a windowed ViT-B backbone (embed 768, 12 blocks, 12 heads)."""
    return DinoWindowVisionTransformer(
        patch_size=patch_size,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        block_fn=partial(NestedTensorBlock, attn_class=MemEffAttention),
        **kwargs,
    )
|
| 1362 |
+
|
| 1363 |
+
|
| 1364 |
+
def vit_large(patch_size=14, checkpoint=None, **kwargs):
    """Build a ViT-L/14 DINOv2 backbone and optionally load a checkpoint.

    Args:
        patch_size: patch size (14 matches DINOv2 pretraining).
        checkpoint: optional path to a DINOv2 state dict (.pth) to load.
        **kwargs: forwarded to DinoVisionTransformer.

    Returns:
        The constructed model; when a checkpoint is loaded, ``mask_token``
        (used only for masked-image pretraining) is deleted.
    """
    model = DinoVisionTransformer(
        img_size=518,
        patch_size=patch_size,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        mlp_ratio=4,
        block_fn=partial(NestedTensorBlock, attn_class=MemEffAttention),
        **kwargs,
    )

    if checkpoint is not None:
        with open(checkpoint, "rb") as f:
            state_dict = torch.load(f)
        try:
            model.load_state_dict(state_dict, strict=True)
        except RuntimeError:
            # Checkpoint stored blocks without chunk wrappers; remap
            # 'blocks.*' keys to 'blocks.0.*' for the chunked layout.
            new_state_dict = {}
            for key, value in state_dict.items():
                if 'blocks' in key:
                    key_new = 'blocks.0' + key[len('blocks'):]
                else:
                    key_new = key
                new_state_dict[key_new] = value

            model.load_state_dict(new_state_dict, strict=True)
        # mask_token is only needed for masked-image pretraining.
        del model.mask_token
    return model
|
| 1427 |
+
|
| 1428 |
+
|
| 1429 |
+
def vit_giant2(patch_size=16, **kwargs):
    """
    Close to ViT-giant, with embed-dim 1536 and 24 heads => embed-dim per head 64.

    Extra keyword arguments are forwarded verbatim to DinoVisionTransformer.
    """
    # Standard transformer block, but with the memory-efficient attention kernel.
    giant_block = partial(Block, attn_class=MemEffAttention)
    backbone = DinoVisionTransformer(
        patch_size=patch_size,
        embed_dim=1536,
        depth=40,
        num_heads=24,
        mlp_ratio=4,
        block_fn=giant_block,
        **kwargs,
    )
    return backbone
|
| 1443 |
+
|
| 1444 |
+
if __name__ == '__main__':
    # Ad-hoc smoke test: load a config, build the ViT-large backbone from a
    # DINOv2 checkpoint and run one forward pass on a dummy batch. Requires a
    # CUDA device plus author-local config/checkpoint paths, so it only runs
    # in the original development environment.
    try:
        from mmcv.utils import Config
    except:
        # Older mmcv exposes Config under mmcv.utils; newer stacks ship mmengine.
        from mmengine import Config

    #rgb = torch.rand((2, 3, 518, 518)).cuda()

    #cfg.data_basic['crop_size']['0']
    #cfg.data_basic['crop_size']['1']
    cfg = Config.fromfile('mu.hu/monodepth/mono/configs/HourglassDecoder/pub12.convlarge.0.3_150.py')

    #rgb = torch.arange(0, 2*3*1036*1036, 1).cuda().float().view(2, 3, 1036, 1036)
    # Dummy input; H and W need not be square but must divide by the patch size.
    rgb = torch.zeros(1, 3, 1400, 1680).cuda()
    model = vit_large(checkpoint="pretrained_weight_repo/vit/dinov2_vitl14_pretrain.pth", kwarg=cfg).cuda()

    #import timm
    #model2 = timm.models.vision_transformer.vit_large_patch14_dinov2().cuda()
    #timm.models.load_checkpoint(model2, '/cpfs02/shared/public/yvan/pretrained_weight_repo/vit/dinov2_vitl14_pretrain.pth', filter_fn=timm.models.vision_transformer.checkpoint_filter_fn)

    out1 = model(rgb)
    #out2 = model2(rgb)
    temp = 0  # debugger breakpoint anchor

    # Abandoned experiment: precomputing windowed attention masks with xFormers
    # attention patterns; kept for reference.
    # import time
    # window_size = 37
    # def prepare_window_masks(shape):
    #     if window_size <= 0:
    #         return None
    #     import xformers.components.attention.attention_patterns as AP

    #     B, nh, nw, _, _ = shape
    #     radius = (window_size-1)//2
    #     #time0 = time.time()
    #     d = AP.local_nd_distance(nh, nw, distance = radius + 0.1, p=torch.inf).cuda()
    #     #mask = AP.local_2d_pattern(nh, nw, distance = radius + 0.1, p=torch.inf).cuda()
    #     # mask = mask.view(nh, nw, nh, nw)
    #     # #time1 = time.time() - time0

    #     # # angle
    #     # mask[:radius+1, :radius+1, :window_size, :window_size] = True
    #     # mask[:radius+1, -radius-1:, :window_size, -window_size:] = True
    #     # mask[-radius-1:, :radius+1, -window_size:, :window_size] = True
    #     # mask[-radius-1:, -radius-1:, -window_size:, -window_size:] = True
    #     # time2 = time.time() - time0 - time1

    #     # # edge
    #     # mask[radius+1:-radius-1, :radius+1, :, :] = mask[radius+1:-radius-1, radius:radius+1, :, :]
    #     # mask[radius+1:-radius-1, -radius-1:, :, :] = mask[radius+1:-radius-1, -radius-1:-radius, :, :]
    #     # mask[:radius+1, radius+1:-radius-1, :, :] = mask[radius:radius+1, radius+1:-radius-1, :, :]
    #     # mask[-radius-1:, radius+1:-radius-1, :, :] = mask[-radius-1:-radius, radius+1:-radius-1, :, :]
    #     # time3 = time.time() - time0 - time2
    #     # print(time1, time2, time3)

    #     # return mask.view(nw*nw, nh*nw).unsqueeze(0).repeat(B, 1)

    # shape = (1, 55, 55, None, None)
    # mask = prepare_window_masks(shape)
    # # temp = 1
|
external/Metric3D/training/mono/model/backbones/ViT_DINO_reg.py
ADDED
|
@@ -0,0 +1,1099 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
# References:
|
| 8 |
+
# https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
|
| 9 |
+
# https://github.com/rwightman/pytorch-image-models/tree/master/timm/models/vision_transformer.py
|
| 10 |
+
|
| 11 |
+
from functools import partial
|
| 12 |
+
import math
|
| 13 |
+
import logging
|
| 14 |
+
from typing import Sequence, Tuple, Union, Callable, Optional, Dict, Any, List
|
| 15 |
+
|
| 16 |
+
import torch
|
| 17 |
+
import torch.nn as nn
|
| 18 |
+
from torch import Tensor
|
| 19 |
+
import torch.utils.checkpoint
|
| 20 |
+
from torch.nn.init import trunc_normal_
|
| 21 |
+
|
| 22 |
+
#from dinov2.layers import Mlp, PatchEmbed, SwiGLUFFNFused, MemEffAttention, NestedTensorBlock as Block
|
| 23 |
+
|
| 24 |
+
logger = logging.getLogger("dinov2")
|
| 25 |
+
|
| 26 |
+
class ConvBlock(nn.Module):
    """Pre-activation residual block: two (BN -> ReLU -> 3x3 conv) stages plus a skip."""

    def __init__(self, channels):
        super(ConvBlock, self).__init__()

        # Channel count is preserved so the identity shortcut needs no projection.
        self.act = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1)
        self.norm1 = nn.BatchNorm2d(channels)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1)
        self.norm2 = nn.BatchNorm2d(channels)

    def forward(self, x):
        # Norm -> activation -> conv, twice, then add the identity shortcut.
        branch = self.conv1(self.act(self.norm1(x)))
        branch = self.conv2(self.act(self.norm2(branch)))
        return x + branch
|
| 57 |
+
|
| 58 |
+
def make_2tuple(x):
    """Normalize *x* to a 2-tuple: ints are duplicated, 2-tuples pass through."""
    if isinstance(x, int):
        return (x, x)
    # Anything else must already be a pair.
    assert isinstance(x, tuple) and len(x) == 2
    return x
|
| 65 |
+
|
| 66 |
+
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
    """Zero out whole samples at random (stochastic depth), rescaling survivors by 1/keep_prob."""
    if not training or drop_prob == 0.0:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast across every remaining dimension.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = x.new_empty(mask_shape).bernoulli_(keep_prob)
    if keep_prob > 0.0:
        # Rescale so the expected activation magnitude is unchanged.
        mask.div_(keep_prob)
    return x * mask
|
| 76 |
+
|
| 77 |
+
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Args:
        drop_prob: Probability of zeroing an entire sample; 0.0 makes the
            module an identity.
    """

    def __init__(self, drop_prob: float = 0.0):
        super(DropPath, self).__init__()
        # Fix: default to 0.0 instead of None. With the old `None` default, a
        # default-constructed DropPath raised TypeError (`1 - None`) inside
        # drop_path() as soon as it ran in training mode.
        self.drop_prob = drop_prob

    def forward(self, x):
        # Only active while self.training is True; eval mode is a no-op.
        return drop_path(x, self.drop_prob, self.training)
|
| 86 |
+
|
| 87 |
+
class LayerScale(nn.Module):
    """Learnable per-channel scaling of the residual branch (LayerScale)."""

    def __init__(
        self,
        dim: int,
        init_values: Union[float, Tensor] = 1e-5,
        inplace: bool = False,
    ) -> None:
        super().__init__()
        self.inplace = inplace
        # One learnable scale per channel, initialized to a small constant.
        self.gamma = nn.Parameter(init_values * torch.ones(dim))

    def forward(self, x: Tensor) -> Tensor:
        if self.inplace:
            # Saves an allocation; mutates the input tensor.
            return x.mul_(self.gamma)
        return x * self.gamma
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
class PatchEmbed(nn.Module):
    """
    2D image to patch embedding: (B,C,H,W) -> (B,N,D)

    Args:
        img_size: Image size.
        patch_size: Patch token size.
        in_chans: Number of input image channels.
        embed_dim: Number of linear projection output channels.
        norm_layer: Normalization layer.
        flatten_embedding: If False, keep the (B, H, W, D) spatial layout.
    """

    def __init__(
        self,
        img_size: Union[int, Tuple[int, int]] = 224,
        patch_size: Union[int, Tuple[int, int]] = 16,
        in_chans: int = 3,
        embed_dim: int = 768,
        norm_layer: Optional[Callable] = None,
        flatten_embedding: bool = True,
    ) -> None:
        super().__init__()

        image_HW = make_2tuple(img_size)
        patch_HW = make_2tuple(patch_size)
        grid_h = image_HW[0] // patch_HW[0]
        grid_w = image_HW[1] // patch_HW[1]

        self.img_size = image_HW
        self.patch_size = patch_HW
        self.patches_resolution = (grid_h, grid_w)
        self.num_patches = grid_h * grid_w

        self.in_chans = in_chans
        self.embed_dim = embed_dim
        self.flatten_embedding = flatten_embedding

        # Non-overlapping projection: kernel size equals stride equals patch size.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_HW, stride=patch_HW)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x: Tensor) -> Tensor:
        _, _, H, W = x.shape
        patch_H, patch_W = self.patch_size

        assert H % patch_H == 0, f"Input image height {H} is not a multiple of patch height {patch_H}"
        assert W % patch_W == 0, f"Input image width {W} is not a multiple of patch width: {patch_W}"

        x = self.proj(x)  # B C H W
        H, W = x.size(2), x.size(3)
        x = x.flatten(2).transpose(1, 2)  # B HW C
        x = self.norm(x)
        if not self.flatten_embedding:
            x = x.reshape(-1, H, W, self.embed_dim)  # B H W C
        return x

    def flops(self) -> float:
        # Conv cost over the nominal patch grid, plus one norm pass when present.
        Ho, Wo = self.patches_resolution
        n_ops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
        if self.norm is not None:
            n_ops += Ho * Wo * self.embed_dim
        return n_ops
|
| 166 |
+
|
| 167 |
+
class Mlp(nn.Module):
    """Standard transformer feed-forward: fc -> act -> dropout -> fc -> dropout."""

    def __init__(
        self,
        in_features: int,
        hidden_features: Optional[int] = None,
        out_features: Optional[int] = None,
        act_layer: Callable[..., nn.Module] = nn.GELU,
        drop: float = 0.0,
        bias: bool = True,
    ) -> None:
        super().__init__()
        # Hidden/output widths default to the input width when unspecified.
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features, bias=bias)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features, bias=bias)
        # One Dropout module, applied after each linear stage.
        self.drop = nn.Dropout(drop)

    def forward(self, x: Tensor) -> Tensor:
        return self.drop(self.fc2(self.drop(self.act(self.fc1(x)))))
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
class SwiGLUFFN(nn.Module):
    """SwiGLU feed-forward: w12 projects to two halves, hidden = silu(x1) * x2, then w3.

    Args:
        in_features: Input width.
        hidden_features: Width of each gated half (defaults to in_features).
        out_features: Output width (defaults to in_features).
        act_layer: Unused; kept so the signature matches the other FFN layers.
        drop: Unused; kept so the signature matches the other FFN layers.
        bias: Whether the linear layers carry a bias.
    """

    def __init__(
        self,
        in_features: int,
        hidden_features: Optional[int] = None,
        out_features: Optional[int] = None,
        act_layer: Callable[..., nn.Module] = None,
        drop: float = 0.0,
        bias: bool = True,
    ) -> None:
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        # Single fused projection producing both the gate and the value halves.
        self.w12 = nn.Linear(in_features, 2 * hidden_features, bias=bias)
        self.w3 = nn.Linear(hidden_features, out_features, bias=bias)

    def forward(self, x: Tensor) -> Tensor:
        x12 = self.w12(x)
        x1, x2 = x12.chunk(2, dim=-1)
        # Fix: the original referenced `F.silu`, but this module never imports
        # torch.nn.functional as F, so the forward pass raised a NameError.
        hidden = nn.functional.silu(x1) * x2
        return self.w3(hidden)
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
try:
    # Prefer xFormers' fused SwiGLU kernel when the package is installed.
    from xformers.ops import SwiGLU
    #import numpy.bool
    XFORMERS_AVAILABLE = True
except ImportError:
    # Fall back to the pure-PyTorch implementation defined above.
    SwiGLU = SwiGLUFFN
    XFORMERS_AVAILABLE = False
|
| 224 |
+
|
| 225 |
+
class SwiGLUFFNFused(SwiGLU):
    """SwiGLU FFN whose hidden width is rounded for kernel-friendly alignment.

    NOTE(review): `act_layer` and `drop` are accepted only for interface parity
    with the other FFN layers; they are not forwarded to the base class.
    """

    def __init__(
        self,
        in_features: int,
        hidden_features: Optional[int] = None,
        out_features: Optional[int] = None,
        act_layer: Callable[..., nn.Module] = None,
        drop: float = 0.0,
        bias: bool = True,
    ) -> None:
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        # Shrink to 2/3 (SwiGLU spends two projections per hidden unit) and
        # round up to a multiple of 8.
        hidden_features = (int(hidden_features * 2 / 3) + 7) // 8 * 8
        super().__init__(
            in_features=in_features,
            hidden_features=hidden_features,
            out_features=out_features,
            bias=bias,
        )
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
try:
    # Fused attention ops; these back MemEffAttention below.
    from xformers.ops import memory_efficient_attention, unbind, fmha
    from xformers.components.attention import ScaledDotProduct
    from xformers.components import MultiHeadDispatch
    #import numpy.bool
    XFORMERS_AVAILABLE = True
except ImportError:
    # Without xFormers the code falls back to the reference attention path.
    logger.warning("xFormers not available")
    XFORMERS_AVAILABLE = False
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
class Attention(nn.Module):
    """Multi-head self-attention with an optional additive attention bias."""

    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        qkv_bias: bool = False,
        proj_bias: bool = True,
        attn_drop: float = 0.0,
        proj_drop: float = 0.0,
        window_size: int = 0,
    ) -> None:
        super().__init__()
        self.num_heads = num_heads
        # Queries are scaled by 1/sqrt(head_dim) before the dot product.
        self.scale = (dim // num_heads) ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim, bias=proj_bias)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x: Tensor, attn_bias=None) -> Tensor:
        B, N, C = x.shape
        head_dim = C // self.num_heads
        # (B, N, 3C) -> (3, B, heads, N, head_dim)
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0] * self.scale, qkv[1], qkv[2]

        scores = q @ k.transpose(-2, -1)
        if attn_bias is not None:
            # The bias may be padded along the key axis; trim to sequence length.
            scores = scores + attn_bias[:, :, :N]

        weights = self.attn_drop(scores.softmax(dim=-1))

        out = (weights @ v).transpose(1, 2).reshape(B, N, C)
        return self.proj_drop(self.proj(out))
|
| 300 |
+
|
| 301 |
+
|
| 302 |
+
class MemEffAttention(Attention):
    # Attention variant that dispatches to xFormers' memory-efficient kernel
    # when available, deferring to the reference implementation otherwise.

    def forward(self, x: Tensor, attn_bias=None) -> Tensor:
        """Attend over (B, N, C) tokens; a non-None `attn_bias` requires xFormers."""
        if not XFORMERS_AVAILABLE:
            #if True:
            assert attn_bias is None, "xFormers is required for nested tensors usage"
            return super().forward(x, attn_bias)

        B, N, C = x.shape
        # (B, N, 3, heads, head_dim) layout expected by memory_efficient_attention.
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)

        q, k, v = unbind(qkv, 2)
        if attn_bias is not None:
            # The bias may be padded along the key axis; trim to sequence length.
            x = memory_efficient_attention(q, k, v, attn_bias=attn_bias[:, :, :N])
        else:
            x = memory_efficient_attention(q, k, v)
        x = x.reshape([B, N, C])

        x = self.proj(x)
        x = self.proj_drop(x)
        return x
|
| 322 |
+
|
| 323 |
+
try:
    # fmha supplies block-diagonal attention masks; scaled_index_add and
    # index_select_cat back the fused residual path in NestedTensorBlock.
    from xformers.ops import fmha
    from xformers.ops import scaled_index_add, index_select_cat
    #import numpy.bool
    XFORMERS_AVAILABLE = True
except ImportError:
    logger.warning("xFormers not available")
    XFORMERS_AVAILABLE = False
|
| 331 |
+
|
| 332 |
+
class Block(nn.Module):
    """
    Pre-norm transformer block with optional LayerScale and stochastic depth.

    Residual layout: x + ls1(attn(norm1(x))) followed by x + ls2(mlp(norm2(x))).
    During training with drop_path > 0.1, whole samples are dropped from each
    residual branch via drop_add_residual_stochastic_depth to save compute.
    """

    def __init__(
        self,
        dim: int,
        num_heads: int,
        mlp_ratio: float = 4.0,
        qkv_bias: bool = False,
        proj_bias: bool = True,
        ffn_bias: bool = True,
        drop: float = 0.0,
        attn_drop: float = 0.0,
        init_values=None,
        drop_path: float = 0.0,
        act_layer: Callable[..., nn.Module] = nn.GELU,
        norm_layer: Callable[..., nn.Module] = nn.LayerNorm,
        attn_class: Callable[..., nn.Module] = Attention,
        ffn_layer: Callable[..., nn.Module] = Mlp,
    ) -> None:
        super().__init__()
        # print(f"biases: qkv: {qkv_bias}, proj: {proj_bias}, ffn: {ffn_bias}")
        self.norm1 = norm_layer(dim)
        self.attn = attn_class(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            proj_bias=proj_bias,
            attn_drop=attn_drop,
            proj_drop=drop,
        )
        # LayerScale only when init_values is truthy (None or 0 disables it).
        self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
        self.drop_path1 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = ffn_layer(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=act_layer,
            drop=drop,
            bias=ffn_bias,
        )
        self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
        self.drop_path2 = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

        self.sample_drop_ratio = drop_path

    def forward(self, x: Tensor, attn_bias=None) -> Tensor:
        def attn_residual_func(x: Tensor, attn_bias) -> Tensor:
            return self.ls1(self.attn(self.norm1(x), attn_bias))

        # Fix: accepts an (ignored) attn_bias because
        # drop_add_residual_stochastic_depth always calls
        # residual_func(x_subset, attn_bias); the one-argument original raised
        # TypeError on the FFN pass of the high-drop training branch.
        def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
            return self.ls2(self.mlp(self.norm2(x)))

        if self.training and self.sample_drop_ratio > 0.1:
            # the overhead is compensated only for a drop path rate larger than 0.1
            x = drop_add_residual_stochastic_depth(
                x,
                residual_func=attn_residual_func,
                sample_drop_ratio=self.sample_drop_ratio,
                attn_bias=attn_bias
            )
            x = drop_add_residual_stochastic_depth(
                x,
                residual_func=ffn_residual_func,
                sample_drop_ratio=self.sample_drop_ratio,
            )
        elif self.training and self.sample_drop_ratio > 0.0:
            x = x + self.drop_path1(attn_residual_func(x, attn_bias))
            # Fix (was a FIXME): use drop_path2, the module created for this
            # branch, instead of reusing drop_path1. Both are parameterless
            # DropPath(drop_path) modules, so sampling behavior is unchanged.
            x = x + self.drop_path2(ffn_residual_func(x))
        else:
            x = x + attn_residual_func(x, attn_bias)
            x = x + ffn_residual_func(x)
        return x
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
def drop_add_residual_stochastic_depth(
    x: Tensor,
    residual_func: Callable[[Tensor], Tensor],
    sample_drop_ratio: float = 0.0, attn_bias=None
) -> Tensor:
    """Run `residual_func` on a random batch subset and scatter-add the rescaled
    residuals back in place of full-batch drop-path."""
    batch, _, _ = x.shape
    # At least one sample always survives.
    keep = max(int(batch * (1 - sample_drop_ratio)), 1)
    brange = torch.randperm(batch, device=x.device)[:keep]

    # Residual is computed only for the surviving samples.
    residual = residual_func(x[brange], attn_bias)

    # index_add operates on 2-D views; flatten everything past the batch dim.
    flat_x = x.flatten(1)
    flat_res = residual.flatten(1)

    # Rescale so the expected residual magnitude matches the full batch.
    scale = batch / keep

    out = torch.index_add(flat_x, 0, brange, flat_res.to(dtype=x.dtype), alpha=scale)
    return out.view_as(x)
|
| 429 |
+
|
| 430 |
+
|
| 431 |
+
def get_branges_scales(x, sample_drop_ratio=0.0):
    """Pick the surviving batch indices and the residual rescale factor for drop-path."""
    b, n, d = x.shape
    # At least one sample always survives.
    keep = max(int(b * (1 - sample_drop_ratio)), 1)
    brange = torch.randperm(b, device=x.device)[:keep]
    return brange, b / keep
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
def add_residual(x, brange, residual, residual_scale_factor, scaling_vector=None):
    """Scatter-add `residual` back into rows `brange` of x, optionally with a
    per-channel LayerScale vector via xFormers' fused kernel.

    Note: the plain path returns a 2-D flattened tensor; callers view_as(x).
    """
    if scaling_vector is None:
        # Plain path: flatten past the batch dim and use a scalar alpha.
        return torch.index_add(
            x.flatten(1), 0, brange, residual.flatten(1).to(dtype=x.dtype),
            alpha=residual_scale_factor,
        )
    # Fused xFormers path with per-channel scaling.
    return scaled_index_add(
        x, brange, residual.to(dtype=x.dtype), scaling=scaling_vector, alpha=residual_scale_factor
    )
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
# Cache of block-diagonal attention masks, keyed by the tuple of
# (batch, seq_len) shapes they were built for; reused across forward passes.
attn_bias_cache: Dict[Tuple, Any] = {}


def get_attn_bias_and_cat(x_list, branges=None):
    """
    this will perform the index select, cat the tensors, and provide the attn_bias from cache
    """
    # When branges is given, only the surviving rows of each tensor count.
    batch_sizes = [b.shape[0] for b in branges] if branges is not None else [x.shape[0] for x in x_list]
    all_shapes = tuple((b, x.shape[1]) for b, x in zip(batch_sizes, x_list))
    if all_shapes not in attn_bias_cache.keys():
        # One sequence length entry per sample so a single fused attention call
        # can process the concatenation with a block-diagonal mask.
        seqlens = []
        for b, x in zip(batch_sizes, x_list):
            for _ in range(b):
                seqlens.append(x.shape[1])
        attn_bias = fmha.BlockDiagonalMask.from_seqlens(seqlens)
        attn_bias._batch_sizes = batch_sizes
        attn_bias_cache[all_shapes] = attn_bias

    if branges is not None:
        # Gather the selected rows from every tensor and fuse into one flat batch.
        cat_tensors = index_select_cat([x.flatten(1) for x in x_list], branges).view(1, -1, x_list[0].shape[-1])
    else:
        tensors_bs1 = tuple(x.reshape([1, -1, *x.shape[2:]]) for x in x_list)
        cat_tensors = torch.cat(tensors_bs1, dim=1)

    return attn_bias_cache[all_shapes], cat_tensors
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
def drop_add_residual_stochastic_depth_list(
    x_list: List[Tensor],
    residual_func: Callable[[Tensor, Any], Tensor],
    sample_drop_ratio: float = 0.0,
    scaling_vector=None,
) -> Tensor:
    """Stochastic-depth residual over a list of nested tensors.

    Each tensor keeps its own random subset of samples; the subsets are fused
    into one attention call via a block-diagonal bias, then the residuals are
    scattered back with per-tensor rescale factors.
    """
    # 1) generate random set of indices for dropping samples in the batch
    branges_scales = [get_branges_scales(x, sample_drop_ratio=sample_drop_ratio) for x in x_list]
    branges = [s[0] for s in branges_scales]
    residual_scale_factors = [s[1] for s in branges_scales]

    # 2) get attention bias and index+concat the tensors
    attn_bias, x_cat = get_attn_bias_and_cat(x_list, branges)

    # 3) apply residual_func to get residual, and split the result
    residual_list = attn_bias.split(residual_func(x_cat, attn_bias=attn_bias))  # type: ignore

    outputs = []
    for x, brange, residual, residual_scale_factor in zip(x_list, branges, residual_list, residual_scale_factors):
        outputs.append(add_residual(x, brange, residual, residual_scale_factor, scaling_vector).view_as(x))
    return outputs
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
class NestedTensorBlock(Block):
    # Block variant that can process a list of variable-length token tensors in
    # one fused attention call (requires xFormers for the list path).

    def forward_nested(self, x_list: List[Tensor]) -> List[Tensor]:
        """
        x_list contains a list of tensors to nest together and run
        """
        assert isinstance(self.attn, MemEffAttention)

        if self.training and self.sample_drop_ratio > 0.0:

            def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
                return self.attn(self.norm1(x), attn_bias=attn_bias)

            def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
                return self.mlp(self.norm2(x))

            # LayerScale is folded into the fused scatter-add via scaling_vector.
            x_list = drop_add_residual_stochastic_depth_list(
                x_list,
                residual_func=attn_residual_func,
                sample_drop_ratio=self.sample_drop_ratio,
                scaling_vector=self.ls1.gamma if isinstance(self.ls1, LayerScale) else None,
            )
            x_list = drop_add_residual_stochastic_depth_list(
                x_list,
                residual_func=ffn_residual_func,
                sample_drop_ratio=self.sample_drop_ratio,
                scaling_vector=self.ls2.gamma if isinstance(self.ls1, LayerScale) else None,  # NOTE(review): reads ls2 but checks ls1; both are created together in Block.__init__, but confirm intent
            )
            return x_list
        else:

            def attn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
                return self.ls1(self.attn(self.norm1(x), attn_bias=attn_bias))

            def ffn_residual_func(x: Tensor, attn_bias=None) -> Tensor:
                return self.ls2(self.mlp(self.norm2(x)))

            # Concatenate all sequences and attend once behind a block-diagonal mask.
            attn_bias, x = get_attn_bias_and_cat(x_list)
            x = x + attn_residual_func(x, attn_bias=attn_bias)
            x = x + ffn_residual_func(x)
            return attn_bias.split(x)

    def forward(self, x_or_x_list, attn_bias=None):
        # Single tensor -> plain Block path; list of tensors -> nested path.
        if isinstance(x_or_x_list, Tensor):
            return super().forward(x_or_x_list, attn_bias)
        elif isinstance(x_or_x_list, list):
            assert XFORMERS_AVAILABLE, "Please install xFormers for nested tensors usage"
            return self.forward_nested(x_or_x_list)
        else:
            raise AssertionError
|
| 550 |
+
|
| 551 |
+
|
| 552 |
+
def named_apply(fn: Callable, module: nn.Module, name="", depth_first=True, include_root=False) -> nn.Module:
    """Recursively invoke ``fn(module=..., name=...)`` over a module tree.

    Children are always visited; the root is visited only when
    ``include_root`` is True, either before (``depth_first=False``) or after
    (``depth_first=True``) its descendants.  Returns ``module`` unchanged.
    """
    visit_root = include_root
    if visit_root and not depth_first:
        fn(module=module, name=name)
    for child_name, child in module.named_children():
        qualified = f"{name}.{child_name}" if name else child_name
        # Descendants are always included once we recurse below the root.
        named_apply(fn=fn, module=child, name=qualified, depth_first=depth_first, include_root=True)
    if visit_root and depth_first:
        fn(module=module, name=name)
    return module
|
| 561 |
+
|
| 562 |
+
|
| 563 |
+
class BlockChunk(nn.ModuleList):
    """A contiguous run of transformer blocks executed sequentially.

    Used when the block list is chunked (e.g. for FSDP wrapping); leading
    entries may be ``nn.Identity`` placeholders that keep absolute block
    indices consistent across chunks.
    """

    def forward(self, x, others=None):
        """Run each sub-block in order, forwarding ``others`` only when given."""
        for b in self:
            # Fix: identity comparison (`is None`) instead of `== None`, which
            # would invoke __eq__ (e.g. elementwise on tensors).
            if others is None:
                x = b(x)
            else:
                x = b(x, others)
        return x
|
| 571 |
+
|
| 572 |
+
|
| 573 |
+
class DinoVisionTransformer(nn.Module):
    """DINOv2-style Vision Transformer backbone.

    Adapted for the Metric3D depth pipeline: ``forward_features`` resizes the
    input to a multiple of the patch size and returns a list of intermediate
    feature maps plus shape metadata, instead of DINOv2's usual token dict.
    """

    def __init__(
        self,
        img_size=518,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        ffn_bias=True,
        proj_bias=True,
        drop_path_rate=0.0,
        drop_path_uniform=False,
        init_values=1e-5,  # for layerscale: None or 0 => no layerscale
        embed_layer=PatchEmbed,
        act_layer=nn.GELU,
        block_fn=Block,
        ffn_layer="mlp",
        block_chunks=1,
        num_register_tokens=0,
        interpolate_antialias=False,
        interpolate_offset=0.1,
        multi_output=False,
        **kwargs
    ):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            proj_bias (bool): enable bias for proj in attn if True
            ffn_bias (bool): enable bias for ffn if True
            drop_path_rate (float): stochastic depth rate
            drop_path_uniform (bool): apply uniform drop rate across blocks
            init_values (float): layer-scale init values
            embed_layer (nn.Module): patch embedding layer
            act_layer (nn.Module): MLP activation layer
            block_fn (nn.Module): transformer block class
            ffn_layer (str): "mlp", "swiglu", "swiglufused" or "identity"
            block_chunks: (int) split block sequence into block_chunks units for FSDP wrap
            num_register_tokens: (int) number of extra cls tokens (so-called "registers")
            interpolate_antialias: (bool) flag to apply anti-aliasing when interpolating positional embeddings
            interpolate_offset: (float) work-around offset to apply when interpolating positional embeddings
            multi_output: (bool) if True, forward_features collects 4 evenly
                spaced intermediate block outputs instead of 4 copies of the
                final normed output
        """
        super().__init__()
        norm_layer = partial(nn.LayerNorm, eps=1e-6)

        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.num_tokens = 1  # the single [CLS] token prepended to patch tokens
        self.n_blocks = depth
        self.num_heads = num_heads
        self.patch_size = patch_size
        self.num_register_tokens = num_register_tokens
        self.interpolate_antialias = interpolate_antialias
        self.interpolate_offset = interpolate_offset

        self.patch_embed = embed_layer(img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
        num_patches = self.patch_embed.num_patches

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        # pos_embed covers cls token + patch tokens (register tokens get none).
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
        self.multi_output = multi_output
        assert num_register_tokens >= 0
        self.register_tokens = (
            nn.Parameter(torch.zeros(1, num_register_tokens, embed_dim)) if num_register_tokens else None
        )

        if drop_path_uniform is True:
            dpr = [drop_path_rate] * depth
        else:
            dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule

        if ffn_layer == "mlp":
            logger.info("using MLP layer as FFN")
            ffn_layer = Mlp
        elif ffn_layer == "swiglufused" or ffn_layer == "swiglu":
            logger.info("using SwiGLU layer as FFN")
            ffn_layer = SwiGLUFFNFused
        elif ffn_layer == "identity":
            logger.info("using Identity layer as FFN")

            def f(*args, **kwargs):
                return nn.Identity()

            ffn_layer = f
        else:
            raise NotImplementedError

        blocks_list = [
            block_fn(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                proj_bias=proj_bias,
                ffn_bias=ffn_bias,
                drop_path=dpr[i],
                norm_layer=norm_layer,
                act_layer=act_layer,
                ffn_layer=ffn_layer,
                init_values=init_values,
            )
            for i in range(depth)
        ]
        if block_chunks > 0:
            self.chunked_blocks = True
            chunked_blocks = []
            chunksize = depth // block_chunks
            for i in range(0, depth, chunksize):
                # this is to keep the block index consistent if we chunk the block list
                chunked_blocks.append([nn.Identity()] * i + blocks_list[i : i + chunksize])
            self.blocks = nn.ModuleList([BlockChunk(p) for p in chunked_blocks])
        else:
            self.chunked_blocks = False
            self.blocks = nn.ModuleList(blocks_list)

        self.norm = norm_layer(embed_dim)
        self.head = nn.Identity()

        # Token substituted for masked patches in masked-image-modeling mode.
        self.mask_token = nn.Parameter(torch.zeros(1, embed_dim))

        self.init_weights()

    def init_weights(self):
        """Truncated-normal init for pos/cls/register tokens, timm init for Linears."""
        trunc_normal_(self.pos_embed, std=0.02)
        nn.init.normal_(self.cls_token, std=1e-6)
        if self.register_tokens is not None:
            nn.init.normal_(self.register_tokens, std=1e-6)
        named_apply(init_weights_vit_timm, self)

    def interpolate_pos_encoding(self, x, w, h):
        """Bicubically resample the patch positional embedding to a w x h input.

        NOTE(review): the pretrained embedding is assumed to form a square grid
        (sqrt(N) x sqrt(N)); non-square pretraining grids would break the
        reshape below.
        """
        previous_dtype = x.dtype
        npatch = x.shape[1] - 1
        N = self.pos_embed.shape[1] - 1
        if npatch == N and w == h:
            return self.pos_embed
        pos_embed = self.pos_embed.float()
        class_pos_embed = pos_embed[:, 0]
        patch_pos_embed = pos_embed[:, 1:]
        dim = x.shape[-1]
        w0 = w // self.patch_size
        h0 = h // self.patch_size
        # we add a small number to avoid floating point error in the interpolation
        # see discussion at https://github.com/facebookresearch/dino/issues/8
        w0, h0 = w0 + self.interpolate_offset, h0 + self.interpolate_offset

        sqrt_N = math.sqrt(N)
        sx, sy = float(w0) / sqrt_N, float(h0) / sqrt_N
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed.reshape(1, int(sqrt_N), int(sqrt_N), dim).permute(0, 3, 1, 2),
            scale_factor=(sx, sy),
            mode="bicubic",
            antialias=self.interpolate_antialias,
        )

        assert int(w0) == patch_pos_embed.shape[-2]
        assert int(h0) == patch_pos_embed.shape[-1]
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1).to(previous_dtype)

    def prepare_tokens_with_masks(self, x, masks=None):
        """Patchify, optionally mask, prepend cls (and register) tokens, add pos embed."""
        B, nc, w, h = x.shape
        x = self.patch_embed(x)
        if masks is not None:
            # Replace masked patch tokens with the learned mask token.
            x = torch.where(masks.unsqueeze(-1), self.mask_token.to(x.dtype).unsqueeze(0), x)

        x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
        x = x + self.interpolate_pos_encoding(x, w, h)

        if self.register_tokens is not None:
            # Register tokens go between cls and patch tokens; they carry no
            # positional embedding.
            x = torch.cat(
                (
                    x[:, :1],
                    self.register_tokens.expand(x.shape[0], -1, -1),
                    x[:, 1:],
                ),
                dim=1,
            )

        return x

    def forward_features_list(self, x_list, masks_list):
        """Run a list of inputs through the blocks as one nested batch.

        Returns one DINOv2-style token dict per input.
        """
        x = [self.prepare_tokens_with_masks(x, masks) for x, masks in zip(x_list, masks_list)]
        for blk in self.blocks:
            x = blk(x)

        all_x = x
        output = []
        for x, masks in zip(all_x, masks_list):
            x_norm = self.norm(x)
            output.append(
                {
                    "x_norm_clstoken": x_norm[:, 0],
                    "x_norm_regtokens": x_norm[:, 1 : self.num_register_tokens + 1],
                    "x_norm_patchtokens": x_norm[:, self.num_register_tokens + 1 :],
                    "x_prenorm": x,
                    "masks": masks,
                }
            )
        return output

    def forward_features(self, x, masks=None):
        """Backbone forward for the depth decoder.

        Returns ``[features, (B, grid_h, grid_w, H, W, num_register_tokens)]``
        where ``features`` is a list of 4 token tensors (4 copies of the final
        normed output, or 4 evenly spaced intermediates when ``multi_output``).
        """
        if isinstance(x, list):
            return self.forward_features_list(x, masks)

        B, C, H, W = x.size()
        pad_h = (self.patch_size - H % self.patch_size)
        pad_w = (self.patch_size - W % self.patch_size)
        if pad_h == self.patch_size:
            pad_h = 0
        if pad_w == self.patch_size:
            pad_w = 0
        #x = nn.functional.pad(x, (pad_h//2, pad_h-pad_h//2, pad_w//2, pad_w-pad_w//2))
        # NOTE(review): inputs whose sides are not multiples of patch_size are
        # *resized* (bilinear), not padded — this slightly warps the image.
        if pad_h + pad_w > 0:
            x = torch.nn.functional.interpolate(x, (H+pad_h, W+pad_w), mode='bilinear')

        x = self.prepare_tokens_with_masks(x, masks)

        # return {
        #     "x_norm_clstoken": x_norm[:, 0],
        #     "x_norm_regtokens": x_norm[:, 1 : self.num_register_tokens + 1],
        #     "x_norm_patchtokens": x_norm[:, self.num_register_tokens + 1 :],
        #     "x_prenorm": x,
        #     "masks": masks,
        # }
        if self.multi_output == False:
            for blk in self.blocks:
                x = blk(x)
            x_norm = self.norm(x)
            # The same final tensor is appended 4 times so the decoder's
            # 4-level interface is satisfied without intermediate taps.
            features = []
            features.append(x_norm)
            features.append(x_norm)
            features.append(x_norm)
            features.append(x_norm)
            return [features, (B, (H+pad_h)//self.patch_size, (W+pad_w)//self.patch_size, H, W, self.num_register_tokens)]
        else:
            features = []
            for blk in self.blocks:
                for idx, sub_blk in enumerate(blk):
                    x = sub_blk(x)
                    # Tap every quarter of the chunk; note intermediates are
                    # NOT passed through self.norm here.
                    if (idx + 1) % (len(blk) // 4) == 0:
                        features.append(x)

            return [features, (B, (H+pad_h)//self.patch_size, (W+pad_w)//self.patch_size, H, W, self.num_register_tokens)]


    def _get_intermediate_layers_not_chunked(self, x, n=1):
        """Collect raw outputs of selected blocks (flat, unchunked block list)."""
        x = self.prepare_tokens_with_masks(x)
        # If n is an int, take the n last blocks. If it's a list, take them
        output, total_block_len = [], len(self.blocks)
        blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
        for i, blk in enumerate(self.blocks):
            x = blk(x)
            if i in blocks_to_take:
                output.append(x)
        assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
        return output

    def _get_intermediate_layers_chunked(self, x, n=1):
        """Collect raw outputs of selected blocks when blocks are chunked."""
        x = self.prepare_tokens_with_masks(x)
        output, i, total_block_len = [], 0, len(self.blocks[-1])
        # If n is an int, take the n last blocks. If it's a list, take them
        blocks_to_take = range(total_block_len - n, total_block_len) if isinstance(n, int) else n
        for block_chunk in self.blocks:
            for blk in block_chunk[i:]:  # Passing the nn.Identity()
                x = blk(x)
                if i in blocks_to_take:
                    output.append(x)
                i += 1
        assert len(output) == len(blocks_to_take), f"only {len(output)} / {len(blocks_to_take)} blocks found"
        return output

    def get_intermediate_layers(
        self,
        x: torch.Tensor,
        n: Union[int, Sequence] = 1,  # Layers or n last layers to take
        reshape: bool = False,
        return_class_token: bool = False,
        norm=True,
    ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]]]:
        """DINOv2-style feature extraction: selected layers, optionally normed,
        reshaped to (B, C, gh, gw), and/or paired with their class tokens."""
        if self.chunked_blocks:
            outputs = self._get_intermediate_layers_chunked(x, n)
        else:
            outputs = self._get_intermediate_layers_not_chunked(x, n)
        if norm:
            outputs = [self.norm(out) for out in outputs]
        class_tokens = [out[:, 0] for out in outputs]
        outputs = [out[:, 1:] for out in outputs]
        if reshape:
            B, _, w, h = x.shape
            outputs = [
                out.reshape(B, w // self.patch_size, h // self.patch_size, -1).permute(0, 3, 1, 2).contiguous()
                for out in outputs
            ]
        if return_class_token:
            return tuple(zip(outputs, class_tokens))
        return tuple(outputs)

    def forward(self, *args, is_training=False, **kwargs):
        """Thin wrapper over forward_features; is_training is currently unused."""
        ret = self.forward_features(*args, **kwargs)
        return ret
        # if is_training:
        #     return ret
        # else:
        #     return self.head(ret["x_norm_clstoken"])
|
| 886 |
+
|
| 887 |
+
|
| 888 |
+
def init_weights_vit_timm(module: nn.Module, name: str = ""):
    """ViT weight initialization, original timm impl (for reproducibility).

    Only nn.Linear modules are touched: truncated-normal weights (std 0.02)
    and zeroed bias.  All other module types pass through unchanged.
    """
    if not isinstance(module, nn.Linear):
        return
    trunc_normal_(module.weight, std=0.02)
    bias = module.bias
    if bias is not None:
        nn.init.zeros_(bias)
|
| 894 |
+
|
| 895 |
+
|
| 896 |
+
def load_ckpt_dino(checkpoint, model, reserve_norm=True):
    """Load a DINOv2 state-dict into `model` and strip MIM-only parameters.

    Args:
        checkpoint: path to a torch-saved state dict, or None (no-op).
        model: the DinoVisionTransformer to initialize.
        reserve_norm: when False, the final `norm` layer is deleted as well
            (used when intermediate, un-normed features are consumed).

    A strict load is attempted first; if it fails (checkpoints saved for an
    unchunked block list), keys are remapped 'blocks.*' -> 'blocks.0.*' and
    the load is retried strictly.  `mask_token` is always deleted afterwards.
    """
    if checkpoint is None:
        return
    with open(checkpoint, "rb") as f:
        state_dict = torch.load(f)
    try:
        model.load_state_dict(state_dict, strict=True)
    # Fix: narrowed from a bare `except:` (which also swallowed
    # KeyboardInterrupt/SystemExit) to `except Exception`.
    except Exception:
        new_state_dict = {}
        for key, value in state_dict.items():
            if 'blocks' in key:
                key_new = 'blocks.0' + key[len('blocks'):]
            else:
                key_new = key
            new_state_dict[key_new] = value

        model.load_state_dict(new_state_dict, strict=True)
    del model.mask_token
    if reserve_norm == False:
        del model.norm
    return
|
| 918 |
+
|
| 919 |
+
|
| 920 |
+
def vit_small(patch_size=14, num_register_tokens=0, checkpoint=None, **kwargs):
    """Build a ViT-S/14 DINOv2 backbone (embed_dim=384, depth=12, 6 heads).

    Args:
        patch_size: spatial patch size for the patch embedding.
        num_register_tokens: number of extra register tokens.
        checkpoint: optional path to a DINOv2 state-dict; when given, weights
            are loaded and `mask_token` is deleted (see load_ckpt_dino).
        **kwargs: forwarded to DinoVisionTransformer.
    """
    model = DinoVisionTransformer(
        patch_size=patch_size,
        embed_dim=384,
        depth=12,
        num_heads=6,
        mlp_ratio=4,
        block_fn=partial(Block, attn_class=MemEffAttention),
        num_register_tokens=num_register_tokens,
        **kwargs,
    )

    load_ckpt_dino(checkpoint, model)

    return model
|
| 935 |
+
|
| 936 |
+
|
| 937 |
+
def vit_base(patch_size=14, num_register_tokens=0, checkpoint=None, **kwargs):
    """Build a ViT-B/14 DINOv2 backbone (embed_dim=768, depth=12, 12 heads).

    Args:
        patch_size: spatial patch size for the patch embedding.
        num_register_tokens: number of extra register tokens.
        checkpoint: optional path to a DINOv2 state-dict to load.
        **kwargs: forwarded to DinoVisionTransformer.
    """
    model = DinoVisionTransformer(
        patch_size=patch_size,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4,
        block_fn=partial(Block, attn_class=MemEffAttention),
        num_register_tokens=num_register_tokens,
        **kwargs,
    )
    # Fix: `checkpoint` was accepted but silently ignored; load it the same
    # way the sibling factories (e.g. vit_small) do.  load_ckpt_dino is a
    # no-op when checkpoint is None, so the default behavior is unchanged.
    load_ckpt_dino(checkpoint, model)
    return model
|
| 949 |
+
|
| 950 |
+
|
| 951 |
+
def vit_large(patch_size=14, num_register_tokens=0, checkpoint=None, **kwargs):
    """Build a ViT-L/14 DINOv2 backbone (embed_dim=1024, depth=24, 16 heads).

    Args:
        patch_size: spatial patch size for the patch embedding.
        num_register_tokens: number of extra register tokens.
        checkpoint: optional path to a DINOv2 state-dict to load.
        **kwargs: forwarded to DinoVisionTransformer.
    """
    model = DinoVisionTransformer(
        patch_size=patch_size,
        embed_dim=1024,
        depth=24,
        num_heads=16,
        mlp_ratio=4,
        block_fn=partial(Block, attn_class=MemEffAttention),
        num_register_tokens=num_register_tokens,
        **kwargs,
    )
    # Fix: the original inlined a verbatim copy of the checkpoint-loading
    # logic (including a bare `except:`).  Delegate to load_ckpt_dino, which
    # performs the identical strict load, 'blocks.' -> 'blocks.0.' remap
    # fallback, and mask_token deletion (norm is kept: reserve_norm=True).
    load_ckpt_dino(checkpoint, model)
    return model
|
| 980 |
+
|
| 981 |
+
|
| 982 |
+
def vit_giant2(patch_size=14, num_register_tokens=0, checkpoint=None, **kwargs):
    """
    Close to ViT-giant, with embed-dim 1536 and 24 heads => embed-dim per head 64
    """
    model = DinoVisionTransformer(
        patch_size=patch_size,
        embed_dim=1536,
        depth=40,
        num_heads=24,
        mlp_ratio=4,
        block_fn=partial(Block, attn_class=MemEffAttention),
        num_register_tokens=num_register_tokens,
        ffn_layer='swiglu',
        **kwargs,
    )
    # Fix: `checkpoint` was accepted but silently ignored; load it for
    # consistency with the other factories.  No-op when checkpoint is None.
    load_ckpt_dino(checkpoint, model)
    return model
|
| 998 |
+
|
| 999 |
+
|
| 1000 |
+
|
| 1001 |
+
def vit_small_reg(patch_size=14, num_register_tokens=4, checkpoint=None, **kwargs):
    """ViT-S/14 with register tokens; optionally initialized from a DINOv2 checkpoint."""
    backbone = DinoVisionTransformer(
        num_register_tokens=num_register_tokens,
        patch_size=patch_size,
        embed_dim=384,
        num_heads=6,
        depth=12,
        mlp_ratio=4,
        block_fn=partial(Block, attn_class=MemEffAttention),
        **kwargs,
    )
    # No-op when checkpoint is None; otherwise loads weights and strips mask_token.
    load_ckpt_dino(checkpoint, backbone)
    return backbone
|
| 1016 |
+
|
| 1017 |
+
|
| 1018 |
+
def vit_base_reg(patch_size=14, num_register_tokens=4, checkpoint=None, **kwargs):
    """ViT-B/14 with register tokens; optionally initialized from a DINOv2 checkpoint."""
    backbone = DinoVisionTransformer(
        num_register_tokens=num_register_tokens,
        patch_size=patch_size,
        embed_dim=768,
        num_heads=12,
        depth=12,
        mlp_ratio=4,
        block_fn=partial(Block, attn_class=MemEffAttention),
        **kwargs,
    )
    # No-op when checkpoint is None; otherwise loads weights and strips mask_token.
    load_ckpt_dino(checkpoint, backbone)
    return backbone
|
| 1033 |
+
|
| 1034 |
+
|
| 1035 |
+
def vit_large_reg(patch_size=14, num_register_tokens=4, checkpoint=None, **kwargs):
    """ViT-L/14 with register tokens; optionally initialized from a DINOv2 checkpoint."""
    backbone = DinoVisionTransformer(
        img_size=518,
        num_register_tokens=num_register_tokens,
        patch_size=patch_size,
        embed_dim=1024,
        num_heads=16,
        depth=24,
        mlp_ratio=4,
        block_fn=partial(Block, attn_class=MemEffAttention),
        **kwargs,
    )
    # No-op when checkpoint is None; otherwise loads weights and strips mask_token.
    load_ckpt_dino(checkpoint, backbone)
    return backbone
|
| 1051 |
+
|
| 1052 |
+
|
| 1053 |
+
def vit_giant2_reg(patch_size=14, num_register_tokens=4, checkpoint=None, **kwargs):
    """
    Close to ViT-giant, with embed-dim 1536 and 24 heads => embed-dim per head 64

    multi_output=True makes forward_features return 4 evenly spaced
    intermediate block outputs, and reserve_norm=False deletes the final norm
    layer after loading (intermediates are consumed un-normed).
    """
    model = DinoVisionTransformer(
        patch_size=patch_size,
        embed_dim=1536,
        depth=40,
        num_heads=24,
        mlp_ratio=4,
        block_fn=partial(Block, attn_class=MemEffAttention),
        num_register_tokens=num_register_tokens,
        ffn_layer='swiglu',
        multi_output=True,
        **kwargs,
    )

    load_ckpt_dino(checkpoint, model, reserve_norm=False)

    return model
|
| 1073 |
+
|
| 1074 |
+
if __name__ == '__main__':
    # Smoke test: build a giant2-reg backbone and run one dummy forward pass.
    # Requires CUDA, an mm-style config file, and DINOv2 weights on disk.
    try:
        from mmcv.utils import Config
    # Fix: was a bare `except:`, which would also swallow KeyboardInterrupt;
    # only an import failure should trigger the mmengine fallback.
    except ImportError:
        from mmengine import Config

    #rgb = torch.rand((2, 3, 518, 518)).cuda()

    #cfg.data_basic['crop_size']['0']
    #cfg.data_basic['crop_size']['1']
    cfg = Config.fromfile('mu.hu/monodepth/mono/configs/RAFTDecoder/vit.raft.full2t.py')

    #rgb = torch.arange(0, 2*3*1036*1036, 1).cuda().float().view(2, 3, 1036, 1036)
    rgb = torch.zeros(1, 3, 616, 1064).cuda()
    #model = vit_large_reg(checkpoint="/cpfs02/shared/public/groups/local_map/yvan/pretrained_weight_repo/vit/dinov2_vitl14_reg4_pretrain.pth", kwarg=cfg).cuda()
    model = vit_giant2_reg(checkpoint="pretrained_weight_repo/vit/dinov2_vitg14_reg4_pretrain.pth", kwarg=cfg).cuda()

    #import timm
    #model2 = timm.models.vision_transformer.vit_large_patch14_dinov2().cuda()
    #timm.models.load_checkpoint(model2, '/cpfs02/shared/public/yvan/pretrained_weight_repo/vit/dinov2_vitl14_pretrain.pth', filter_fn=timm.models.vision_transformer.checkpoint_filter_fn)

    out1 = model(rgb)
    #out2 = model2(rgb)
    temp = 0
|
| 1098 |
+
|
| 1099 |
+
|
external/Metric3D/training/mono/model/backbones/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .ViT_DINO import vit_large
|
| 2 |
+
from .ViT_DINO_reg import vit_small_reg, vit_large_reg, vit_giant2_reg
|
| 3 |
+
|
| 4 |
+
__all__ = [
|
| 5 |
+
"vit_small_reg",
|
| 6 |
+
"vit_large_reg",
|
| 7 |
+
"vit_giant2_reg",
|
| 8 |
+
]
|
external/Metric3D/training/mono/model/criterion.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .losses import *
|
| 2 |
+
from mono.utils.comm import get_func
|
| 3 |
+
import os
|
| 4 |
+
|
| 5 |
+
def build_from_cfg(cfg, default_args=None):
    """Build a module from config dict.
    Args:
        cfg (dict): Config dict. It should at least contain the key "type".
        default_args (dict, optional): Default initialization arguments.
            (Currently unused; kept for interface compatibility.)
    Returns:
        object: The constructed object.
    Raises:
        TypeError: if cfg is not a dict.
        RuntimeError: if cfg has no 'type' key.
        KeyError: if the named loss cannot be resolved.
    """
    if not isinstance(cfg, dict):
        raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
    if 'type' not in cfg:
        raise RuntimeError('should contain the loss name')
    args = cfg.copy()

    obj_name = args.pop('type')
    # Dotted import path of the loss class, derived from this file's location
    # relative to the CWD (assumes the process runs from the project root).
    obj_path = os.path.dirname(__file__).split(os.getcwd() + '/')[-1].replace('/', '.') + '.losses.' + obj_name

    # Fix: the original called get_func(...)(**args) first and checked for
    # None afterwards, so a missing loss raised a confusing TypeError
    # ("NoneType is not callable") and the KeyError branch was unreachable.
    obj_cls = get_func(obj_path)
    if obj_cls is None:
        raise KeyError(f'cannot find {obj_name}.')
    return obj_cls(**args)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def build_criterions(cfg):
    """Instantiate every configured loss, grouped by supervision key.

    Each loss config dict in cfg.losses is updated *in place* with the
    canonical-space settings from cfg.data_basic (and cfg.out_channel for
    classification losses) before construction.
    """
    if 'losses' not in cfg:
        raise RuntimeError('Losses have not been configured.')
    basic_cfg = cfg.data_basic

    losses = cfg.losses
    if not isinstance(losses, dict):
        raise RuntimeError(f'Cannot initial losses with the type {type(losses)}')
    criterions = dict()
    for key, loss_list in losses.items():
        built = []
        for loss_cfg in loss_list:
            # update the canonical_space configs to the current loss cfg
            loss_cfg.update(basic_cfg)
            if 'out_channel' in loss_cfg:
                loss_cfg.update(out_channel=cfg.out_channel)  # classification loss need to update the channels
            built.append(build_from_cfg(loss_cfg))
        criterions[key] = built
    return criterions
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
|
external/Metric3D/training/mono/model/decode_heads/RAFTDepthNormalDPTDecoder5.py
ADDED
|
@@ -0,0 +1,818 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import numpy as np
|
| 4 |
+
import math
|
| 5 |
+
import torch.nn.functional as F
|
| 6 |
+
|
| 7 |
+
def compute_depth_expectation(prob, depth_values):
    """Expected depth of a probability volume.

    `prob` is (B, D, H, W); `depth_values` holds the D bin depths and is
    broadcast over the spatial dims.  Returns the per-pixel expectation,
    summing out the depth-bin dimension.
    """
    bins = depth_values.view(*depth_values.shape, 1, 1)
    return torch.sum(prob * bins, 1)
|
| 11 |
+
|
| 12 |
+
def interpolate_float32(x, size=None, scale_factor=None, mode='nearest', align_corners=None):
|
| 13 |
+
with torch.autocast(device_type='cuda', dtype=torch.bfloat16, enabled=False):
|
| 14 |
+
return F.interpolate(x.float(), size=size, scale_factor=scale_factor, mode=mode, align_corners=align_corners)
|
| 15 |
+
|
| 16 |
+
# def upflow8(flow, mode='bilinear'):
|
| 17 |
+
# new_size = (8 * flow.shape[2], 8 * flow.shape[3])
|
| 18 |
+
# return 8 * F.interpolate(flow, size=new_size, mode=mode, align_corners=True)
|
| 19 |
+
|
| 20 |
+
def upflow4(flow, mode='bilinear'):
|
| 21 |
+
new_size = (4 * flow.shape[2], 4 * flow.shape[3])
|
| 22 |
+
with torch.autocast(device_type='cuda', dtype=torch.bfloat16, enabled=False):
|
| 23 |
+
return F.interpolate(flow, size=new_size, mode=mode, align_corners=True)
|
| 24 |
+
|
| 25 |
+
def coords_grid(batch, ht, wd):
    """Return an all-zero coordinate volume of shape (batch, 6, ht, wd).

    The original RAFT meshgrid initialization (see the commented line in the
    source history) was replaced by six zero planes, so the 'coordinates'
    start at zero for every pixel.
    """
    planes = torch.zeros(6, ht, wd, dtype=torch.float32)
    return planes.unsqueeze(0).repeat(batch, 1, 1, 1)
|
| 30 |
+
|
| 31 |
+
def norm_normalize(norm_out):
    """Normalise a raw (nx, ny, nz, kappa) prediction.

    The normal vector is rescaled to unit length; kappa (the concentration /
    confidence channel) is mapped into (min_kappa, inf) via ELU + 1.
    """
    min_kappa = 0.01
    nx, ny, nz, kappa = torch.split(norm_out, 1, dim=1)
    length = torch.sqrt(nx ** 2.0 + ny ** 2.0 + nz ** 2.0) + 1e-10  # avoid div-by-zero
    kappa = F.elu(kappa) + 1.0 + min_kappa
    return torch.cat([nx / length, ny / length, nz / length, kappa], dim=1)
|
| 38 |
+
|
| 39 |
+
# uncertainty-guided sampling (only used during training)
|
| 40 |
+
@torch.no_grad()
def sample_points(init_normal, gt_norm_mask, sampling_ratio, beta):
    """Uncertainty-guided point sampling (only used during training).

    Picks N = sampling_ratio * H * W pixel locations per image: a fraction
    `beta` of them from the most uncertain pixels (importance sampling), the
    rest uniformly at random (coverage sampling).

    Args:
        init_normal: (B, C, H, W); the last channel is kappa (confidence).
        gt_norm_mask: optional validity mask; invalid pixels are never sampled.
        sampling_ratio: fraction of all pixels to sample.
        beta: fraction of the N samples taken by uncertainty ranking.

    Returns:
        point_coords: (B, 1, N, 2) grid_sample-style coords in [-1, 1].
        rows_int, cols_int: (B, N) integer pixel indices of the samples.
    """
    device = init_normal.device
    B, _, H, W = init_normal.shape
    N = int(sampling_ratio * H * W)
    num_importance = int(beta * N)

    # Uncertainty map: negated kappa channel (low kappa == high uncertainty).
    uncertainty_map = -1 * init_normal[:, -1, :, :]  # B, H, W

    # Push pixels without ground truth to the bottom of the ranking.
    if gt_norm_mask is not None:
        gt_invalid_mask = F.interpolate(gt_norm_mask.float(), size=[H, W], mode='nearest')
        gt_invalid_mask = gt_invalid_mask[:, 0, :, :] < 0.5
        uncertainty_map[gt_invalid_mask] = -1e4

    # Rank all pixels by uncertainty, most uncertain first.  (B, H*W)
    _, idx = uncertainty_map.view(B, -1).sort(1, descending=True)

    if num_importance > 0:
        importance = idx[:, :num_importance]   # B, beta*N most uncertain
        remaining = idx[:, num_importance:]    # B, H*W - beta*N
        num_coverage = N - num_importance
        if num_coverage <= 0:
            samples = importance
        else:
            coverage = _sample_coverage(remaining, num_coverage)
            samples = torch.cat((importance, coverage), dim=1)  # B, N
    else:
        # beta*N == 0: pure random coverage over every pixel.
        samples = _sample_coverage(idx, N)

    # Convert flat indices into normalized grid_sample coordinates.
    rows_int = samples // W                # 0 for first row, H-1 for last row
    rows_float = rows_int / float(H - 1)   # 0 to 1.0
    rows_float = (rows_float * 2.0) - 1.0  # -1.0 to 1.0

    cols_int = samples % W                 # 0 for first column, W-1 for last column
    cols_float = cols_int / float(W - 1)   # 0 to 1.0
    cols_float = (cols_float * 2.0) - 1.0  # -1.0 to 1.0

    point_coords = torch.zeros(B, 1, N, 2)
    point_coords[:, 0, :, 0] = cols_float  # x coord
    point_coords[:, 0, :, 1] = rows_float  # y coord
    point_coords = point_coords.to(device)
    return point_coords, rows_int, cols_int


def _sample_coverage(candidates, num_coverage):
    """Uniformly pick `num_coverage` indices per batch row from `candidates` (B, M).

    Extracted from the two previously duplicated per-batch randperm loops;
    RNG consumption (one randperm of size M per row) is unchanged.
    """
    picked = []
    for i in range(candidates.size(0)):
        perm = torch.randperm(candidates.size(1))  # shuffle the M candidates
        picked.append(candidates[i, :][perm[:num_coverage]].view(1, -1))
    return torch.cat(picked, dim=0)  # B, num_coverage
|
| 107 |
+
|
| 108 |
+
class FlowHead(nn.Module):
    """Two parallel 3x3-conv heads over a shared hidden state.

    One branch regresses the depth channels, the other the normal channels;
    the outputs are concatenated along the channel dimension.
    """

    def __init__(self, input_dim=128, hidden_dim=256, output_dim_depth=2, output_dim_norm=4):
        super(FlowHead, self).__init__()
        half = hidden_dim // 2
        self.conv1d = nn.Conv2d(input_dim, half, 3, padding=1)
        self.conv2d = nn.Conv2d(half, output_dim_depth, 3, padding=1)

        self.conv1n = nn.Conv2d(input_dim, half, 3, padding=1)
        self.conv2n = nn.Conv2d(half, output_dim_norm, 3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        depth_branch = self.conv2d(self.relu(self.conv1d(x)))
        normal_branch = self.conv2n(self.relu(self.conv1n(x)))
        return torch.cat((depth_branch, normal_branch), dim=1)
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
class ConvGRU(nn.Module):
    """Convolutional GRU cell whose gates also receive precomputed context
    biases (cz, cr, cq) from the context encoder."""

    def __init__(self, hidden_dim, input_dim, kernel_size=3):
        super(ConvGRU, self).__init__()
        pad = kernel_size // 2
        self.convz = nn.Conv2d(hidden_dim + input_dim, hidden_dim, kernel_size, padding=pad)
        self.convr = nn.Conv2d(hidden_dim + input_dim, hidden_dim, kernel_size, padding=pad)
        self.convq = nn.Conv2d(hidden_dim + input_dim, hidden_dim, kernel_size, padding=pad)

    def forward(self, h, cz, cr, cq, *x_list):
        x = torch.cat(x_list, dim=1)
        hx = torch.cat([h, x], dim=1)

        update_gate = torch.sigmoid(self.convz(hx) + cz)
        reset_gate = torch.sigmoid(self.convr(hx) + cr)
        candidate = torch.tanh(self.convq(torch.cat([reset_gate * h, x], dim=1)) + cq)

        # Standard GRU blend of previous state and candidate.
        return (1 - update_gate) * h + update_gate * candidate
|
| 145 |
+
|
| 146 |
+
def pool2x(x):
    """Halve spatial resolution with a 3x3 average pool."""
    return F.avg_pool2d(x, kernel_size=3, stride=2, padding=1)
|
| 148 |
+
|
| 149 |
+
def pool4x(x):
    """Quarter spatial resolution with a 5x5 average pool.

    NOTE(review): padding=1 (not 2) means the output is not an exact H/4 for
    all sizes — presumably intentional for the resolutions used; confirm.
    """
    return F.avg_pool2d(x, kernel_size=5, stride=4, padding=1)
|
| 151 |
+
|
| 152 |
+
def interp(x, dest):
    """Bilinearly resize x to match dest's spatial dimensions (fp32 path)."""
    return interpolate_float32(x, dest.shape[2:], mode='bilinear', align_corners=True)
|
| 155 |
+
|
| 156 |
+
class BasicMultiUpdateBlock(nn.Module):
    """Multi-resolution GRU update operator (RAFT-style) over three hidden
    states at 1/8, 1/16 and 1/32 of the decoder resolution.

    Each call propagates information between the resolution levels via
    pooling/interpolation and, when `update` is True, also predicts a delta
    for the 6-channel depth/normal state plus a convex-upsampling mask.
    """

    def __init__(self, args, hidden_dims=[], out_dims=2):
        # NOTE(review): `hidden_dims=[]` is a mutable default argument; it is
        # only read here, never mutated, so it is harmless in practice.
        super().__init__()
        self.args = args
        self.n_gru_layers = args.model.decode_head.n_gru_layers # 3
        self.n_downsample = args.model.decode_head.n_downsample # 3, resolution of the disparity field (1/2^K)

        # self.encoder = BasicMotionEncoder(args)
        # encoder_output_dim = 128 # if there is corr volume
        encoder_output_dim = 6 # no corr volume: the motion input is the raw 6-channel flow state

        # Finest GRU sees the motion features plus (optionally) the mid-level state.
        self.gru08 = ConvGRU(hidden_dims[2], encoder_output_dim + hidden_dims[1] * (self.n_gru_layers > 1))
        self.gru16 = ConvGRU(hidden_dims[1], hidden_dims[0] * (self.n_gru_layers == 3) + hidden_dims[2])
        self.gru32 = ConvGRU(hidden_dims[0], hidden_dims[1])
        self.flow_head = FlowHead(hidden_dims[2], hidden_dim=2*hidden_dims[2])
        factor = 2**self.n_downsample

        # Predicts per-pixel convex-combination weights for upsampling by `factor`.
        self.mask = nn.Sequential(
            nn.Conv2d(hidden_dims[2], hidden_dims[2], 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden_dims[2], (factor**2)*9, 1, padding=0))

    def forward(self, net, inp, corr=None, flow=None, iter08=True, iter16=True, iter32=True, update=True):
        # net: [x8, x16, x32] hidden states; inp: per-level (cz, cr, cq) context tuples.

        if iter32:
            net[2] = self.gru32(net[2], *(inp[2]), pool2x(net[1]))
        if iter16:
            if self.n_gru_layers > 2:
                net[1] = self.gru16(net[1], *(inp[1]), interp(pool2x(net[0]), net[1]), interp(net[2], net[1]))
            else:
                net[1] = self.gru16(net[1], *(inp[1]), interp(pool2x(net[0]), net[1]))
        if iter08:
            if corr is not None:
                # NOTE(review): self.encoder is never defined (BasicMotionEncoder
                # is commented out above) — calling this with corr != None would
                # raise AttributeError. All call sites in this file pass corr=None.
                motion_features = self.encoder(flow, corr)
            else:
                motion_features = flow
            if self.n_gru_layers > 1:
                net[0] = self.gru08(net[0], *(inp[0]), motion_features, interp(net[1], net[0]))
            else:
                net[0] = self.gru08(net[0], *(inp[0]), motion_features)

        # Slow-fast pre-updates call with update=False: only refresh hidden states.
        if not update:
            return net

        delta_flow = self.flow_head(net[0])

        # scale mask to balence gradients
        mask = .25 * self.mask(net[0])
        return net, mask, delta_flow
|
| 205 |
+
|
| 206 |
+
class LayerNorm2d(nn.LayerNorm):
    """LayerNorm applied over the channel dimension of NCHW tensors.

    Implemented by round-tripping through NHWC so the parent class
    normalises the last (channel) dimension.
    """

    def __init__(self, dim):
        super(LayerNorm2d, self).__init__(dim)

    def forward(self, x):
        nhwc = x.permute(0, 2, 3, 1).contiguous()
        normed = super(LayerNorm2d, self).forward(nhwc)
        return normed.permute(0, 3, 1, 2).contiguous()
|
| 215 |
+
|
| 216 |
+
class ResidualBlock(nn.Module):
    """Two 3x3 convs with a selectable normalisation and an identity / 1x1
    projection skip connection.

    norm_fn: one of 'group' | 'batch' | 'instance' | 'layer' | 'none'.
    NOTE(review): an unrecognised norm_fn leaves self.norm1/2/3 undefined,
    which only surfaces later as an AttributeError — verify callers pass a
    known name ('layer' is what this file uses).
    """

    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(ResidualBlock, self).__init__()

        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)

        num_groups = planes // 8

        # norm3 is only needed (and only created) when the skip path projects.
        if norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)
            if not (stride == 1 and in_planes == planes):
                self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes)

        elif norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(planes)
            self.norm2 = nn.BatchNorm2d(planes)
            if not (stride == 1 and in_planes == planes):
                self.norm3 = nn.BatchNorm2d(planes)

        elif norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(planes)
            self.norm2 = nn.InstanceNorm2d(planes)
            if not (stride == 1 and in_planes == planes):
                self.norm3 = nn.InstanceNorm2d(planes)

        elif norm_fn == 'layer':
            self.norm1 = LayerNorm2d(planes)
            self.norm2 = LayerNorm2d(planes)
            if not (stride == 1 and in_planes == planes):
                self.norm3 = LayerNorm2d(planes)

        elif norm_fn == 'none':
            self.norm1 = nn.Sequential()
            self.norm2 = nn.Sequential()
            if not (stride == 1 and in_planes == planes):
                self.norm3 = nn.Sequential()

        if stride == 1 and in_planes == planes:
            # Shapes match: plain identity skip.
            self.downsample = None

        else:
            # Project the skip path to the residual's shape; reuses norm3 above.
            self.downsample = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)

    def forward(self, x):
        y = x
        y = self.conv1(y)
        y = self.norm1(y)
        y = self.relu(y)
        y = self.conv2(y)
        y = self.norm2(y)
        y = self.relu(y)

        if self.downsample is not None:
            x = self.downsample(x)

        return self.relu(x+y)
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
class ContextFeatureEncoder(nn.Module):
    '''
    Encoder features are used to:
    1. initialize the hidden state of the update operator
    2. and also injected into the GRU during each iteration of the update operator
    '''
    def __init__(self, in_dim, output_dim):
        '''
        in_dim = [x4, x8, x16, x32]
        output_dim = [hidden_dims, context_dims]
                     [[x4,x8,x16,x32],[x4,x8,x16,x32]]
        '''
        super().__init__()

        # One projection head per output group (hidden-state and context),
        # built identically for each of the three resolutions actually used.
        # The previously triplicated construction loops are factored into
        # _make_heads; module names and state-dict keys are unchanged.
        self.outputs04 = self._make_heads(in_dim[0], output_dim, 0)
        self.outputs08 = self._make_heads(in_dim[1], output_dim, 1)
        self.outputs16 = self._make_heads(in_dim[2], output_dim, 2)

        # x32 heads were present historically but are currently unused:
        # output_list = []
        # for dim in output_dim:
        #     conv_out = nn.Conv2d(in_dim[3], dim[3], 3, padding=1)
        #     output_list.append(conv_out)
        # self.outputs32 = nn.ModuleList(output_list)

    @staticmethod
    def _make_heads(in_channels, output_dim, level):
        """Build one ResidualBlock + 3x3 conv head per output group at `level`."""
        return nn.ModuleList([
            nn.Sequential(
                ResidualBlock(in_channels, dim[level], 'layer', stride=1),
                nn.Conv2d(dim[level], dim[level], 3, padding=1))
            for dim in output_dim
        ])

    def forward(self, encoder_features):
        # x_32 is accepted for interface symmetry but unused (see __init__).
        x_4, x_8, x_16, x_32 = encoder_features

        outputs04 = [f(x_4) for f in self.outputs04]
        outputs08 = [f(x_8) for f in self.outputs08]
        outputs16 = [f(x_16) for f in self.outputs16]
        # outputs32 = [f(x_32) for f in self.outputs32]

        return (outputs04, outputs08, outputs16)
|
| 335 |
+
|
| 336 |
+
class ConvBlock(nn.Module):
    """Pre-activation residual conv block (re-implementation of DPT's)."""

    def __init__(self, channels):
        super(ConvBlock, self).__init__()

        self.act = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        # ReLU -> conv -> ReLU -> conv, then add the (activated) input back.
        residual = self.conv2(self.act(self.conv1(self.act(x))))
        return x + residual
|
| 363 |
+
|
| 364 |
+
class FuseBlock(nn.Module):
    """DPT-style fusion block: optionally merge a skip branch, refine with a
    residual conv block, optionally upsample, then project channels."""

    def __init__(self, in_channels, out_channels, fuse=True, upsample=True, scale_factor=2):
        super(FuseBlock, self).__init__()

        self.fuse = fuse
        self.scale_factor = scale_factor
        self.way_trunk = ConvBlock(in_channels)
        if self.fuse:
            self.way_branch = ConvBlock(in_channels)

        self.out_conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=1,
            stride=1,
            padding=0,
        )
        self.upsample = upsample

    def forward(self, x1, x2=None):
        # Merge the skip connection when one is provided.
        if x2 is not None:
            x1 = x1 + self.way_branch(x2)

        fused = self.way_trunk(x1)

        if self.upsample:
            fused = interpolate_float32(
                fused, scale_factor=self.scale_factor, mode="bilinear", align_corners=True
            )
        return self.out_conv(fused)
|
| 397 |
+
|
| 398 |
+
class Readout(nn.Module):
    """DPT readout operation: fold the cls/register tokens back into the
    patch features via two linear projections and a GELU."""

    def __init__(self, in_features, use_cls_token=True, num_register_tokens=0):
        super(Readout, self).__init__()
        self.use_cls_token = use_cls_token
        if self.use_cls_token == True:
            self.project_patch = nn.Linear(in_features, in_features)
            self.project_learn = nn.Linear((1 + num_register_tokens) * in_features, in_features, bias=False)
            self.act = nn.GELU()
        else:
            self.project = nn.Identity()

    def forward(self, x):
        if self.use_cls_token == True:
            # x = [patch tokens, concatenated cls+register tokens].
            projected_patch = self.project_patch(x[0])
            projected_learn = self.project_learn(x[1])
            projected_learn = projected_learn.expand_as(projected_patch).contiguous()
            return self.act(projected_patch + projected_learn)
        # No cls token: pass the patch tensor through untouched.
        return self.project(x)
|
| 420 |
+
|
| 421 |
+
class Token2Feature(nn.Module):
    """Convert a ViT token map into a conv feature map at a target scale.

    After the Readout folds cls/register tokens in, the (B, H, W, C) map is
    permuted to NCHW and resampled:
      * integer scale_factor > 1  -> learned ConvTranspose2d upsampling
      * non-integer scale_factor > 1 -> nearest-neighbour resize (in forward)
        followed by a 1x1 conv projection
      * scale_factor < 1 -> strided conv downsampling by round(1/scale)
      * scale_factor == 1 -> identity (channels must already match)
    """

    def __init__(self, vit_channel, feature_channel, scale_factor, use_cls_token=True, num_register_tokens=0):
        super(Token2Feature, self).__init__()
        self.scale_factor = scale_factor
        self.readoper = Readout(in_features=vit_channel, use_cls_token=use_cls_token, num_register_tokens=num_register_tokens)
        if scale_factor > 1 and isinstance(scale_factor, int):
            # Integer upsampling: learned transposed conv, kernel == stride.
            self.sample = nn.ConvTranspose2d(
                in_channels=vit_channel,
                out_channels=feature_channel,
                kernel_size=scale_factor,
                stride=scale_factor,
                padding=0,
            )

        elif scale_factor > 1:
            # Fractional upsampling: resize happens in forward (nearest);
            # here only a 1x1 channel projection.
            self.sample = nn.Sequential(
                # Upsample2(upscale=scale_factor),
                # nn.Upsample(scale_factor=scale_factor),
                nn.Conv2d(
                    in_channels=vit_channel,
                    out_channels=feature_channel,
                    kernel_size=1,
                    stride=1,
                    padding=0,
                ),
            )

        elif scale_factor < 1:
            # Downsampling by the reciprocal factor with a strided conv.
            scale_factor = int(1.0 / scale_factor)
            self.sample = nn.Conv2d(
                in_channels=vit_channel,
                out_channels=feature_channel,
                kernel_size=scale_factor+1,
                stride=scale_factor,
                padding=1,
            )

        else:
            # scale_factor == 1: no resampling, no projection.
            self.sample = nn.Identity()

    def forward(self, x):
        x = self.readoper(x)
        #if use_cls_token == True:
        x = x.permute(0, 3, 1, 2).contiguous()  # (B, H, W, C) -> (B, C, H, W)
        if isinstance(self.scale_factor, float):
            # Fractional scale: nearest-neighbour resize before the 1x1 conv.
            x = interpolate_float32(x.float(), scale_factor=self.scale_factor, mode='nearest')
        x = self.sample(x)
        return x
|
| 471 |
+
|
| 472 |
+
class EncoderFeature(nn.Module):
    """Turn the four selected ViT token maps into multi-scale conv features.

    Scales: read_3/read_2 stay at 1/14, read_1 upsamples to 1/7, and
    read_0 upsamples to roughly 1/4 (factor 7/2).
    """

    def __init__(self, vit_channel, num_ch_dec=[256, 512, 1024, 1024], use_cls_token=True, num_register_tokens=0):
        super(EncoderFeature, self).__init__()
        self.vit_channel = vit_channel
        self.num_ch_dec = num_ch_dec

        self.read_3 = Token2Feature(self.vit_channel, self.num_ch_dec[3], scale_factor=1, use_cls_token=use_cls_token, num_register_tokens=num_register_tokens)
        self.read_2 = Token2Feature(self.vit_channel, self.num_ch_dec[2], scale_factor=1, use_cls_token=use_cls_token, num_register_tokens=num_register_tokens)
        self.read_1 = Token2Feature(self.vit_channel, self.num_ch_dec[1], scale_factor=2, use_cls_token=use_cls_token, num_register_tokens=num_register_tokens)
        self.read_0 = Token2Feature(self.vit_channel, self.num_ch_dec[0], scale_factor=7/2, use_cls_token=use_cls_token, num_register_tokens=num_register_tokens)

    def forward(self, ref_feature):
        deepest = self.read_3(ref_feature[3])   # 1/14
        mid = self.read_2(ref_feature[2])       # 1/14
        shallow = self.read_1(ref_feature[1])   # 1/7
        finest = self.read_0(ref_feature[0])    # 1/4
        return deepest, mid, shallow, finest
|
| 490 |
+
|
| 491 |
+
class DecoderFeature(nn.Module):
    """Top-down fusion of the encoder features down to a 1/4-resolution map.

    The final FuseBlock emits num_ch_dec[1] + 2 channels: the shared feature
    map plus one depth-confidence and one normal-confidence channel.
    """

    def __init__(self, vit_channel, num_ch_dec=[128, 256, 512, 1024, 1024], use_cls_token=True):
        super(DecoderFeature, self).__init__()
        self.vit_channel = vit_channel
        self.num_ch_dec = num_ch_dec

        # Deepest level: refine only (no skip, no upsampling).
        self.upconv_3 = FuseBlock(
            self.num_ch_dec[4],
            self.num_ch_dec[3],
            fuse=False, upsample=False)

        # 1/14 -> 1/7 with skip fusion.
        self.upconv_2 = FuseBlock(
            self.num_ch_dec[3],
            self.num_ch_dec[2],
        )

        # 1/7 -> 1/4 with skip fusion; +2 channels for the confidence maps.
        self.upconv_1 = FuseBlock(
            self.num_ch_dec[2],
            self.num_ch_dec[1] + 2,
            scale_factor=7/4
        )

        # self.upconv_0 = FuseBlock(
        #     self.num_ch_dec[1],
        #     self.num_ch_dec[0] + 1,
        # )

    def forward(self, ref_feature):
        deepest, skip14, skip7, _skip4 = ref_feature  # 1/14 1/14 1/7 1/4

        fused = self.upconv_3(deepest)        # 1/14
        fused = self.upconv_2(fused, skip14)  # 1/7
        fused = self.upconv_1(fused, skip7)   # 1/4
        # fused = self.upconv_0(fused, _skip4)  # 4/7
        return fused
|
| 526 |
+
|
| 527 |
+
class RAFTDepthNormalDPT5(nn.Module):
    """RAFT-style iterative depth + surface-normal decoder over ViT/DPT features.

    Pipeline:
      1. token2feature: ViT tokens -> multi-scale conv features (1/14, 1/14, 1/7, 1/4).
      2. decoder_mono: fuse to a 1/4-res map, split into a shared feature map
         plus one depth-confidence and one normal-confidence channel.
      3. regress_depth / pred_normal: initial depth (bin expectation, stored
         in a normalized residual space) and normals.
      4. update_block (multi-scale ConvGRU) iteratively refines the 6-channel
         state (depth, confidence, normal(4)); each iterate is convex-upsampled
         to output resolution.
    """

    def __init__(self, cfg):
        super().__init__()
        self.in_channels = cfg.model.decode_head.in_channels # [1024, 1024, 1024, 1024]
        self.feature_channels = cfg.model.decode_head.feature_channels # [256, 512, 1024, 1024] [2/7, 1/7, 1/14, 1/14]
        self.decoder_channels = cfg.model.decode_head.decoder_channels # [128, 256, 512, 1024, 1024] [-, 1/4, 1/7, 1/14, 1/14]
        self.use_cls_token = cfg.model.decode_head.use_cls_token
        self.up_scale = cfg.model.decode_head.up_scale
        self.num_register_tokens = cfg.model.decode_head.num_register_tokens
        self.min_val = cfg.data_basic.depth_normalize[0]
        self.max_val = cfg.data_basic.depth_normalize[1]
        # Depth is carried through the GRU as (clamped_depth - max_val) / regress_scale.
        self.regress_scale = 100.0

        self.hidden_dims = self.context_dims = cfg.model.decode_head.hidden_channels # [128, 128, 128, 128]
        self.n_gru_layers = cfg.model.decode_head.n_gru_layers # 3
        self.n_downsample = cfg.model.decode_head.n_downsample # 3, resolution of the disparity field (1/2^K)
        self.iters = cfg.model.decode_head.iters # 22
        self.slow_fast_gru = cfg.model.decode_head.slow_fast_gru # True

        self.num_depth_regressor_anchor = 256 # 512
        self.used_res_channel = self.decoder_channels[1] # now, use 2/7 res
        self.token2feature = EncoderFeature(self.in_channels[0], self.feature_channels, self.use_cls_token, self.num_register_tokens)
        self.decoder_mono = DecoderFeature(self.in_channels, self.decoder_channels)
        # Classifies each pixel over num_depth_regressor_anchor log-spaced depth bins.
        self.depth_regressor = nn.Sequential(
            nn.Conv2d(self.used_res_channel,
                      self.num_depth_regressor_anchor,
                      kernel_size=3,
                      padding=1),
            # nn.BatchNorm2d(self.num_depth_regressor_anchor),
            nn.ReLU(inplace=True),
            nn.Conv2d(self.num_depth_regressor_anchor,
                      self.num_depth_regressor_anchor,
                      kernel_size=1),
        )
        # Small MLP (as 1x1 convs) predicting the raw 3-channel normal.
        self.normal_predictor = nn.Sequential(
            nn.Conv2d(self.used_res_channel,
                      128,
                      kernel_size=3,
                      padding=1),
            # nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=1), nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=1), nn.ReLU(inplace=True),
            nn.Conv2d(128, 3, kernel_size=1),
        )

        self.context_feature_encoder = ContextFeatureEncoder(self.feature_channels, [self.hidden_dims, self.context_dims])
        # Precompute the GRU gate biases (cz, cr, cq) from the context features once.
        self.context_zqr_convs = nn.ModuleList([nn.Conv2d(self.context_dims[i], self.hidden_dims[i]*3, 3, padding=3//2) for i in range(self.n_gru_layers)])
        self.update_block = BasicMultiUpdateBlock(cfg, hidden_dims=self.hidden_dims, out_dims=6)

        self.relu = nn.ReLU(inplace=True)

    def get_bins(self, bins_num):
        """Log-uniform depth-bin centers in [min_val, max_val].

        NOTE(review): device="cuda" is hard-coded — this module assumes GPU
        execution; confirm before running on CPU.
        """
        depth_bins_vec = torch.linspace(math.log(self.min_val), math.log(self.max_val), bins_num, device="cuda")
        depth_bins_vec = torch.exp(depth_bins_vec)
        return depth_bins_vec

    def register_depth_expectation_anchor(self, bins_num, B):
        """Lazily register the (B, bins_num) anchor buffer (non-persistent)."""
        depth_bins_vec = self.get_bins(bins_num)
        depth_bins_vec = depth_bins_vec.unsqueeze(0).repeat(B, 1)
        self.register_buffer('depth_expectation_anchor', depth_bins_vec, persistent=False)

    def clamp(self, x):
        """Differentiably clamp x to [min_val, max_val] via two ReLU hinges."""
        y = self.relu(x - self.min_val) + self.min_val
        y = self.max_val - self.relu(self.max_val - y)
        return y

    def regress_depth(self, feature_map_d):
        """Initial depth from bin classification + expectation.

        Returns:
            normalized residual depth (clamped_depth - max_val) / regress_scale,
            and the raw per-bin logits (for potential auxiliary losses).
        """
        prob_feature = self.depth_regressor(feature_map_d)
        prob = prob_feature.softmax(dim=1)
        #prob = prob_feature.float().softmax(dim=1)

        ## Error logging
        if torch.isnan(prob).any():
            print('prob_feat_nan!!!')
        if torch.isinf(prob).any():
            print('prob_feat_inf!!!')

        # h = prob[0,:,0,0].cpu().numpy().reshape(-1)
        # import matplotlib.pyplot as plt
        # plt.bar(range(len(h)), h)
        B = prob.shape[0]
        # Lazily build the anchor buffer on first use (sized to this batch).
        if "depth_expectation_anchor" not in self._buffers:
            self.register_depth_expectation_anchor(self.num_depth_regressor_anchor, B)
        d = compute_depth_expectation(
            prob,
            self.depth_expectation_anchor[:B, ...]).unsqueeze(1)

        ## Error logging
        if torch.isnan(d).any():
            print('d_nan!!!')
        if torch.isinf(d).any():
            print('d_inf!!!')

        return (self.clamp(d) - self.max_val) / self.regress_scale, prob_feature

    def pred_normal(self, feature_map, confidence):
        """Predict raw normals, append confidence, and normalise the result."""
        normal_out = self.normal_predictor(feature_map)

        ## Error logging
        if torch.isnan(normal_out).any():
            print('norm_nan!!!')
        if torch.isinf(normal_out).any():
            print('norm_feat_inf!!!')

        return norm_normalize(torch.cat([normal_out, confidence], dim=1))
        #return norm_normalize(torch.cat([normal_out, confidence], dim=1).float())

    def create_mesh_grid(self, height, width, batch, device="cuda", set_buffer=True):
        """Build a (batch, 2, H, W) xy pixel-coordinate grid (currently unused by forward)."""
        y, x = torch.meshgrid([torch.arange(0, height, dtype=torch.float32, device=device),
                               torch.arange(0, width, dtype=torch.float32, device=device)], indexing='ij')
        meshgrid = torch.stack((x, y))
        meshgrid = meshgrid.unsqueeze(0).repeat(batch, 1, 1, 1)
        #self.register_buffer('meshgrid', meshgrid, persistent=False)
        return meshgrid

    def upsample_flow(self, flow, mask):
        """ Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """
        N, D, H, W = flow.shape
        factor = 2 ** self.n_downsample
        # mask holds 9 convex weights for each output pixel in a factor x factor cell.
        mask = mask.view(N, 1, 9, factor, factor, H, W)
        mask = torch.softmax(mask, dim=2)
        #mask = torch.softmax(mask.float(), dim=2)

        #up_flow = F.unfold(factor * flow, [3,3], padding=1)
        # No value rescaling here: channels are depth/conf/normal, not displacements.
        up_flow = F.unfold(flow, [3,3], padding=1)
        up_flow = up_flow.view(N, D, 9, 1, 1, H, W)

        up_flow = torch.sum(mask * up_flow, dim=2)
        up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
        return up_flow.reshape(N, D, factor*H, factor*W)

    def initialize_flow(self, img):
        """ Flow is represented as difference between two coordinate grids flow = coords1 - coords0"""
        N, _, H, W = img.shape

        # Both grids start at zero (see coords_grid): the state is a pure residual.
        coords0 = coords_grid(N, H, W).to(img.device)
        coords1 = coords_grid(N, H, W).to(img.device)

        return coords0, coords1

    def upsample(self, x, scale_factor=2):
        """Upsample input tensor by a factor of 2
        """
        # Effective factor folds in the configured up_scale relative to the /8 grid.
        return interpolate_float32(x, scale_factor=scale_factor*self.up_scale/8, mode="nearest")

    def forward(self, vit_features, **kwargs):
        ## read vit token to multi-scale features
        B, H, W, _, _, num_register_tokens = vit_features[1]
        vit_features = vit_features[0]

        ## Error logging
        if torch.isnan(vit_features[0]).any():
            print('vit_feature_nan!!!')
        if torch.isinf(vit_features[0]).any():
            print('vit_feature_inf!!!')

        # Split each token map into [patch tokens (B,H,W,C), cls+register tokens].
        if self.use_cls_token == True:
            vit_features = [[ft[:, 1+num_register_tokens:, :].view(B, H, W, self.in_channels[0]), \
                ft[:, 0:1+num_register_tokens, :].view(B, 1, 1, self.in_channels[0] * (1+num_register_tokens))] for ft in vit_features]
        else:
            vit_features = [ft.view(B, H, W, self.in_channels[0]) for ft in vit_features]
        encoder_features = self.token2feature(vit_features) # 1/14, 1/14, 1/7, 1/4

        ## Error logging
        for en_ft in encoder_features:
            if torch.isnan(en_ft).any():
                print('decoder_feature_nan!!!')
                print(en_ft.shape)
            if torch.isinf(en_ft).any():
                print('decoder_feature_inf!!!')
                print(en_ft.shape)

        ## decode features to init-depth (and confidence)
        ref_feat = self.decoder_mono(encoder_features) # now, 1/4 for depth

        ## Error logging
        if torch.isnan(ref_feat).any():
            print('ref_feat_nan!!!')
        if torch.isinf(ref_feat).any():
            print('ref_feat_inf!!!')

        feature_map = ref_feat[:, :-2, :, :] # feature map share of depth and normal prediction
        depth_confidence_map = ref_feat[:, -2:-1, :, :]
        normal_confidence_map = ref_feat[:, -1:, :, :]
        depth_pred, binmap = self.regress_depth(feature_map) # regress bin for depth
        normal_pred = self.pred_normal(feature_map, normal_confidence_map) # mlp for normal

        # 6-channel state: depth (normalized residual), depth conf, normal (4).
        depth_init = torch.cat((depth_pred, depth_confidence_map, normal_pred), dim=1) # (N, 1+1+4, H, W)

        ## encoder features to context-feature for init-hidden-state and contex-features
        cnet_list = self.context_feature_encoder(encoder_features[::-1])
        net_list = [torch.tanh(x[0]) for x in cnet_list] # x_4, x_8, x_16 of hidden state
        inp_list = [torch.relu(x[1]) for x in cnet_list] # x_4, x_8, x_16 context features

        # Rather than running the GRU's conv layers on the context features multiple times, we do it once at the beginning
        inp_list = [list(conv(i).split(split_size=conv.out_channels//3, dim=1)) for i,conv in zip(inp_list, self.context_zqr_convs)]

        coords0, coords1 = self.initialize_flow(net_list[0])
        if depth_init is not None:
            coords1 = coords1 + depth_init

        # Record the (upsampled) initial prediction during training so the
        # loss can supervise iteration 0 as well.
        if self.training:
            low_resolution_init = [self.clamp(depth_init[:,:1] * self.regress_scale + self.max_val), depth_init[:,1:2], norm_normalize(depth_init[:,2:].clone())]
            init_depth = upflow4(depth_init)
            flow_predictions = [self.clamp(init_depth[:,:1] * self.regress_scale + self.max_val)]
            conf_predictions = [init_depth[:,1:2]]
            normal_outs = [norm_normalize(init_depth[:,2:].clone())]

        else:
            flow_predictions = []
            conf_predictions = []
            samples_pred_list = []
            coord_list = []
            normal_outs = []
            low_resolution_init = []

        for itr in range(self.iters):
            # coords1 = coords1.detach()
            flow = coords1 - coords0
            if self.n_gru_layers == 3 and self.slow_fast_gru: # Update low-res GRU
                net_list = self.update_block(net_list, inp_list, iter32=True, iter16=False, iter08=False, update=False)
            if self.n_gru_layers >= 2 and self.slow_fast_gru:# Update low-res GRU and mid-res GRU
                net_list = self.update_block(net_list, inp_list, iter32=self.n_gru_layers==3, iter16=True, iter08=False, update=False)
            net_list, up_mask, delta_flow = self.update_block(net_list, inp_list, None, flow, iter32=self.n_gru_layers==3, iter16=self.n_gru_layers>=2)

            # F(t+1) = F(t) + \Delta(t)
            coords1 = coords1 + delta_flow

            # We do not need to upsample or output intermediate results in test_mode
            #if (not self.training) and itr < self.iters-1:
                #continue

            # upsample predictions
            if up_mask is None:
                flow_up = self.upsample(coords1-coords0, 4)
            else:
                flow_up = self.upsample_flow(coords1 - coords0, up_mask)
            # flow_up = self.upsample(coords1-coords0, 4)

            # Map the normalized residual back to metric depth and clamp.
            flow_predictions.append(self.clamp(flow_up[:,:1] * self.regress_scale + self.max_val))
            conf_predictions.append(flow_up[:,1:2])
            normal_outs.append(norm_normalize(flow_up[:,2:].clone()))

        outputs=dict(
            prediction=flow_predictions[-1],
            predictions_list=flow_predictions,
            confidence=conf_predictions[-1],
            confidence_list=conf_predictions,
            pred_logit=None,
            # samples_pred_list=samples_pred_list,
            # coord_list=coord_list,
            prediction_normal=normal_outs[-1],
            normal_out_list=normal_outs,
            low_resolution_init=low_resolution_init,
        )

        return outputs
|
| 785 |
+
|
| 786 |
+
|
| 787 |
+
if __name__ == "__main__":
    # Smoke test: build the decoder from a config file and push a dummy
    # DINOv2-style token sequence (patch tokens + cls + register tokens)
    # through it. Requires a CUDA device and the referenced config file.
    try:
        from mmcv.utils import Config
    except ImportError:
        # mmcv >= 2.x moved Config into mmengine; a bare `except:` here would
        # also swallow KeyboardInterrupt/SystemExit, so catch ImportError only.
        from mmengine import Config
    cfg = Config.fromfile('/mu.hu/monodepth/mono/configs/RAFTDecoder/vit.raft.full2t.py')
    cfg.model.decode_head.in_channels = [384, 384, 384, 384]
    cfg.model.decode_head.feature_channels = [96, 192, 384, 768]
    cfg.model.decode_head.decoder_channels = [48, 96, 192, 384, 384]
    cfg.model.decode_head.hidden_channels = [48, 48, 48, 48, 48]
    cfg.model.decode_head.up_scale = 7

    cfg.model.decode_head.use_cls_token = True
    cfg.model.decode_head.num_register_tokens = 4
    # (74 * 74) patch tokens + 1 cls + 4 register tokens per level.
    # NOTE(review): the trailing tuple looks like (batch, h, w, img_h, img_w,
    # num_register) metadata — confirm against the decoder's input parsing.
    vit_feature = [[torch.rand((2, (74 * 74) + 5, 384)).cuda(),
                    torch.rand((2, (74 * 74) + 5, 384)).cuda(),
                    torch.rand((2, (74 * 74) + 5, 384)).cuda(),
                    torch.rand((2, (74 * 74) + 5, 384)).cuda()], (2, 74, 74, 1036, 1036, 4)]

    decoder = RAFTDepthNormalDPT5(cfg).cuda()
    output = decoder(vit_feature)
|
| 815 |
+
|
| 816 |
+
|
| 817 |
+
|
| 818 |
+
|
external/Metric3D/training/mono/model/decode_heads/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .RAFTDepthNormalDPTDecoder5 import RAFTDepthNormalDPT5
|
| 2 |
+
|
| 3 |
+
__all__=['RAFTDepthNormalDPT5'
|
| 4 |
+
]
|
external/Metric3D/training/mono/model/losses/AdabinsLoss.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
from torch.nn.utils.rnn import pad_sequence
|
| 5 |
+
#from pytorch3d.loss import chamfer_distance
|
| 6 |
+
|
| 7 |
+
class AdabinsLoss(nn.Module):
    """
    Losses employed in Adabins: a scale-invariant log (silog) depth term,
    plus an optional bins-center chamfer term (disabled; needs pytorch3d).

    Args:
        depth_normalize: (min_depth, max_depth) range of valid depth values.
        variance_focus: weight of the variance term inside the silog loss.
        loss_weight: global multiplier applied to the returned loss.
        out_channel: number of depth bins (kept for interface compatibility).
        data_type: dataset annotation types this loss is applied to.
        w_ce: reserved flag for an extra cross-entropy term (currently unused).
        w_chamber: reserved flag for the chamfer term (currently unused).
    """
    def __init__(self, depth_normalize, variance_focus=0.85, loss_weight=1, out_channel=100, data_type=['stereo', 'lidar'], w_ce=False, w_chamber=False, **kwargs):
        super(AdabinsLoss, self).__init__()
        self.variance_focus = variance_focus
        self.loss_weight = loss_weight
        self.data_type = data_type
        self.depth_min = depth_normalize[0]
        self.depth_max = depth_normalize[1]
        self.w_ce = w_ce
        self.eps = 1e-6

    def silog_loss(self, prediction, target, mask):
        """Scale-invariant log loss (Eigen et al.) over the masked pixels."""
        d = torch.log(prediction[mask]) - torch.log(target[mask])
        d_square_mean = torch.sum(d ** 2) / (d.numel() + self.eps)
        d_mean = torch.sum(d) / (d.numel() + self.eps)
        loss = torch.sqrt(d_square_mean - self.variance_focus * (d_mean ** 2))
        return loss

    def chamfer_distance_loss(self, bins, target_depth_maps, mask):
        """
        Chamfer distance between predicted bin centers and valid GT depths.

        NOTE(review): relies on `chamfer_distance` from pytorch3d.loss, whose
        import is commented out at file level, so calling this raises
        NameError; it is currently unused by forward().
        """
        bin_centers = 0.5 * (bins[:, 1:] + bins[:, :-1])
        n, p = bin_centers.shape
        input_points = bin_centers.view(n, p, 1)  # (n, p, 1)

        # keep only the valid ground-truth depths of each sample
        # (avoid shadowing `p` from bin_centers.shape)
        target_points = [pts[m] for pts, m in zip(target_depth_maps, mask)]
        # BUGFIX: torch.Tensor() accepts no dtype/device kwargs; torch.tensor()
        # does. Also inherit the device from `bins` instead of hard-coding cuda.
        target_lengths = torch.tensor([len(t) for t in target_points], dtype=torch.long, device=bins.device)
        target_points = pad_sequence(target_points, batch_first=True).unsqueeze(2)  # (n, T, 1)

        loss, _ = chamfer_distance(x=input_points, y=target_points, y_lengths=target_lengths)
        return loss

    def forward(self, prediction, target, bins_edges, mask=None, **kwargs):
        """
        Return the weighted silog loss (the chamfer term is disabled).

        Raises:
            RuntimeError: if the loss turns out NaN or infinite.
        """
        silog_loss = self.silog_loss(prediction=prediction, target=target, mask=mask)
        loss = silog_loss * 10
        if torch.isnan(loss).item() | torch.isinf(loss).item():
            raise RuntimeError(f'Adabins loss error, {loss}')
        return loss * self.loss_weight
|
external/Metric3D/training/mono/model/losses/ConfidenceGuideLoss.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
|
| 5 |
+
class ConfidenceGuideLoss(nn.Module):
    """
    Confidence-guided sparse depth loss over a GRU prediction sequence.

    Each entry of `samples_pred_list` is a (B, 2, N) tensor holding per-point
    depth and confidence predictions, sampled at the normalized image
    coordinates given by the matching (B, 1, N, 2) entry of `coord_list`.
    Later iterations are weighted exponentially more (RAFT-Stereo style).
    """
    def __init__(self, loss_weight=1, data_type=['stereo', 'lidar', 'denselidar'], loss_gamma=0.9, conf_loss=True, **kwargs):
        super(ConfidenceGuideLoss, self).__init__()
        self.loss_weight = loss_weight
        self.data_type = data_type
        self.eps = 1e-6
        self.loss_gamma = loss_gamma
        self.conf_loss = conf_loss

    def forward(self, samples_pred_list, target, coord_list, mask=None, **kwargs):
        loss = 0.0
        n_predictions = len(samples_pred_list)
        for i, (pred, coord) in enumerate(zip(samples_pred_list, coord_list)):
            # sample GT depth / validity at the predicted point locations
            gt_depth_ = F.grid_sample(target, coord, mode='nearest', align_corners=True)  # (B, 1, 1, N)
            gt_depth_mask_ = F.grid_sample(mask.float(), coord, mode='nearest', align_corners=True)  # (B, 1, 1, N)
            gt_depth_ = gt_depth_[:, :, 0, :]
            gt_depth_mask_ = gt_depth_mask_[:, :, 0, :] > 0.5

            pred_depth, pred_conf = pred[:, :1, :], pred[:, 1:, :]

            # Adjust loss_gamma so the weighting stays consistent for any
            # number of iterations; with a single prediction the weight is 1.
            # BUGFIX: the original divided by (n_predictions - 1) == 0 here.
            if n_predictions > 1:
                adjusted_loss_gamma = self.loss_gamma**(15 / (n_predictions - 1))
                i_weight = adjusted_loss_gamma**(n_predictions - i - 1)
            else:
                i_weight = 1.0

            # depth L1 loss over valid sampled points
            diff = torch.abs(pred_depth - gt_depth_) * gt_depth_mask_
            curr_loss = torch.sum(diff) / (torch.sum(gt_depth_mask_) + self.eps)
            if torch.isnan(curr_loss).item() | torch.isinf(curr_loss).item():
                curr_loss = 0 * torch.sum(pred_depth)
                # BUGFIX: the message named the wrong class and printed the
                # accumulator instead of the offending per-iteration loss
                print(f'ConfidenceGuideLoss-depth NAN error, {curr_loss}')

            # confidence L1 loss against pseudo-GT = 1 - relative depth error,
            # counted only where the relative error is below 100%
            conf_loss = 0.0
            if self.conf_loss:
                conf_mask = torch.abs(gt_depth_ - pred_depth) < gt_depth_
                conf_mask = conf_mask & gt_depth_mask_
                gt_confidence = (1 - torch.abs((pred_depth - gt_depth_) / gt_depth_)) * conf_mask
                conf_loss = torch.sum(torch.abs(pred_conf - gt_confidence) * conf_mask) / (torch.sum(conf_mask) + self.eps)
                if torch.isnan(conf_loss).item() | torch.isinf(conf_loss).item():
                    conf_loss = 0 * torch.sum(pred_conf)
                    print(f'ConfidenceGuideLoss-confidence NAN error, {conf_loss}')

            loss += (conf_loss + curr_loss) * i_weight

        return loss * self.loss_weight
|
external/Metric3D/training/mono/model/losses/ConfidenceLoss.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
class ConfidenceLoss(nn.Module):
    """
    L1 loss between a predicted confidence map and a pseudo ground-truth
    confidence derived from the relative depth error (1 - |err| / target),
    evaluated only where the relative error is below 100%.
    """
    def __init__(self, loss_weight=1, data_type=['stereo', 'lidar', 'denselidar'], **kwargs):
        super(ConfidenceLoss, self).__init__()
        self.loss_weight = loss_weight
        self.data_type = data_type
        self.eps = 1e-6

    def forward(self, prediction, target, confidence, mask=None, **kwargs):
        # a pixel contributes only if its absolute error is below the target
        # (relative error < 100%) and it is a valid GT pixel
        valid = (torch.abs(target - prediction) < target) & mask
        pseudo_gt = (1 - torch.abs((prediction - target) / target)) * valid
        loss = torch.sum(torch.abs(confidence - pseudo_gt) * valid) / (torch.sum(valid) + self.eps)
        if torch.isnan(loss).item() | torch.isinf(loss).item():
            loss = 0 * torch.sum(confidence)
            print(f'ConfidenceLoss NAN error, {loss}')
        return loss * self.loss_weight
|
external/Metric3D/training/mono/model/losses/GRUSequenceLoss.py
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
class GRUSequenceLoss(nn.Module):
    """
    Loss over the sequence of depth predictions produced by the GRU refiner.

    Each iteration's prediction receives an exponentially increasing weight
    (later iterations matter more), combining a masked L1 (or log-L1) depth
    term, an optional confidence term, and a weakly weighted stereo
    pseudo-label term applied only to samples whose dataset name is listed
    in `stereo_dataset`.
    """
    def __init__(self, loss_weight=1, data_type=['lidar', 'denselidar', 'stereo', 'denselidar_syn'], loss_gamma=0.9, silog=False, stereo_sup=0.001, stereo_dataset=['KITTI', 'NYU'], **kwargs):
        super(GRUSequenceLoss, self).__init__()
        self.loss_weight = loss_weight
        self.data_type = data_type
        self.eps = 1e-6
        self.loss_gamma = loss_gamma
        self.silog = silog
        self.variance_focus = 0.5
        self.stereo_sup = stereo_sup
        self.stereo_dataset = stereo_dataset

    def silog_loss(self, prediction, target, mask):
        """Masked L1 loss in log space (only pixels where both depths > 0.01)."""
        mask = mask & (prediction > 0.01) & (target > 0.01)
        d = torch.log(prediction[mask]) - torch.log(target[mask])
        loss = torch.sum(torch.abs(d)) / (d.numel() + self.eps)
        print("new log l1 loss")  # debug leftover, kept for behavioral parity
        return loss

    def conf_loss(self, confidence, prediction, target, mask):
        """L1 loss against pseudo-GT confidence = 1 - relative depth error."""
        conf_mask = torch.abs(target - prediction) < target
        conf_mask = conf_mask & mask
        gt_confidence = (1 - torch.abs((prediction - target) / target)) * conf_mask
        loss = torch.sum(torch.abs(confidence - gt_confidence) * conf_mask) / (torch.sum(conf_mask) + self.eps)
        if torch.isnan(loss).item() | torch.isinf(loss).item():
            print(f'GRUSequenceLoss-confidence NAN error, {loss}')
            loss = 0 * torch.sum(confidence)
        return loss

    def forward(self, predictions_list, target, stereo_depth, confidence_list=None, mask=None, **kwargs):
        device = target.device

        batches_dataset = kwargs['dataset']
        # per-sample 0/1 switch enabling the stereo pseudo-label term
        self.batch_with_stereo = torch.tensor([1 if batch_dataset in self.stereo_dataset else 0 \
            for batch_dataset in batches_dataset], device=device)[:, None, None, None]

        n_predictions = len(predictions_list)
        assert n_predictions >= 1
        loss = 0.0

        for i, prediction in enumerate(predictions_list):
            # Adjust loss_gamma so the weighting is consistent for any number
            # of RAFT-Stereo iterations; with a single prediction the weight
            # is 1. BUGFIX: the original divided by (n_predictions - 1) == 0
            # when the sequence contained exactly one prediction.
            if n_predictions > 1:
                adjusted_loss_gamma = self.loss_gamma**(15 / (n_predictions - 1))
                i_weight = adjusted_loss_gamma**(n_predictions - i - 1)
            else:
                i_weight = 1.0

            # depth L1 (or log-L1) term
            if self.silog and mask.sum() > 0:
                curr_loss = self.silog_loss(prediction, target, mask)
            else:
                diff = torch.abs(prediction - target) * mask
                curr_loss = torch.sum(diff) / (torch.sum(mask) + self.eps)
            if torch.isnan(curr_loss).item() | torch.isinf(curr_loss).item():
                print(f'GRUSequenceLoss-depth NAN error, {curr_loss}')
                curr_loss = 0 * torch.sum(prediction)

            # confidence term
            conf_loss = 0
            if confidence_list is not None:
                conf_loss = self.conf_loss(confidence_list[i], prediction, target, mask)

            # stereo pseudo-label term, applied on the eroded complement of the
            # GT mask (pixels with no nearby GT supervision)
            mask_stereo = 1 + torch.nn.functional.max_pool2d(\
                - torch.nn.functional.max_pool2d(mask * 1.0, 3, stride=1, padding=1, dilation=1), 3, stride=1, padding=1, dilation=1)

            stereo_diff = torch.abs(prediction - stereo_depth) * mask_stereo
            stereo_depth_loss = torch.sum(self.batch_with_stereo * stereo_diff * mask_stereo) / (torch.sum(mask_stereo) + self.eps)
            stereo_depth_loss = self.stereo_sup * stereo_depth_loss

            loss += (conf_loss + curr_loss + stereo_depth_loss) * i_weight
        return loss * self.loss_weight
|
| 101 |
+
|
| 102 |
+
# import torch
|
| 103 |
+
# import torch.nn as nn
|
| 104 |
+
|
| 105 |
+
# class GRUSequenceLoss(nn.Module):
|
| 106 |
+
# """
|
| 107 |
+
# Loss function defined over sequence of depth predictions
|
| 108 |
+
# """
|
| 109 |
+
# def __init__(self, loss_weight=1, data_type=['lidar', 'denselidar', 'stereo', 'denselidar_syn'], loss_gamma=0.9, silog=False, stereo_sup=0.001, stereo_dataset=['BigData'], **kwargs):
|
| 110 |
+
# super(GRUSequenceLoss, self).__init__()
|
| 111 |
+
# self.loss_weight = loss_weight
|
| 112 |
+
# self.data_type = data_type
|
| 113 |
+
# self.eps = 1e-6
|
| 114 |
+
# self.loss_gamma = loss_gamma
|
| 115 |
+
# self.silog = silog
|
| 116 |
+
# self.variance_focus = 0.5
|
| 117 |
+
# self.stereo_sup = stereo_sup
|
| 118 |
+
# self.stereo_dataset = stereo_dataset
|
| 119 |
+
|
| 120 |
+
# def silog_loss(self, prediction, target, mask):
|
| 121 |
+
# mask = mask & (prediction > 0.01) & (target > 0.01)
|
| 122 |
+
# d = torch.log(prediction[mask]) - torch.log(target[mask])
|
| 123 |
+
# # d_square_mean = torch.sum(d ** 2) / (d.numel() + self.eps)
|
| 124 |
+
# # d_mean = torch.sum(d) / (d.numel() + self.eps)
|
| 125 |
+
# # loss = d_square_mean - self.variance_focus * (d_mean ** 2)
|
| 126 |
+
# loss = torch.sum(torch.abs(d)) / (d.numel() + self.eps)
|
| 127 |
+
# print("new log l1 loss")
|
| 128 |
+
# return loss
|
| 129 |
+
|
| 130 |
+
# def conf_loss(self, confidence, prediction, target, mask):
|
| 131 |
+
# conf_mask = torch.abs(target - prediction) < target
|
| 132 |
+
# conf_mask = conf_mask & mask
|
| 133 |
+
# gt_confidence = (1 - torch.abs((prediction - target) / target)) * conf_mask
|
| 134 |
+
# loss = torch.sum(torch.abs(confidence - gt_confidence) * conf_mask) / (torch.sum(conf_mask) + self.eps)
|
| 135 |
+
# if torch.isnan(loss).item() | torch.isinf(loss).item():
|
| 136 |
+
# print(f'GRUSequenceLoss-confidence NAN error, {loss}')
|
| 137 |
+
# loss = 0 * torch.sum(confidence)
|
| 138 |
+
# return loss
|
| 139 |
+
|
| 140 |
+
# def forward(self, predictions_list, target, stereo_depth, confidence_list=None, mask=None, **kwargs):
|
| 141 |
+
# device = target.device
|
| 142 |
+
|
| 143 |
+
# batches_dataset = kwargs['dataset']
|
| 144 |
+
# self.batch_with_stereo = torch.tensor([1 if batch_dataset in self.stereo_dataset else 0 \
|
| 145 |
+
# for batch_dataset in batches_dataset], device=device)[:,None,None,None]
|
| 146 |
+
|
| 147 |
+
# n_predictions = len(predictions_list)
|
| 148 |
+
# assert n_predictions >= 1
|
| 149 |
+
# loss = 0.0
|
| 150 |
+
|
| 151 |
+
# for i, prediction in enumerate(predictions_list):
|
| 152 |
+
# # We adjust the loss_gamma so it is consistent for any number of RAFT-Stereo iterations
|
| 153 |
+
# adjusted_loss_gamma = self.loss_gamma**(15/(n_predictions - 1))
|
| 154 |
+
# i_weight = adjusted_loss_gamma**(n_predictions - i - 1)
|
| 155 |
+
|
| 156 |
+
# # depth L1 loss
|
| 157 |
+
# if self.silog and mask.sum() > 0:
|
| 158 |
+
# curr_loss = self.silog_loss(prediction, target, mask)
|
| 159 |
+
# else:
|
| 160 |
+
# diff = torch.abs(prediction - target) * mask
|
| 161 |
+
# curr_loss = torch.sum(diff) / (torch.sum(mask) + self.eps)
|
| 162 |
+
# if torch.isnan(curr_loss).item() | torch.isinf(curr_loss).item():
|
| 163 |
+
# print(f'GRUSequenceLoss-depth NAN error, {curr_loss}')
|
| 164 |
+
# curr_loss = 0 * torch.sum(prediction)
|
| 165 |
+
|
| 166 |
+
# # confidence L1 loss
|
| 167 |
+
# conf_loss = 0
|
| 168 |
+
# if confidence_list is not None:
|
| 169 |
+
# conf_loss = self.conf_loss(confidence_list[i], prediction, target, mask)
|
| 170 |
+
|
| 171 |
+
# # stereo depth loss
|
| 172 |
+
# mask_stereo = 1 + torch.nn.functional.max_pool2d(\
|
| 173 |
+
# - torch.nn.functional.max_pool2d(mask * 1.0, 5, stride=1, padding=2, dilation=1), 5, stride=1, padding=2, dilation=1)
|
| 174 |
+
|
| 175 |
+
# stereo_diff = torch.abs(prediction - stereo_depth) * mask_stereo
|
| 176 |
+
# stereo_depth_loss = torch.sum(self.batch_with_stereo * stereo_diff * mask_stereo) / (torch.sum(mask_stereo) + self.eps)
|
| 177 |
+
# stereo_depth_loss = self.stereo_sup * stereo_depth_loss
|
| 178 |
+
|
| 179 |
+
# loss += (conf_loss + curr_loss + stereo_depth_loss) * i_weight
|
| 180 |
+
# #raise RuntimeError(f'Silog error, {loss}, d_square_mean: {d_square_mean}, d_mean: {d_mean}')
|
| 181 |
+
# return loss * self.loss_weight
|
external/Metric3D/training/mono/model/losses/Gradient.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
EPSILON = 1e-6  # guards divisions when a gradient mask is empty

# Scale-invariant gradient matching loss in log space, after Zhengqi Li's
# MegaDepth formulation (his original operated on 3-D (B, H, W) tensors;
# this version operates on 4-D (B, C, H, W) tensors).
def gradient_log_loss(log_prediction_d, log_gt, mask):
    """Mean absolute vertical + horizontal gradient of the log-depth error.

    Gradients are taken with a 2-pixel stride and only counted where both
    endpoints are valid according to `mask`.
    """
    log_d_diff = log_prediction_d - log_gt

    v_gradient = torch.abs(log_d_diff[:, :, :-2, :] - log_d_diff[:, :, 2:, :])
    v_mask = torch.mul(mask[:, :, :-2, :], mask[:, :, 2:, :])
    v_gradient = torch.mul(v_gradient, v_mask)

    h_gradient = torch.abs(log_d_diff[:, :, :, :-2] - log_d_diff[:, :, :, 2:])
    h_mask = torch.mul(mask[:, :, :, :-2], mask[:, :, :, 2:])
    h_gradient = torch.mul(h_gradient, h_mask)

    N = torch.sum(h_mask) + torch.sum(v_mask) + EPSILON

    gradient_loss = torch.sum(h_gradient) + torch.sum(v_gradient)
    gradient_loss = gradient_loss / N

    return gradient_loss

class GradientLoss_Li(nn.Module):
    """Multi-scale log-gradient matching loss over `scale_num` dyadic scales.

    Raises:
        RuntimeError: if the accumulated loss turns out NaN or infinite.
    """
    def __init__(self, scale_num=1, loss_weight=1, data_type=['lidar', 'stereo'], **kwargs):
        super(GradientLoss_Li, self).__init__()
        self.__scales = scale_num
        self.loss_weight = loss_weight
        self.data_type = data_type
        self.eps = 1e-6

    def forward(self, prediction, target, mask, **kwargs):
        total = 0
        # push invalid pixels far from zero so log() stays finite on them;
        # they are excluded by the mask inside gradient_log_loss anyway
        target_trans = target + (~mask) * 100
        pred_log = torch.log(prediction)
        gt_log = torch.log(target_trans)
        for scale in range(self.__scales):
            step = pow(2, scale)

            # BUGFIX: subsample the spatial dims (H, W); the original sliced
            # [:, ::step, ::step], striding the channel and height dims of the
            # 4-D (B, C, H, W) tensor instead of height and width.
            total += gradient_log_loss(pred_log[:, :, ::step, ::step], gt_log[:, :, ::step, ::step], mask[:, :, ::step, ::step])
        loss = total / self.__scales
        if torch.isnan(loss).item() | torch.isinf(loss).item():
            raise RuntimeError(f'VNL error, {loss}')
        return loss * self.loss_weight
|
| 64 |
+
|
| 65 |
+
######################################################
# Multi-scale gradient matching loss, @Ke Xian implementation.
#####################################################
def gradient_loss(prediction, target, mask):
    """Masked mean x+y gradient of the depth error for 3-D (B, H, W) inputs.

    Each image is normalized by its own count of valid pixels; images with
    no valid pixels contribute zero.
    """
    valid_counts = torch.sum(mask, (1, 2))

    masked_diff = torch.mul(mask, prediction - target)

    # gradient along x, counted only where both neighbors are valid
    grad_x = torch.mul(
        torch.mul(mask[:, :, 1:], mask[:, :, :-1]),
        torch.abs(masked_diff[:, :, 1:] - masked_diff[:, :, :-1]),
    )
    # gradient along y, counted only where both neighbors are valid
    grad_y = torch.mul(
        torch.mul(mask[:, 1:, :], mask[:, :-1, :]),
        torch.abs(masked_diff[:, 1:, :] - masked_diff[:, :-1, :]),
    )

    per_image = torch.sum(grad_x, (1, 2)) + torch.sum(grad_y, (1, 2))
    nonempty = valid_counts.nonzero()
    if per_image[nonempty].numel() > 0:
        per_image[nonempty] = per_image[nonempty] / valid_counts[nonempty]
        return torch.mean(per_image)
    # no valid pixels anywhere: return a zero that keeps the graph connected
    return 0 * torch.sum(prediction)


class GradientLoss(nn.Module):
    """Multi-scale (dyadic subsampling) wrapper around `gradient_loss`."""
    def __init__(self, scale_num=4, loss_weight=1, **kwargs):
        super(GradientLoss, self).__init__()
        self.__scales = scale_num
        self.loss_weight = loss_weight

    def forward(self, prediction, target, mask, **kwargs):
        total = sum(
            gradient_loss(
                prediction[:, ::2 ** s, ::2 ** s],
                target[:, ::2 ** s, ::2 ** s],
                mask[:, ::2 ** s, ::2 ** s],
            )
            for s in range(self.__scales)
        )
        return total * self.loss_weight
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
if __name__ == '__main__':
    # Smoke test for GradientLoss_Li on random positive depths.
    import numpy as np

    gradient = GradientLoss_Li(4)

    # BUGFIX: the original used an all-(-1) GT, which makes every pixel
    # invalid and feeds log(-1) = NaN into the loss, so the demo raised the
    # NaN RuntimeError instead of printing a value. Use positive depths.
    pred_depth = np.random.random([2, 1, 480, 640]) + 0.1
    gt_depth = np.random.random([2, 1, 480, 640]) + 0.1

    # run on GPU when available so the demo also works on CPU-only machines
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    pred = torch.from_numpy(pred_depth).to(device)
    gt = torch.from_numpy(gt_depth).to(device)
    mask = gt > 0

    loss = gradient(pred, gt, mask)
    print(loss)
|
external/Metric3D/training/mono/model/losses/HDNL.py
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
class HDNLoss(nn.Module):
    """Hierarchical depth normalization (HDN) loss.

    The valid GT depth range of each sample is recursively split into
    1, 2, ..., 2**(grid-1) equal sub-ranges (2**grid - 1 masks in total);
    a scale-and-shift-invariant MAE is then computed over every masked
    region jointly:

        loss = MAE((d - median(d)) / s - (d' - median(d')) / s'),
        with  s = mean(|d - median(d)|)  over the valid pixels.
    """
    def __init__(self, loss_weight=1, grid=3, data_type=['sfm', 'stereo', 'lidar'], **kwargs):
        super(HDNLoss, self).__init__()
        self.loss_weight = loss_weight  # scalar applied to the final loss
        self.grid = grid                # number of hierarchy levels
        self.data_type = data_type      # dataset annotation types this loss supports

    def get_hierachy_masks(self, grid, depth_gt, mask_valid):
        """Build the hierarchical depth-range masks for a batch.

        For each sample, the [min, max] range of its valid GT depths is
        split into 1, 2, ..., 2**(grid-1) equal bins, yielding
        2**grid - 1 boolean masks per sample.

        Returns a tensor of shape [2**grid - 1, B, 1, H, W].
        """
        batch_map_grid = []
        for mask_index in range(depth_gt.shape[0]):
            depth_map = depth_gt[mask_index]
            valid_map = mask_valid[mask_index]

            # print (depth_map[valid_map].view(-1).shape)
            if depth_map[valid_map].numel() == 0:
                # No valid pixel in this sample: repeat the (all-False)
                # valid map so the output keeps a fixed mask count.
                map_grid_list = [valid_map for _ in range(2 ** (grid) - 1)]
            else:
                valid_values = depth_map[valid_map]

                max_d = valid_values.max()
                min_d = valid_values.min()

                # anchors [2**-(grid-1), ..., 1/2, 1]: each anchor a splits
                # the range into int(1/a) bins.
                anchor_power = [(1 / 2) ** (i) for i in range(grid)]
                anchor_power.reverse()

                map_grid_list = []
                for anchor in anchor_power:
                    # range
                    for i in range(int(1 / anchor)):
                        # 1e-30 keeps the upper bound inclusive for the
                        # last bin (max_d itself).
                        mask_new = (depth_map >= min_d + (max_d - min_d) * i * anchor) & (
                            depth_map < min_d + (max_d - min_d) * (i + 1) * anchor+1e-30)
                        # print (f'[{i*anchor},{(i+1)*anchor}]')
                        mask_new = mask_new & valid_map
                        map_grid_list.append(mask_new)
            map_grid_list = torch.stack(map_grid_list, dim=0)
            batch_map_grid.append(map_grid_list)
        batch_map_grid = torch.stack(batch_map_grid, dim=1)
        return batch_map_grid

    def ssi_mae(self, prediction, target, mask_valid):
        """Scale-and-shift-invariant MAE.

        Both maps are normalized per (batch, channel) plane: subtract the
        median and divide by the mean absolute deviation, both computed
        over the valid pixels only (invalid pixels are set to NaN so
        ``nanmedian`` ignores them).

        Returns (pred_trans, gt_trans, loss), loss being the mean over
        valid pixels.
        """
        B, C, H, W = target.shape
        # NaN-out invalid pixels so nanmedian only sees valid values.
        prediction_nan = prediction.clone()
        target_nan = target.clone()
        prediction_nan[~mask_valid] = float('nan')
        target_nan[~mask_valid] = float('nan')

        # Valid-pixel count per (B, C) plane; epsilon avoids division by zero.
        valid_pixs = mask_valid.reshape((B, C,-1)).sum(dim=2, keepdims=True) + 1e-10
        valid_pixs = valid_pixs[:, :, :, None]

        gt_median = target_nan.reshape((B, C,-1)).nanmedian(2, keepdims=True)[0].unsqueeze(-1) # [b,c,h,w]
        gt_median[torch.isnan(gt_median)] = 0  # planes with no valid pixel at all
        gt_diff = (torch.abs(target - gt_median) * mask_valid).reshape((B, C, -1))
        gt_s = gt_diff.sum(dim=2)[:, :, None, None] / valid_pixs  # mean abs deviation
        gt_trans = (target - gt_median) / (gt_s + 1e-8)

        pred_median = prediction_nan.reshape((B, C,-1)).nanmedian(2, keepdims=True)[0].unsqueeze(-1) # [b,c,h,w]
        pred_median[torch.isnan(pred_median)] = 0
        pred_diff = (torch.abs(prediction - pred_median) * mask_valid).reshape((B, C, -1))
        pred_s = pred_diff.sum(dim=2)[:, :, None, None] / valid_pixs
        pred_trans = (prediction - pred_median) / (pred_s + 1e-8)

        loss = torch.sum(torch.abs(gt_trans - pred_trans)*mask_valid) / (torch.sum(mask_valid) + 1e-8)
        return pred_trans, gt_trans, loss

    def forward(self, prediction, target, mask=None, **kwargs):
        """Compute the hierarchical SSI-MAE loss.

        The (2**grid - 1) per-sample masks are folded into the batch
        dimension so a single ssi_mae call normalizes every masked region
        independently.
        """
        B, C, H, W = target.shape
        hierachy_masks = self.get_hierachy_masks(self.grid, target, mask)
        hierachy_masks_shape = hierachy_masks.reshape(-1, C, H, W)
        prediction_hie = prediction.unsqueeze(0).repeat(hierachy_masks.shape[0], 1, 1, 1, 1).reshape(-1, C, H, W)

        target_hie = target.unsqueeze(0).repeat(hierachy_masks.shape[0], 1, 1, 1, 1).reshape(-1, C, H, W)

        #_, _, loss = self.ssi_mae(prediction, target, mask)
        _, _, loss = self.ssi_mae(prediction_hie, target_hie, hierachy_masks_shape)
        return loss * self.loss_weight
|
| 87 |
+
|
| 88 |
+
if __name__ == '__main__':
    # Quick smoke test (requires a CUDA device): random prediction/GT pair
    # with an invalid (-1) region excluded by the mask.
    ssil = HDNLoss()
    pred = torch.rand((2, 1, 256, 256)).cuda()
    gt = torch.rand((2, 1, 256, 256)).cuda()#torch.zeros_like(pred).cuda() #
    gt[:, :, 100:256, 0:100] = -1
    mask = gt > 0
    out = ssil(pred, gt, mask)
    print(out)
|
external/Metric3D/training/mono/model/losses/HDNL_random.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
class HDNRandomLoss(nn.Module):
    """Hierarchical depth normalization loss with random depth ranges.

    Same scale-and-shift-invariant MAE as the hierarchical variant, but the
    fixed recursive depth bins are replaced by ``random_num`` randomly
    sampled depth sub-ranges per sample:

        loss = MAE((d - median(d)) / s - (d' - median(d')) / s'),
        with  s = mean(|d - median(d)|)  over the valid pixels.
    """
    def __init__(self, loss_weight=1, random_num=32, data_type=['sfm', 'stereo', 'lidar', 'denselidar', 'denselidar_nometric', 'denselidar_syn'], norm_dataset=['Taskonomy', 'Matterport3D', 'Replica', 'Hypersim'], disable_dataset=['MapillaryPSD'], **kwargs):
        super(HDNRandomLoss, self).__init__()
        self.loss_weight = loss_weight      # scalar applied to the final loss
        self.random_num = random_num        # number of random depth ranges per sample
        self.eps = 1e-6                     # numerical guard for divisions
        self.data_type = data_type          # dataset annotation types this loss supports
        self.disable_dataset = disable_dataset  # datasets excluded from this loss
        # NOTE(review): ``norm_dataset`` is accepted but never stored/used here.

    def get_random_masks_for_batch(self, depth_gt: torch.Tensor, mask_valid: torch.Tensor)-> torch.Tensor:
        """Sample ``random_num`` random depth sub-ranges of one sample.

        Range starts are drawn in the lower 75% of the valid depth span and
        the end at least 10% of the span above the start.  Returns boolean
        masks of shape [random_num, 1, H, W].
        """
        valid_values = depth_gt[mask_valid]
        # Degenerate (no valid pixel) samples fall back to a zero span.
        max_d = valid_values.max().item() if valid_values.numel() > 0 else 0.0
        min_d = valid_values.min().item() if valid_values.numel() > 0 else 0.0

        sample_min_d = np.random.uniform(0, 0.75, self.random_num) * (max_d - min_d) + min_d
        sample_max_d = np.random.uniform(sample_min_d + 0.1, 1-self.eps, self.random_num) * (max_d - min_d) + min_d

        # 1e-30 keeps the upper bound effectively inclusive.
        mask_new = [(depth_gt >= sample_min_d[i]) & (depth_gt < sample_max_d[i] + 1e-30) & mask_valid for i in range(self.random_num)]
        # NOTE(review): device is hard-coded to CUDA here; CPU-only runs will fail.
        mask_new = torch.stack(mask_new, dim=0).cuda() #[N, 1, H, W]
        return mask_new

    def ssi_mae(self, prediction, target, mask_valid):
        """Scale-and-shift-invariant absolute error, returned as a SUM.

        Per (batch, channel) plane, both maps are shifted by the valid-pixel
        median and scaled by the valid-pixel mean absolute deviation
        (invalid pixels are NaN-ed so ``nanmedian`` ignores them).  The
        prediction copy used for the statistics is detached, so gradients
        flow only through the normalized prediction itself.
        """
        B, C, H, W = target.shape
        prediction_nan = prediction.clone().detach()
        target_nan = target.clone()
        prediction_nan[~mask_valid] = float('nan')
        target_nan[~mask_valid] = float('nan')

        # Valid-pixel count per (B, C) plane; eps avoids division by zero.
        valid_pixs = mask_valid.reshape((B, C,-1)).sum(dim=2, keepdims=True) + self.eps
        valid_pixs = valid_pixs[:, :, :, None]

        gt_median = target_nan.reshape((B, C,-1)).nanmedian(2, keepdims=True)[0].unsqueeze(-1) # [b,c,h,w]
        gt_median[torch.isnan(gt_median)] = 0  # planes with no valid pixel
        gt_diff = (torch.abs(target - gt_median) * mask_valid).reshape((B, C, -1))
        gt_s = gt_diff.sum(dim=2)[:, :, None, None] / valid_pixs  # mean abs deviation
        gt_trans = (target - gt_median) / (gt_s + self.eps)

        pred_median = prediction_nan.reshape((B, C,-1)).nanmedian(2, keepdims=True)[0].unsqueeze(-1) # [b,c,h,w]
        pred_median[torch.isnan(pred_median)] = 0
        pred_diff = (torch.abs(prediction - pred_median) * mask_valid).reshape((B, C, -1))
        pred_s = pred_diff.sum(dim=2)[:, :, None, None] / valid_pixs
        pred_trans = (prediction - pred_median) / (pred_s + self.eps)

        # Sum (not mean): the caller divides by the global valid-pixel count.
        loss_sum = torch.sum(torch.abs(gt_trans - pred_trans)*mask_valid)
        return loss_sum

    def forward(self, prediction, target, mask=None, **kwargs):
        """Compute the random-range SSI-MAE loss.

        Requires ``kwargs['dataset']``: a per-sample sequence of dataset
        names used to zero out samples from ``disable_dataset``.
        """
        B, C, H, W = target.shape

        loss = 0.0
        valid_pix = 0.0

        device = target.device

        # Per-sample on/off switch derived from the dataset names.
        batches_dataset = kwargs['dataset']
        self.batch_valid = torch.tensor([1 if batch_dataset not in self.disable_dataset else 0 \
            for batch_dataset in batches_dataset], device=device)[:,None,None,None]

        # Process the random masks in chunks of ``batch_limit`` to bound memory.
        batch_limit = 4
        loops = int(np.ceil(self.random_num / batch_limit))
        for i in range(B):
            mask_i = mask[i, ...] #[1, H, W]

            if self.batch_valid[i, ...] < 0.5:
                # Disabled sample: zero-valued terms that still reference
                # ``prediction`` so its graph participates in backward.
                loss += 0 * torch.sum(prediction[i, ...])
                valid_pix += 0 * torch.sum(mask_i)
                continue

            pred_i = prediction[i, ...].unsqueeze(0).repeat(batch_limit, 1, 1, 1)
            target_i = target[i, ...].unsqueeze(0).repeat(batch_limit, 1, 1, 1)
            mask_random_drange = self.get_random_masks_for_batch(target[i, ...], mask_i) # [N, 1, H, W]
            for j in range(loops):
                mask_random_loopi = mask_random_drange[j*batch_limit:(j+1)*batch_limit, ...]
                loss += self.ssi_mae(
                    prediction=pred_i[:mask_random_loopi.shape[0], ...],
                    target=target_i[:mask_random_loopi.shape[0], ...],
                    mask_valid=mask_random_loopi)
                valid_pix += torch.sum(mask_random_loopi)

        loss = loss / (valid_pix + self.eps)
        # Guard against NaN/Inf (e.g. no valid pixels anywhere).
        if torch.isnan(loss).item() | torch.isinf(loss).item():
            loss = 0 * torch.sum(prediction)
            print(f'HDNL NAN error, {loss}, valid pix: {valid_pix}')
        return loss * self.loss_weight
|
| 96 |
+
|
| 97 |
+
if __name__ == '__main__':
    # Smoke test (requires CUDA). GT is all-negative so the mask is empty,
    # exercising the degenerate/NaN-guard path.
    # NOTE(review): forward() reads kwargs['dataset'], so this call as
    # written raises KeyError — pass dataset=[...] to actually run it.
    ssil = HDNRandomLoss()
    pred = torch.rand((2, 1, 256, 256)).cuda()
    gt = - torch.rand((2, 1, 256, 256)).cuda()#torch.zeros_like(pred).cuda() #
    gt[:, :, 100:256, 0:100] = -1
    mask = gt > 0
    out = ssil(pred, gt, mask)
    print(out)
|
external/Metric3D/training/mono/model/losses/HDSNL.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
class HDSNLoss(nn.Module):
    """Hierarchical depth SPATIAL normalization loss.

    Like HDN, but the hierarchy is over image space: the image is split
    into 1x1, 2x2, ..., 2**(grid-1) x 2**(grid-1) tiles, and a
    scale-and-shift-invariant MAE is computed over every tile:

        loss = MAE((d - median(d)) / s - (d' - median(d')) / s'),
        with  s = mean(|d - median(d)|)  over the valid pixels.
    """
    def __init__(self, loss_weight=1.0, grid=3, data_type=['sfm', 'stereo', 'lidar'], **kwargs):
        super(HDSNLoss, self).__init__()
        self.loss_weight = loss_weight  # scalar applied to the final loss
        self.grid = grid                # number of spatial hierarchy levels
        self.data_type = data_type      # dataset annotation types this loss supports

    def get_hierachy_masks(self, batch, image_size, mask):
        """Build the spatial tile masks, ANDed with the validity mask.

        Returns a boolean tensor of shape [N, B, 1, H, W] where
        N = sum over levels of (tiles per level).
        """
        height, width = image_size
        # anchors [2**-(grid-1), ..., 1/2, 1]: anchor a -> (1/a) x (1/a) tiles.
        anchor_power = [(1 / 2) ** (i) for i in range(self.grid)]
        anchor_power.reverse()

        map_grid_list = []
        for anchor in anchor_power: # e.g. 1/8
            for h in range(int(1 / anchor)):
                for w in range(int(1 / anchor)):
                    # NOTE(review): device hard-coded to CUDA; CPU-only runs fail.
                    mask_new = torch.zeros((batch, 1, height, width), dtype=torch.bool).cuda()
                    mask_new[:, :, int(h * anchor * height):int((h + 1) * anchor * height),
                        int(w * anchor * width):int((w + 1) * anchor * width)] = True
                    mask_new = mask & mask_new
                    map_grid_list.append(mask_new)
        batch_map_grid=torch.stack(map_grid_list,dim=0) # [N, B, 1, H, W]

        return batch_map_grid

    def ssi_mae(self, prediction, target, mask_valid):
        """Scale-and-shift-invariant MAE (mean over valid pixels).

        Per (batch, channel) plane: shift by the valid-pixel median and
        scale by the valid-pixel mean absolute deviation; invalid pixels
        are NaN-ed so ``nanmedian`` ignores them.
        Returns (pred_trans, gt_trans, loss).
        """
        B, C, H, W = target.shape
        prediction_nan = prediction.clone()
        target_nan = target.clone()
        prediction_nan[~mask_valid] = float('nan')
        target_nan[~mask_valid] = float('nan')

        # Valid-pixel count per (B, C) plane; eps avoids division by zero.
        valid_pixs = mask_valid.reshape((B, C,-1)).sum(dim=2, keepdims=True) + 1e-10
        valid_pixs = valid_pixs[:, :, :, None]

        gt_median = target_nan.reshape((B, C,-1)).nanmedian(2, keepdims=True)[0].unsqueeze(-1) # [b,c,h,w]
        gt_median[torch.isnan(gt_median)] = 0  # planes with no valid pixel
        gt_diff = (torch.abs(target - gt_median) * mask_valid).reshape((B, C, -1))
        gt_s = gt_diff.sum(dim=2)[:, :, None, None] / valid_pixs  # mean abs deviation
        gt_trans = (target - gt_median) / (gt_s + 1e-8)

        pred_median = prediction_nan.reshape((B, C,-1)).nanmedian(2, keepdims=True)[0].unsqueeze(-1) # [b,c,h,w]
        pred_median[torch.isnan(pred_median)] = 0
        pred_diff = (torch.abs(prediction - pred_median) * mask_valid).reshape((B, C, -1))
        pred_s = pred_diff.sum(dim=2)[:, :, None, None] / valid_pixs
        pred_trans = (prediction - pred_median) / (pred_s + 1e-8)

        loss = torch.sum(torch.abs(gt_trans - pred_trans)*mask_valid) / (torch.sum(mask_valid) + 1e-8)
        return pred_trans, gt_trans, loss

    def forward(self, prediction, target, mask=None, **kwargs):
        """Compute the hierarchical spatial SSI-MAE loss.

        The N tile masks are folded into the batch dimension so one
        ssi_mae call normalizes every tile independently.
        """
        B, C, H, W = target.shape
        hierachy_masks = self.get_hierachy_masks(B, (H, W), mask) # [N, B, 1, H, W]
        hierachy_masks_shape = hierachy_masks.reshape(-1, C, H, W)
        prediction_hie = prediction.unsqueeze(0).repeat(hierachy_masks.shape[0], 1, 1, 1, 1).reshape(-1, C, H, W)

        target_hie = target.unsqueeze(0).repeat(hierachy_masks.shape[0], 1, 1, 1, 1).reshape(-1, C, H, W)

        #_, _, loss = self.ssi_mae(prediction, target, mask)
        _, _, loss = self.ssi_mae(prediction_hie, target_hie, hierachy_masks_shape)
        return loss * self.loss_weight
|
| 72 |
+
|
| 73 |
+
if __name__ == '__main__':
    # Deterministic smoke test (requires a CUDA device): random
    # prediction/GT pair with an invalid (-1) region masked out.
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    ssil = HDSNLoss()
    pred = torch.rand((2, 1, 256, 256)).cuda()
    gt = torch.rand((2, 1, 256, 256)).cuda()#torch.zeros_like(pred).cuda() #
    gt[:, :, 100:256, 0:100] = -1
    mask = gt > 0
    out = ssil(pred, gt, mask)
    print(out)
|
external/Metric3D/training/mono/model/losses/HDSNL_random.py
ADDED
|
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import numpy as np
|
| 4 |
+
#from numba import jit
|
| 5 |
+
|
| 6 |
+
class HDSNRandomLoss(nn.Module):
    """Hierarchical depth spatial normalization loss with random crops.

    The fixed spatial grid of HDSN is replaced by ``random_num`` random
    rectangular crops (optionally augmented by per-class semantic masks),
    plus one whole-image term:

        loss = MAE((d - median(d)) / s - (d' - median(d')) / s'),
        with  s = mean(|d - median(d)|)  over the valid pixels.
    """
    def __init__(self, loss_weight=1.0, random_num=32, data_type=['sfm', 'stereo', 'lidar', 'denselidar', 'denselidar_nometric','denselidar_syn'], disable_dataset=['MapillaryPSD'], sky_id=142, batch_limit=8, **kwargs):
        super(HDSNRandomLoss, self).__init__()
        self.loss_weight = loss_weight      # scalar applied to the final loss
        self.random_num = random_num        # number of random crops per image
        self.data_type = data_type          # dataset annotation types this loss supports
        self.sky_id = sky_id                # semantic label id excluded as "sky"
        self.batch_limit = batch_limit      # masks processed per chunk (memory bound)
        self.eps = 1e-6                     # numerical guard for divisions
        self.disable_dataset = disable_dataset  # datasets excluded from this loss

    def get_random_masks_for_batch(self, image_size: list)-> torch.Tensor:
        """Sample ``random_num`` random rectangular crop masks.

        Crop sides are 12.5%-50% of the image side; positions are uniform.
        Returns a boolean tensor of shape [random_num, H, W].
        NOTE(review): np.random.choice(..., replace=False) requires the
        crop-size ranges and position ranges to contain at least
        ``random_num`` values, i.e. sufficiently large images.
        """
        height, width = image_size
        crop_h_min = int(0.125 * height)
        crop_h_max = int(0.5 * height)
        crop_w_min = int(0.125 * width)
        crop_w_max = int(0.5 * width)
        h_max = height - crop_h_min
        w_max = width - crop_w_min
        crop_height = np.random.choice(np.arange(crop_h_min, crop_h_max), self.random_num, replace=False)
        crop_width = np.random.choice(np.arange(crop_w_min, crop_w_max), self.random_num, replace=False)
        crop_y = np.random.choice(h_max, self.random_num, replace=False)
        crop_x = np.random.choice(w_max, self.random_num, replace=False)
        # Clamp crop ends to the image border.
        crop_y_end = crop_height + crop_y
        crop_y_end[crop_y_end>=height] = height
        crop_x_end = crop_width + crop_x
        crop_x_end[crop_x_end>=width] = width

        # NOTE(review): device hard-coded to CUDA; CPU-only runs fail.
        mask_new = torch.zeros((self.random_num, height, width), dtype=torch.bool, device="cuda") #.cuda() #[N, H, W]
        for i in range(self.random_num):
            mask_new[i, crop_y[i]:crop_y_end[i], crop_x[i]:crop_x_end[i]] = True

        return mask_new
        #return crop_y, crop_y_end, crop_x, crop_x_end

    def reorder_sem_masks(self, sem_label):
        """Turn one [1, H, W] semantic label map into per-class masks.

        Ignores non-positive ids and the sky class, drops classes with
        <= 500 pixels, and caps the count at ``random_num``.  Falls back
        to the single mask ``sem_label > 0`` when nothing qualifies.
        """
        # reorder the semantic mask of a batch
        assert sem_label.ndim == 3
        semantic_ids = torch.unique(sem_label[(sem_label>0) & (sem_label != self.sky_id)])
        sem_masks = [sem_label == id for id in semantic_ids]
        if len(sem_masks) == 0:
            # no valid semantic labels
            out = sem_label > 0
            return out

        sem_masks = torch.cat(sem_masks, dim=0)
        # Keep only classes covering more than 500 pixels.
        mask_batch = torch.sum(sem_masks.reshape(sem_masks.shape[0], -1), dim=1) > 500
        sem_masks = sem_masks[mask_batch]
        if sem_masks.shape[0] > self.random_num:
            balance_samples = np.random.choice(sem_masks.shape[0], self.random_num, replace=False)
            sem_masks = sem_masks[balance_samples, ...]

        if sem_masks.shape[0] == 0:
            # no valid semantic labels
            out = sem_label > 0
            return out

        if sem_masks.ndim == 2:
            sem_masks = sem_masks[None, :, :]
        return sem_masks

    def ssi_mae(self, prediction, target, mask_valid):
        """Scale-and-shift-invariant absolute error, returned as a SUM.

        The prediction used for the median statistic is detached.
        NOTE(review): unlike the HDNL variants, the absolute deviation
        here is summed over ALL pixels (no mask multiplication) before
        dividing by the valid-pixel count — confirm this is intentional.
        """
        B, C, H, W = target.shape
        prediction_nan = prediction.clone().detach()
        target_nan = target.clone()
        prediction_nan[~mask_valid] = float('nan')
        target_nan[~mask_valid] = float('nan')

        valid_pixs = mask_valid.reshape((B, C,-1)).sum(dim=2, keepdims=True) + 1e-10
        valid_pixs = valid_pixs[:, :, :, None]

        gt_median = target_nan.reshape((B, C,-1)).nanmedian(2, keepdims=True)[0].unsqueeze(-1) # [b,c,h,w]
        gt_median[torch.isnan(gt_median)] = 0
        gt_diff = (torch.abs(target - gt_median) ).reshape((B, C, -1))
        gt_s = gt_diff.sum(dim=2)[:, :, None, None] / valid_pixs
        gt_trans = (target - gt_median) / (gt_s + self.eps)

        pred_median = prediction_nan.reshape((B, C,-1)).nanmedian(2, keepdims=True)[0].unsqueeze(-1) # [b,c,h,w]
        pred_median[torch.isnan(pred_median)] = 0
        pred_diff = (torch.abs(prediction - pred_median)).reshape((B, C, -1))
        pred_s = pred_diff.sum(dim=2)[:, :, None, None] / valid_pixs
        pred_trans = (prediction - pred_median) / (pred_s + self.eps)

        # Sum (not mean): the caller divides by the global valid-pixel count.
        loss_sum = torch.sum(torch.abs(gt_trans - pred_trans)*mask_valid)
        return loss_sum

    def conditional_ssi_mae(self, prediction, target, mask_valid):
        """SSI error where each plane is normalized with ANOTHER plane's
        statistics (random permutation). Currently only invoked from
        commented-out code in forward()."""
        B, C, H, W = target.shape
        conditional_rank_ids = np.random.choice(B, B, replace=False)

        prediction_nan = prediction.clone()
        target_nan = target.clone()
        prediction_nan[~mask_valid] = float('nan')
        target_nan[~mask_valid] = float('nan')

        valid_pixs = mask_valid.reshape((B, C,-1)).sum(dim=2, keepdims=True) + self.eps
        valid_pixs = valid_pixs[:, :, :, None].contiguous()

        gt_median = target_nan.reshape((B, C,-1)).nanmedian(2, keepdims=True)[0].unsqueeze(-1) # [b,c,h,w]
        gt_median[torch.isnan(gt_median)] = 0
        gt_diff = (torch.abs(target - gt_median) * mask_valid).reshape((B, C,-1))
        gt_s = gt_diff.sum(dim=2)[:, :, None, None].contiguous() / valid_pixs

        # in case some batches have no valid pixels
        gt_s_small_mask = gt_s < (torch.mean(gt_s)*0.1)
        gt_s[gt_s_small_mask] = torch.mean(gt_s)
        gt_trans = (target - gt_median[conditional_rank_ids]) / (gt_s[conditional_rank_ids] + self.eps)

        pred_median = prediction_nan.reshape((B, C,-1)).nanmedian(2, keepdims=True)[0].unsqueeze(-1) # [b,c,h,w]
        pred_median[torch.isnan(pred_median)] = 0
        pred_diff = (torch.abs(prediction - pred_median) * mask_valid).reshape((B, C,-1))
        pred_s = pred_diff.sum(dim=2)[:, :, None, None].contiguous() / valid_pixs
        pred_s[gt_s_small_mask] = torch.mean(pred_s)
        pred_trans = (prediction - pred_median[conditional_rank_ids]) / (pred_s[conditional_rank_ids] + self.eps)

        loss_sum = torch.sum(torch.abs(gt_trans - pred_trans)*mask_valid)
        # print(torch.abs(gt_trans - pred_trans)[mask_valid])
        return loss_sum


    def forward(self, prediction, target, mask=None, sem_mask=None, **kwargs):
        """Compute the random-crop spatial SSI-MAE loss.

        Requires ``kwargs['dataset']``: per-sample dataset names used to
        zero out samples from ``disable_dataset``.  The same random crop
        masks are shared by all samples of the batch; per-sample semantic
        masks are appended when ``sem_mask`` is given.
        """
        B, C, H, W = target.shape

        loss = 0.0
        valid_pix = 0.0

        device = target.device

        # Per-sample on/off switch derived from the dataset names.
        batches_dataset = kwargs['dataset']
        self.batch_valid = torch.tensor([1 if batch_dataset not in self.disable_dataset else 0 \
            for batch_dataset in batches_dataset], device=device)[:,None,None,None]

        batch_limit = self.batch_limit

        random_sample_masks = self.get_random_masks_for_batch((H, W)) # [N, H, W]
        for i in range(B):
            # each batch
            mask_i = mask[i, ...] #[1, H, W]
            if self.batch_valid[i, ...] < 0.5:
                # Disabled sample: zero-valued terms that still reference
                # ``prediction`` so its graph participates in backward.
                loss += 0 * torch.sum(prediction[i, ...])
                valid_pix += 0 * torch.sum(mask_i)
                continue

            pred_i = prediction[i, ...].unsqueeze(0).repeat(batch_limit, 1, 1, 1)
            target_i = target[i, ...].unsqueeze(0).repeat(batch_limit, 1, 1, 1)

            # get semantic masks
            sem_label_i = sem_mask[i, ...] if sem_mask is not None else None
            if sem_label_i is not None:
                sem_masks = self.reorder_sem_masks(sem_label_i) # [N, H, W]
                random_sem_masks = torch.cat([random_sample_masks, sem_masks], dim=0)
            else:
                random_sem_masks = random_sample_masks
            #random_sem_masks = random_sample_masks


            sampled_masks_num = random_sem_masks.shape[0]
            loops = int(np.ceil(sampled_masks_num / batch_limit))
            # Kept for the commented-out conditional-SSI experiment below.
            conditional_rank_ids = np.random.choice(sampled_masks_num, sampled_masks_num, replace=False)

            for j in range(loops):
                mask_random_sem_loopi = random_sem_masks[j*batch_limit:(j+1)*batch_limit, ...]
                mask_sample = (mask_i & mask_random_sem_loopi).unsqueeze(1) # [N, 1, H, W]
                loss += self.ssi_mae(
                    prediction=pred_i[:mask_sample.shape[0], ...],
                    target=target_i[:mask_sample.shape[0], ...],
                    mask_valid=mask_sample)
                valid_pix += torch.sum(mask_sample)

                # conditional ssi loss
                # rerank_mask_random_sem_loopi = random_sem_masks[conditional_rank_ids, ...][j*batch_limit:(j+1)*batch_limit, ...]
                # rerank_mask_sample = (mask_i & rerank_mask_random_sem_loopi).unsqueeze(1) # [N, 1, H, W]
                # loss_cond = self.conditional_ssi_mae(
                #     prediction=pred_i[:rerank_mask_sample.shape[0], ...],
                #     target=target_i[:rerank_mask_sample.shape[0], ...],
                #     mask_valid=rerank_mask_sample)
                # print(loss_cond / (torch.sum(rerank_mask_sample) + 1e-10), loss_cond, torch.sum(rerank_mask_sample))
                # loss += loss_cond
                # valid_pix += torch.sum(rerank_mask_sample)

        # crop_y, crop_y_end, crop_x, crop_x_end = self.get_random_masks_for_batch((H, W)) # [N,]
        # for j in range(B):
        #     for i in range(self.random_num):
        #         mask_crop = mask[j, :, crop_y[i]:crop_y_end[i], crop_x[i]:crop_x_end[i]][None, ...] #[1, 1, crop_h, crop_w]
        #         target_crop = target[j, :, crop_y[i]:crop_y_end[i], crop_x[i]:crop_x_end[i]][None, ...]
        #         pred_crop = prediction[j, :, crop_y[i]:crop_y_end[i], crop_x[i]:crop_x_end[i]][None, ...]
        #         loss += self.ssi_mae(prediction=pred_crop, target=target_crop, mask_valid=mask_crop)
        #         valid_pix += torch.sum(mask_crop)

        # the whole image
        mask = mask * self.batch_valid.bool()
        loss += self.ssi_mae(
            prediction=prediction,
            target=target,
            mask_valid=mask)
        valid_pix += torch.sum(mask)
        loss = loss / (valid_pix + self.eps)
        # Guard against NaN/Inf (e.g. no valid pixels anywhere).
        if torch.isnan(loss).item() | torch.isinf(loss).item():
            loss = 0 * torch.sum(prediction)
            print(f'HDSNL NAN error, {loss}, valid pix: {valid_pix}')
        return loss * self.loss_weight
|
| 215 |
+
|
| 216 |
+
if __name__ == '__main__':
    # Deterministic smoke test (requires CUDA): masks out most GT, and the
    # synthetic sem_mask has no positive labels (all set to -1), so the
    # semantic-fallback path is exercised.
    # NOTE(review): forward() reads kwargs['dataset'], so this call as
    # written raises KeyError — pass dataset=[...] to actually run it.
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)
    ssil = HDSNRandomLoss()
    pred = torch.rand((8, 1, 256, 512)).cuda()
    gt = torch.rand((8, 1, 256, 512)).cuda()#torch.zeros_like(pred).cuda() #
    gt[1:, :, 100:256, 100:350] = -1
    gt[:2, ...] = -1
    mask = gt > 0
    sem_mask = np.random.randint(-1, 200, (8, 1, 256, 512))
    sem_mask[sem_mask>0] = -1
    sem_mask_torch = torch.from_numpy(sem_mask).cuda()

    out = ssil(pred, gt, mask, sem_mask_torch)
    print(out)
|
external/Metric3D/training/mono/model/losses/L1.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
class L1Loss(nn.Module):
    """Masked mean absolute error on depth.

    Only pixels where ``mask`` is non-zero contribute; the summed absolute
    error is divided by the valid-pixel count (plus a small epsilon).  A
    non-finite result is replaced by a zero-valued term that still depends
    on ``prediction`` so the autograd graph stays connected.
    """
    def __init__(self, loss_weight=1, data_type=['lidar', 'denselidar', 'stereo', 'denselidar_syn'], **kwargs):
        super(L1Loss, self).__init__()
        self.loss_weight = loss_weight  # scalar applied to the final loss
        self.data_type = data_type      # dataset annotation types this loss supports
        self.eps = 1e-6                 # guards against division by zero

    def forward(self, prediction, target, mask=None, **kwargs):
        """Return the masked mean L1 error, scaled by ``loss_weight``."""
        masked_err = torch.abs(prediction - target) * mask
        loss = masked_err.sum() / (mask.sum() + self.eps)
        if torch.isnan(loss).item() | torch.isinf(loss).item():
            # Fall back to a graph-connected zero and report the anomaly.
            loss = 0 * torch.sum(prediction)
            print(f'L1 NAN error, {loss}')
        return loss * self.loss_weight
|
| 22 |
+
|
| 23 |
+
class L1DispLoss(nn.Module):
    """Masked mean absolute error in disparity (inverse-depth) space.

    Identical in structure to :class:`L1Loss` but operates on a predicted
    disparity map against a ground-truth inverse depth map.
    """
    def __init__(self, loss_weight=1, data_type=['lidar', 'denselidar', 'stereo', 'denselidar_syn'], **kwargs):
        super(L1DispLoss, self).__init__()
        self.loss_weight = loss_weight  # scalar applied to the final loss
        self.data_type = data_type      # dataset annotation types this loss supports
        self.eps = 1e-6                 # guards against division by zero

    def forward(self, prediction_disp, inv_depth, mask=None, **kwargs):
        """Return the masked mean |prediction_disp - inv_depth|, scaled."""
        err_sum = torch.sum(torch.abs(prediction_disp - inv_depth) * mask)
        loss = err_sum / (torch.sum(mask) + self.eps)
        if torch.isnan(loss).item() | torch.isinf(loss).item():
            # Graph-connected zero fallback on non-finite results.
            loss = 0 * torch.sum(prediction_disp)
        return loss * self.loss_weight
|
| 43 |
+
|
| 44 |
+
class L1InverseLoss(nn.Module):
    """L1 loss between inverted prediction and a GT inverse-depth map.

    The predicted depth is mapped to ``10 / prediction`` before the masked
    L1 comparison; pixels with non-positive GT inverse depth are excluded.
    """
    def __init__(self, loss_weight=1, data_type=['lidar', 'denselidar', 'stereo'], **kwargs):
        super(L1InverseLoss, self).__init__()
        self.loss_weight = loss_weight  # scalar applied to the final loss
        self.data_type = data_type      # dataset annotation types this loss supports
        self.eps = 1e-6                 # guards against division by zero

    def forward(self, prediction, inv_depth, mask=None, **kwargs):
        """Return the masked mean |10/prediction - inv_depth|, scaled."""
        # Restrict the mask to pixels with a positive GT inverse depth.
        valid = torch.logical_and(mask, inv_depth > 0)
        inv_pred = 1.0 / prediction * 10.0
        inv_pred[~valid] = -1  # placeholder; zeroed out by the mask below
        err_sum = torch.sum(torch.abs(inv_pred - inv_depth) * valid)
        loss = err_sum / (torch.sum(valid) + self.eps)
        if torch.isnan(loss).item() | torch.isinf(loss).item():
            # Graph-connected zero fallback on non-finite results.
            loss = 0 * torch.sum(inv_pred)
        return loss * self.loss_weight
|
external/Metric3D/training/mono/model/losses/NormalBranchLoss.py
ADDED
|
@@ -0,0 +1,732 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
from .depth_to_normal import Depth2Normal
|
| 6 |
+
|
| 7 |
+
# compute loss
class NormalBranchLoss(nn.Module):
    """Supervision loss for the surface-normal branch.

    Only the uncertainty-guided variant ('UG_NLL_ours') and the GRU-sequence
    variants ('NLL_ours_GRU', 'NLL_ours_GRU_auxi') are implemented; both are
    based on the angular vonMF negative log-likelihood.
    """

    def __init__(self, loss_weight=1.0, data_type=['sfm', 'stereo', 'denselidar', 'denselidar_syn'], d2n_dataset=['ScanNetAll'], loss_fn='UG_NLL_ours', **kwargs):
        """loss_fn can be one of following:
        - L1            - L1 loss (no uncertainty)
        - L2            - L2 loss (no uncertainty)
        - AL            - Angular loss (no uncertainty)
        - NLL_vMF       - NLL of vonMF distribution
        - NLL_ours      - NLL of Angular vonMF distribution
        - UG_NLL_vMF    - NLL of vonMF distribution (+ pixel-wise MLP + uncertainty-guided sampling)
        - UG_NLL_ours   - NLL of Angular vonMF distribution (+ pixel-wise MLP + uncertainty-guided sampling)
        - NLL_ours_GRU  - NLL of Angular vonMF distribution for GRU sequence
        """
        super(NormalBranchLoss, self).__init__()
        self.loss_type = loss_fn
        if self.loss_type in ['L1', 'L2', 'AL', 'NLL_vMF', 'NLL_ours', 'UG_NLL_vMF']:
            # kept as named-but-unimplemented options for config compatibility
            raise NotImplementedError
        elif self.loss_type in ['UG_NLL_ours']:
            self.loss_fn = self.forward_UG
        elif self.loss_type in ['NLL_ours_GRU', 'NLL_ours_GRU_auxi']:
            # GRU variants reuse the per-iteration 'NLL_ours' pixel loss
            self.loss_type = 'NLL_ours'
            self.loss_fn = self.forward_GRU
            self.loss_gamma = 0.9
            # FIX: explicit default lookup instead of a bare try/except
            self.loss_weight_auxi = kwargs.get('loss_weight_auxi', 0.0)
        else:
            raise Exception('invalid loss type')

        self.loss_weight = loss_weight
        self.data_type = data_type

    def forward(self, **kwargs):
        """Dispatch to the configured loss function and apply the global weight."""
        loss = self.loss_fn(**kwargs)
        return loss * self.loss_weight

    def forward_GRU(self, normal_out_list, normal, target, mask, intrinsic, pad_mask=None, auxi_normal=None, **kwargs):
        """Sequence loss over GRU iterations, later iterations weighted higher
        (RAFT-style exponential weighting)."""
        n_predictions = len(normal_out_list)
        assert n_predictions >= 1
        loss = 0.0

        # valid gt pixels: normal vector is non-zero and inside the valid mask
        gt_normal_mask = ~torch.all(normal == 0, dim=1, keepdim=True) & mask

        if auxi_normal is not None:
            # auxiliary (pseudo) normals supervise exactly the pixels without gt
            auxi_normal_mask = ~gt_normal_mask

        if gt_normal_mask.sum() < 10:
            if auxi_normal is None:
                # zero loss that still touches every output, keeping the graph connected
                for norm_out in normal_out_list:
                    loss += norm_out.sum() * 0
                return loss

        # We adjust the loss_gamma so it is consistent for any number of RAFT-Stereo iterations.
        # FIX: hoisted out of the loop (loop-invariant) and guarded against a
        # ZeroDivisionError when only a single prediction is supplied.
        adjusted_loss_gamma = self.loss_gamma ** (15 / (n_predictions - 1)) if n_predictions > 1 else 1.0

        for i, norm_out in enumerate(normal_out_list):
            i_weight = adjusted_loss_gamma ** (n_predictions - i - 1)

            curr_loss = self.forward_R(norm_out.clone(), normal, gt_normal_mask)
            if auxi_normal is not None:
                auxi_loss = self.forward_R(norm_out.clone(), auxi_normal[:, :3], auxi_normal_mask)
                curr_loss = curr_loss + self.loss_weight_auxi * auxi_loss

            if torch.isnan(curr_loss).item() | torch.isinf(curr_loss).item():
                curr_loss = 0 * torch.sum(norm_out)
                print(f'NormalBranchLoss forward_GRU NAN error, {curr_loss}')

            loss += curr_loss * i_weight

        return loss

    def forward_R(self, norm_out, gt_norm, gt_norm_mask):
        """Per-pixel loss between a (normal, kappa) prediction and gt normals.

        norm_out: (B, 4+, H, W) — channels 0:3 predicted normal, 3: kappa.
        """
        pred_norm, pred_kappa = norm_out[:, 0:3, :, :], norm_out[:, 3:, :, :]

        if self.loss_type == 'L1':
            l1 = torch.sum(torch.abs(gt_norm - pred_norm), dim=1, keepdim=True)
            loss = torch.mean(l1[gt_norm_mask])

        elif self.loss_type == 'L2':
            l2 = torch.sum(torch.square(gt_norm - pred_norm), dim=1, keepdim=True)
            loss = torch.mean(l2[gt_norm_mask])

        elif self.loss_type == 'AL':
            dot = torch.cosine_similarity(pred_norm, gt_norm, dim=1)

            # exclude near-parallel pixels so acos stays numerically safe
            valid_mask = gt_norm_mask[:, 0, :, :].float() \
                         * (dot.detach() < 0.999).float() \
                         * (dot.detach() > -0.999).float()
            valid_mask = valid_mask > 0.0

            al = torch.acos(dot[valid_mask])
            loss = torch.mean(al)

        elif self.loss_type == 'NLL_vMF':
            dot = torch.cosine_similarity(pred_norm, gt_norm, dim=1)

            valid_mask = gt_norm_mask[:, 0, :, :].float() \
                         * (dot.detach() < 0.999).float() \
                         * (dot.detach() > -0.999).float()
            valid_mask = valid_mask > 0.0

            dot = dot[valid_mask]
            kappa = pred_kappa[:, 0, :, :][valid_mask]

            loss_pixelwise = - torch.log(kappa) \
                             - (kappa * (dot - 1)) \
                             + torch.log(1 - torch.exp(- 2 * kappa))
            loss = torch.mean(loss_pixelwise)

        elif self.loss_type == 'NLL_ours':
            dot = torch.cosine_similarity(pred_norm, gt_norm, dim=1)

            valid_mask = gt_norm_mask[:, 0, :, :].float() \
                         * (dot.detach() < 0.999).float() \
                         * (dot.detach() > -0.999).float()
            valid_mask = valid_mask > 0.5

            dot = dot[valid_mask]
            kappa = pred_kappa[:, 0, :, :][valid_mask]

            # angular vonMF NLL (see GeoNet/"Estimating and Exploiting ..." style losses)
            loss_pixelwise = - torch.log(torch.square(kappa) + 1) \
                             + kappa * torch.acos(dot) \
                             + torch.log(1 + torch.exp(-kappa * np.pi))
            loss = torch.mean(loss_pixelwise)

        else:
            raise Exception('invalid loss type')

        return loss

    def forward_UG(self, normal_pred_list, normal_coord_list, normal, mask, **kwargs):
        """Uncertainty-guided loss over a list of predictions: dense maps
        (coord is None) are upsampled; sparse predictions are compared against
        gt sampled with grid_sample at the given coordinates."""
        gt_normal_mask = ~torch.all(normal == 0, dim=1, keepdim=True) & mask
        loss = 0.0

        # too few valid gt pixels: zero loss that still touches every output
        if gt_normal_mask.sum() < 10:
            for (pred, coord) in zip(normal_pred_list, normal_coord_list):
                if pred is not None:
                    loss += pred.sum() * 0.
                if coord is not None:
                    loss += coord.sum() * 0.
            return loss

        for (pred, coord) in zip(normal_pred_list, normal_coord_list):
            if coord is None:
                # dense prediction: upsample to gt resolution
                pred = F.interpolate(pred, size=[normal.size(2), normal.size(3)], mode='bilinear', align_corners=True)
                pred_norm, pred_kappa = pred[:, 0:3, :, :], pred[:, 3:, :, :]

                if self.loss_type == 'UG_NLL_ours':
                    dot = torch.cosine_similarity(pred_norm, normal, dim=1)

                    valid_mask = gt_normal_mask[:, 0, :, :].float() \
                                 * (dot.detach() < 0.999).float() \
                                 * (dot.detach() > -0.999).float()
                    valid_mask = valid_mask > 0.5

                    dot = dot[valid_mask]
                    kappa = pred_kappa[:, 0, :, :][valid_mask]

                    loss_pixelwise = - torch.log(torch.square(kappa) + 1) \
                                     + kappa * torch.acos(dot) \
                                     + torch.log(1 + torch.exp(-kappa * np.pi))
                    loss = loss + torch.mean(loss_pixelwise)

                else:
                    raise Exception
            else:
                # sparse prediction: coord is (B, 1, N, 2), pred is (B, 4, N)
                gt_norm_ = F.grid_sample(normal, coord, mode='nearest', align_corners=True)  # (B, 3, 1, N)
                gt_norm_mask_ = F.grid_sample(gt_normal_mask.float(), coord, mode='nearest', align_corners=True)  # (B, 1, 1, N)
                gt_norm_ = gt_norm_[:, :, 0, :]  # (B, 3, N)
                gt_norm_mask_ = gt_norm_mask_[:, :, 0, :] > 0.5  # (B, 1, N)

                pred_norm, pred_kappa = pred[:, 0:3, :], pred[:, 3:, :]

                if self.loss_type == 'UG_NLL_ours':
                    dot = torch.cosine_similarity(pred_norm, gt_norm_, dim=1)  # (B, N)

                    valid_mask = gt_norm_mask_[:, 0, :].float() \
                                 * (dot.detach() < 0.999).float() \
                                 * (dot.detach() > -0.999).float()
                    valid_mask = valid_mask > 0.5

                    dot = dot[valid_mask]
                    kappa = pred_kappa[:, 0, :][valid_mask]

                    loss_pixelwise = - torch.log(torch.square(kappa) + 1) \
                                     + kappa * torch.acos(dot) \
                                     + torch.log(1 + torch.exp(-kappa * np.pi))
                    loss = loss + torch.mean(loss_pixelwise)

                else:
                    raise Exception
        return loss
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
# confidence-guided sampling
@torch.no_grad()
def sample_points(init_normal, confidence_map, gt_norm_mask, sampling_ratio, beta=1):
    """Return a (B, 1, H, W) bool mask selecting N = sampling_ratio*H*W pixels per sample.

    A fraction `beta` of the N samples is taken from the most confident pixels
    ("importance"); the remainder is drawn uniformly at random from the rest
    ("coverage"). Pixels without valid gt (per `gt_norm_mask`) are pushed to
    the bottom of the confidence ranking so they are never picked as importance.
    """
    device = init_normal.device
    B, _, H, W = init_normal.shape
    N = int(sampling_ratio * H * W)

    # FIX: operate on a copy — the original mutated the caller's confidence
    # tensor in place when masking invalid pixels.
    confidence_map = confidence_map.clone()

    # suppress pixels without valid gt (B, H, W grid at this resolution)
    if gt_norm_mask is not None:
        gt_invalid_mask = F.interpolate(gt_norm_mask.float(), size=[H, W], mode='nearest')
        gt_invalid_mask = gt_invalid_mask < 0.5
        confidence_map[gt_invalid_mask] = -1e4

    # pixel indices sorted by descending confidence, flattened to (B, H*W)
    _, idx = confidence_map.view(B, -1).sort(1, descending=True)

    num_importance = int(beta * N)
    if num_importance > 0:
        importance = idx[:, :num_importance]      # B, beta*N most confident
        remaining = idx[:, num_importance:]       # candidates for coverage
        num_coverage = N - num_importance
        if num_coverage <= 0:
            samples = importance
        else:
            coverage = _random_coverage(remaining, num_coverage)
            samples = torch.cat((importance, coverage), dim=1)  # B, N
    else:
        # pure coverage: sample uniformly from all pixels
        samples = _random_coverage(idx, N)

    # flat index -> (row, col)
    rows_int = samples // W   # 0 for first row, H-1 for last row
    cols_int = samples % W    # 0 for first column, W-1 for last column

    sample_mask = torch.zeros((B, 1, H, W), dtype=torch.bool, device=device)
    for i in range(B):
        sample_mask[i, :, rows_int[i, :], cols_int[i, :]] = True
    return sample_mask


def _random_coverage(remaining, num_coverage):
    """Uniformly pick `num_coverage` indices per batch row from `remaining` (B, M)."""
    coverage_list = []
    for i in range(remaining.size(0)):
        idx_c = torch.randperm(remaining.size(1))  # shuffle the M candidates
        coverage_list.append(remaining[i, :][idx_c[:num_coverage]].view(1, -1))
    return torch.cat(coverage_list, dim=0)
|
| 348 |
+
|
| 349 |
+
# depth-normal consistency loss
class DeNoConsistencyLoss(nn.Module):
    def __init__(self, loss_weight=1.0, data_type=['stereo', 'lidar', 'denselidar', 'denselidar_nometric', 'denselidar_syn'], loss_fn='NLL_ours',
                 sky_id=142, scale=1, norm_dataset=['Taskonomy', 'Matterport3D', 'Replica', 'Hypersim', 'NYU'], no_sky_dataset=['BigData', 'DIODE', 'Completion', 'Matterport3D'], disable_dataset=[], depth_detach=False, **kwargs):
        """Consistency loss between the normal branch and normals derived from depth.

        loss_fn can be one of following:
        - L1           - L1 loss (no uncertainty)
        - L2           - L2 loss (no uncertainty)
        - AL           - Angular loss (no uncertainty)
        - NLL_vMF      - NLL of vonMF distribution
        - NLL_ours     - NLL of Angular vonMF distribution
        - UG_NLL_vMF   - NLL of vonMF distribution (+ pixel-wise MLP + uncertainty-guided sampling)
        - UG_NLL_ours  - NLL of Angular vonMF distribution (+ pixel-wise MLP + uncertainty-guided sampling)
        - NLL_ours_GRU - NLL of Angular vonMF distribution for GRU sequence
        - CEL          - cosine embedding loss
        - CEL_GRU
        """
        super(DeNoConsistencyLoss, self).__init__()
        self.loss_type = loss_fn

        # dispatch: pick the forward implementation matching the configured loss
        if self.loss_type in ('L1', 'L2', 'NLL_vMF', 'UG_NLL_vMF', 'UG_NLL_ours'):
            raise NotImplementedError
        if self.loss_type == 'NLL_ours':
            self.loss_fn = self.forward_J   # confidence Joint optimization
            self.loss_gamma = 0.9
        elif self.loss_type in ('AL', 'CEL', 'CEL_L2'):
            self.loss_fn = self.forward_S   # confidence Sample
        elif self.loss_type == 'CEL_GRU':
            self.loss_fn = self.forward_S_GRU   # gru
            self.loss_gamma = 0.9
        elif 'Search' in self.loss_type:
            self.loss_fn = self.forward_S_Search
        else:
            raise Exception('invalid loss type')

        self.loss_weight = loss_weight
        self.data_type = data_type
        self.sky_id = sky_id

        # For datasets without surface normal gt, enhance its weight (decrease the weight of the dataset with gt).
        self.nonorm_data_scale = scale
        self.norm_dataset = norm_dataset
        self.no_sky_dataset = no_sky_dataset
        self.disable_dataset = disable_dataset

        self.depth_detach = depth_detach
        self.depth2normal = Depth2Normal()
|
| 401 |
+
|
| 402 |
+
def forward(self, **kwargs):
|
| 403 |
+
device = kwargs['mask'].device
|
| 404 |
+
|
| 405 |
+
batches_dataset = kwargs['dataset']
|
| 406 |
+
self.batch_with_norm = torch.tensor([self.nonorm_data_scale if batch_dataset not in self.norm_dataset else 1 \
|
| 407 |
+
for batch_dataset in batches_dataset], device=device)[:,None,None,None]
|
| 408 |
+
|
| 409 |
+
self.batch_enabled= torch.tensor([1 if batch_dataset not in self.disable_dataset else 0 \
|
| 410 |
+
for batch_dataset in batches_dataset], device=device, dtype=torch.bool)[:,None,None,None]
|
| 411 |
+
self.batch_with_norm = self.batch_with_norm * self.batch_enabled
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
self.batch_with_norm_sky = torch.tensor([1 if batch_dataset not in self.no_sky_dataset else 0 \
|
| 415 |
+
for batch_dataset in batches_dataset], device=device, dtype=torch.bool)[:,None,None,None]
|
| 416 |
+
|
| 417 |
+
B, _, H, W = kwargs['mask'].shape
|
| 418 |
+
pad_mask = torch.zeros_like(kwargs['mask'], device=device)
|
| 419 |
+
for b in range(B):
|
| 420 |
+
pad = kwargs['pad'][b].squeeze()
|
| 421 |
+
pad_mask[b, :, pad[0]:H-pad[1], pad[2]:W-pad[3]] = True
|
| 422 |
+
|
| 423 |
+
loss = self.loss_fn(pad_mask=pad_mask, **kwargs)
|
| 424 |
+
return loss * self.loss_weight
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
def forward_J(self, prediction, confidence, normal_out_list, intrinsic, pad_mask, sem_mask=None, **kwargs):
|
| 428 |
+
prediction_normal = normal_out_list[-1].clone()
|
| 429 |
+
|
| 430 |
+
# get normal from depth-prediction
|
| 431 |
+
normal, new_mask = self.depth2normal(prediction.detach() if self.depth_detach else prediction, intrinsic, pad_mask)
|
| 432 |
+
# mask sky
|
| 433 |
+
sky_mask = sem_mask != self.sky_id
|
| 434 |
+
new_mask = new_mask & sky_mask
|
| 435 |
+
# normal = normal * (~sky_mask)
|
| 436 |
+
# normal[:,1:2,:,:][sky_mask] = 1
|
| 437 |
+
# confidence sampling (sample good depth -> good normal -> to )
|
| 438 |
+
sample_mask_d = sample_points(prediction, confidence, new_mask, sampling_ratio=0.7)
|
| 439 |
+
|
| 440 |
+
# all mask
|
| 441 |
+
normal_mask = ~torch.all(normal == 0, dim=1, keepdim=True) & new_mask & sample_mask_d
|
| 442 |
+
if normal_mask.sum() < 10:
|
| 443 |
+
return 0 * prediction_normal.sum()
|
| 444 |
+
|
| 445 |
+
loss = self.forward_R(prediction_normal, normal, normal_mask)
|
| 446 |
+
if torch.isnan(loss).item() | torch.isinf(loss).item():
|
| 447 |
+
loss = 0 * torch.sum(prediction_normal)
|
| 448 |
+
print(f'NormalBranchLoss forward_GRU NAN error, {loss}')
|
| 449 |
+
|
| 450 |
+
return loss
|
| 451 |
+
|
| 452 |
+
#def forward_S(self, prediction, confidence, normal_out_list, intrinsic, pad_mask, sem_mask=None, **kwargs):
|
| 453 |
+
def forward_S(self, prediction, confidence, intrinsic, pad_mask, normal_pred=None, sem_mask=None, target=None, is_initial_pair=False, **kwargs):
|
| 454 |
+
|
| 455 |
+
if normal_pred is None:
|
| 456 |
+
prediction_normal = kwargs['normal_out_list'][-1]
|
| 457 |
+
else:
|
| 458 |
+
prediction_normal = normal_pred
|
| 459 |
+
|
| 460 |
+
# get normal from depth-prediction
|
| 461 |
+
#try:
|
| 462 |
+
scale = kwargs['scale'][:, None, None].float()
|
| 463 |
+
#except:
|
| 464 |
+
#scale = 1.0
|
| 465 |
+
normal, new_mask = self.depth2normal(prediction.detach() if self.depth_detach else prediction, intrinsic, pad_mask, scale)
|
| 466 |
+
|
| 467 |
+
sky_mask = sem_mask != self.sky_id
|
| 468 |
+
if target != None:
|
| 469 |
+
sampling_ratio = 0.7
|
| 470 |
+
target_mask = (target > 0)
|
| 471 |
+
if is_initial_pair == False:
|
| 472 |
+
pass
|
| 473 |
+
# mask sky
|
| 474 |
+
else:
|
| 475 |
+
sky_mask = torch.nn.functional.interpolate(sky_mask.float(), scale_factor=0.25).bool()
|
| 476 |
+
target_mask = torch.nn.functional.interpolate(target_mask.float(), scale_factor=0.25).bool()
|
| 477 |
+
new_mask = new_mask & ((sky_mask & self.batch_with_norm_sky) | target_mask)
|
| 478 |
+
else:
|
| 479 |
+
new_mask = torch.ones_like(prediction).bool()
|
| 480 |
+
sampling_ratio = 0.5
|
| 481 |
+
|
| 482 |
+
# normal = normal * (~sky_mask)
|
| 483 |
+
# normal[:,1:2,:,:][sky_mask] = 1
|
| 484 |
+
|
| 485 |
+
# dual sampling
|
| 486 |
+
confidence_normal = prediction_normal[:, 3:, :, :]
|
| 487 |
+
sample_mask_n = sample_points(prediction_normal, confidence_normal, new_mask, sampling_ratio=sampling_ratio)
|
| 488 |
+
sample_mask_d = sample_points(prediction, confidence, new_mask, sampling_ratio=sampling_ratio)
|
| 489 |
+
conf_mask = confidence > 0.5
|
| 490 |
+
|
| 491 |
+
# all mask
|
| 492 |
+
normal_mask = ~torch.all(normal == 0, dim=1, keepdim=True) & new_mask & sample_mask_n & sample_mask_d & conf_mask
|
| 493 |
+
if normal_mask.sum() < 10:
|
| 494 |
+
return 0 * prediction_normal.sum()
|
| 495 |
+
|
| 496 |
+
loss = self.forward_R(prediction_normal, normal, normal_mask)
|
| 497 |
+
if torch.isnan(loss).item() | torch.isinf(loss).item():
|
| 498 |
+
loss = 0 * torch.sum(prediction_normal)
|
| 499 |
+
print(f'NormalBranchLoss forward_GRU NAN error, {loss}')
|
| 500 |
+
|
| 501 |
+
return loss
|
| 502 |
+
|
| 503 |
+
def forward_S_GRU(self, predictions_list, confidence_list, normal_out_list, intrinsic, pad_mask, sem_mask, target, low_resolution_init, **kwargs):
|
| 504 |
+
n_predictions = len(normal_out_list)
|
| 505 |
+
assert n_predictions >= 1
|
| 506 |
+
loss = 0.0
|
| 507 |
+
|
| 508 |
+
for i, (norm, conf, depth) in enumerate(zip(normal_out_list, confidence_list, predictions_list)):
|
| 509 |
+
# We adjust the loss_gamma so it is consistent for any number of RAFT-Stereo iterations
|
| 510 |
+
adjusted_loss_gamma = self.loss_gamma**(15/(n_predictions - 1))
|
| 511 |
+
i_weight = adjusted_loss_gamma**(n_predictions - i - 1)
|
| 512 |
+
|
| 513 |
+
if i == 0:
|
| 514 |
+
is_initial_pair = True
|
| 515 |
+
new_intrinsic = torch.cat((intrinsic[:, :2, :]/4, intrinsic[:, 2:3, :]), dim=1)
|
| 516 |
+
curr_loss = self.forward_S(low_resolution_init[0], low_resolution_init[1], new_intrinsic, torch.nn.functional.interpolate(pad_mask.float(), scale_factor=0.25).bool(), low_resolution_init[2], sem_mask, target, is_initial_pair, scale=kwargs['scale'])
|
| 517 |
+
else:
|
| 518 |
+
is_initial_pair = False
|
| 519 |
+
curr_loss = self.forward_S(depth, conf, intrinsic, pad_mask, norm, sem_mask, target, is_initial_pair, scale=kwargs['scale'])
|
| 520 |
+
|
| 521 |
+
if torch.isnan(curr_loss).item() | torch.isinf(curr_loss).item():
|
| 522 |
+
curr_loss = 0 * torch.sum(norm)
|
| 523 |
+
print(f'NormalBranchLoss forward_GRU NAN error, {curr_loss}')
|
| 524 |
+
|
| 525 |
+
loss += curr_loss * i_weight
|
| 526 |
+
|
| 527 |
+
return loss
|
| 528 |
+
|
| 529 |
+
|
| 530 |
+
def forward_R(self, norm_out, gt_norm, gt_norm_mask, pred_kappa=None):
|
| 531 |
+
pred_norm = norm_out[:, 0:3, :, :]
|
| 532 |
+
if pred_kappa is None:
|
| 533 |
+
pred_kappa = norm_out[:, 3:, :, :]
|
| 534 |
+
|
| 535 |
+
if self.loss_type == 'L1':
|
| 536 |
+
l1 = torch.sum(torch.abs(gt_norm - pred_norm), dim=1, keepdim=True)
|
| 537 |
+
loss = torch.mean(l1[gt_norm_mask])
|
| 538 |
+
|
| 539 |
+
elif self.loss_type == 'L2' or self.loss_type == 'CEL_L2':
|
| 540 |
+
l2 = torch.sum(torch.square(gt_norm - pred_norm), dim=1, keepdim=True)
|
| 541 |
+
loss = torch.mean(l2[gt_norm_mask])
|
| 542 |
+
|
| 543 |
+
elif self.loss_type == 'AL':
|
| 544 |
+
dot = torch.cosine_similarity(pred_norm, gt_norm, dim=1)
|
| 545 |
+
|
| 546 |
+
valid_mask = gt_norm_mask[:, 0, :, :].float() \
|
| 547 |
+
* (dot.detach() < 0.999).float() \
|
| 548 |
+
* (dot.detach() > -0.999).float()
|
| 549 |
+
valid_mask = valid_mask > 0.0
|
| 550 |
+
|
| 551 |
+
al = torch.acos(dot * valid_mask)
|
| 552 |
+
al = al * self.batch_with_norm[:, 0, :, :]
|
| 553 |
+
loss = torch.mean(al)
|
| 554 |
+
|
| 555 |
+
elif self.loss_type == 'CEL' or self.loss_type == 'CEL_GRU':
|
| 556 |
+
dot = torch.cosine_similarity(pred_norm, gt_norm, dim=1)
|
| 557 |
+
|
| 558 |
+
valid_mask = gt_norm_mask[:, 0, :, :].float() \
|
| 559 |
+
* (dot.detach() < 0.999).float() \
|
| 560 |
+
* (dot.detach() > -0.999).float()
|
| 561 |
+
valid_mask = valid_mask > 0.0
|
| 562 |
+
|
| 563 |
+
al = 1 - dot * valid_mask
|
| 564 |
+
al = al * self.batch_with_norm[:, 0, :, :]
|
| 565 |
+
loss = torch.mean(al)
|
| 566 |
+
|
| 567 |
+
elif self.loss_type == 'NLL_vMF':
|
| 568 |
+
dot = torch.cosine_similarity(pred_norm, gt_norm, dim=1)
|
| 569 |
+
|
| 570 |
+
valid_mask = gt_norm_mask[:, 0, :, :].float() \
|
| 571 |
+
* (dot.detach() < 0.999).float() \
|
| 572 |
+
* (dot.detach() > -0.999).float()
|
| 573 |
+
valid_mask = valid_mask > 0.0
|
| 574 |
+
|
| 575 |
+
dot = dot[valid_mask]
|
| 576 |
+
kappa = pred_kappa[:, 0, :, :][valid_mask]
|
| 577 |
+
|
| 578 |
+
loss_pixelwise = - torch.log(kappa) \
|
| 579 |
+
- (kappa * (dot - 1)) \
|
| 580 |
+
+ torch.log(1 - torch.exp(- 2 * kappa))
|
| 581 |
+
loss = torch.mean(loss_pixelwise)
|
| 582 |
+
|
| 583 |
+
elif self.loss_type == 'NLL_ours':
|
| 584 |
+
dot = torch.cosine_similarity(pred_norm, gt_norm, dim=1)
|
| 585 |
+
|
| 586 |
+
valid_mask = gt_norm_mask[:, 0, :, :].float() \
|
| 587 |
+
* (dot.detach() < 0.999).float() \
|
| 588 |
+
* (dot.detach() > -0.999).float()
|
| 589 |
+
valid_mask = valid_mask > 0.5
|
| 590 |
+
|
| 591 |
+
dot = dot * valid_mask
|
| 592 |
+
kappa = pred_kappa[:, 0, :, :] * valid_mask
|
| 593 |
+
|
| 594 |
+
loss_pixelwise = - torch.log(torch.square(kappa) + 1) \
|
| 595 |
+
+ kappa * torch.acos(dot) \
|
| 596 |
+
+ torch.log(1 + torch.exp(-kappa * np.pi))
|
| 597 |
+
loss_pixelwise = loss_pixelwise * self.batch_with_norm[:, 0, :, :]
|
| 598 |
+
loss = torch.mean(loss_pixelwise)
|
| 599 |
+
|
| 600 |
+
else:
|
| 601 |
+
raise Exception('invalid loss type')
|
| 602 |
+
|
| 603 |
+
return loss
|
| 604 |
+
|
| 605 |
+
def forward_S_Search(self, prediction, confidence, intrinsic, pad_mask, normal_pred=None, sem_mask=None, target=None, is_initial_pair=False, **kwargs):
|
| 606 |
+
|
| 607 |
+
if normal_pred is None:
|
| 608 |
+
prediction_normal = kwargs['normal_out_list'][-1]
|
| 609 |
+
else:
|
| 610 |
+
prediction_normal = normal_pred
|
| 611 |
+
|
| 612 |
+
# get normal from depth-prediction
|
| 613 |
+
scale = kwargs['scale'][:, None, None].float()
|
| 614 |
+
candidate_scale = kwargs['candidate_scale'][:, None, None, None].float()
|
| 615 |
+
normal, new_mask = self.depth2normal(prediction.detach() if self.depth_detach else prediction, intrinsic, pad_mask, scale)
|
| 616 |
+
|
| 617 |
+
sky_mask = sem_mask != self.sky_id
|
| 618 |
+
if target != None:
|
| 619 |
+
sampling_ratio = 0.7
|
| 620 |
+
target_mask = (target > 0)
|
| 621 |
+
if is_initial_pair == False:
|
| 622 |
+
pass
|
| 623 |
+
# mask sky
|
| 624 |
+
else:
|
| 625 |
+
sky_mask = torch.nn.functional.interpolate(sky_mask.float(), scale_factor=0.25).bool()
|
| 626 |
+
target_mask = torch.nn.functional.interpolate(target_mask.float(), scale_factor=0.25).bool()
|
| 627 |
+
new_mask = new_mask & ((sky_mask & self.batch_with_norm_sky) | target_mask)
|
| 628 |
+
else:
|
| 629 |
+
new_mask = torch.ones_like(prediction).bool()
|
| 630 |
+
sampling_ratio = 0.5
|
| 631 |
+
|
| 632 |
+
# normal = normal * (~sky_mask)
|
| 633 |
+
# normal[:,1:2,:,:][sky_mask] = 1
|
| 634 |
+
|
| 635 |
+
# dual sampling
|
| 636 |
+
confidence_normal = prediction_normal[:, 3:, :, :]
|
| 637 |
+
sample_mask_n = sample_points(prediction_normal, confidence_normal, new_mask, sampling_ratio=sampling_ratio)
|
| 638 |
+
sample_mask_d = sample_points(prediction, confidence, new_mask, sampling_ratio=sampling_ratio)
|
| 639 |
+
conf_mask = confidence > 0.5
|
| 640 |
+
|
| 641 |
+
# all mask
|
| 642 |
+
normal_mask = ~torch.all(normal == 0, dim=1, keepdim=True) & new_mask & sample_mask_n & sample_mask_d & conf_mask
|
| 643 |
+
if normal_mask.sum() < 10:
|
| 644 |
+
return 0 * prediction_normal.sum()
|
| 645 |
+
|
| 646 |
+
prediction_normal = torch.cat((prediction_normal[:,:2]*torch.ones_like(candidate_scale), prediction_normal[:,2:3]*candidate_scale, prediction_normal[:,3:4]*torch.ones_like(candidate_scale)), dim=1)
|
| 647 |
+
|
| 648 |
+
norm_x = prediction_normal[:,0:1]
|
| 649 |
+
norm_y = prediction_normal[:,1:2]
|
| 650 |
+
norm_z = prediction_normal[:,2:3]
|
| 651 |
+
|
| 652 |
+
prediction_normal[:,:3] = prediction_normal[:,:3] / (torch.sqrt(norm_x ** 2.0 + norm_y ** 2.0 + norm_z ** 2.0) + 1e-10)
|
| 653 |
+
|
| 654 |
+
loss = self.forward_R_Search(prediction_normal, normal, normal_mask)
|
| 655 |
+
#if torch.isnan(loss).item() | torch.isinf(loss).item():
|
| 656 |
+
#loss = 0 * torch.sum(prediction_normal)
|
| 657 |
+
#print(f'NormalBranchLoss forward_GRU NAN error, {loss}')
|
| 658 |
+
|
| 659 |
+
return loss
|
| 660 |
+
|
| 661 |
+
|
| 662 |
+
def forward_R_Search(self, norm_out, gt_norm, gt_norm_mask, pred_kappa=None):
    """Surface-normal regression loss used by the rotation-search branch.

    Args:
        norm_out: (B, 4, H, W) tensor; channels 0:3 are the predicted normal,
            channel 3 is a confidence / concentration (kappa) map.
        gt_norm: (B, 3, H, W) ground-truth unit normals.
        gt_norm_mask: (B, 1, H, W) bool mask of valid ground-truth normals.
        pred_kappa: optional (B, 1, H, W) kappa map; taken from
            `norm_out[:, 3:]` when omitted.

    Returns:
        A (B,) per-sample loss tensor for the masked variants ('L1', 'L2',
        'AL', 'CEL', 'NLL_ours'); a scalar for 'NLL_vMF', which reduces over
        the flattened set of valid pixels.

    Raises:
        Exception: if `self.loss_type` matches none of the known variants.
    """
    pred_norm = norm_out[:, 0:3, :, :]
    if pred_kappa is None:
        pred_kappa = norm_out[:, 3:, :, :]

    if 'L1' in self.loss_type:
        l1 = torch.sum(torch.abs(gt_norm - pred_norm), dim=1, keepdim=True)
        loss = torch.mean(l1 * gt_norm_mask, dim=[1, 2, 3])

    elif 'L2' in self.loss_type:
        l2 = torch.sum(torch.square(gt_norm - pred_norm), dim=1, keepdim=True)
        loss = torch.mean(l2 * gt_norm_mask, dim=[1, 2, 3])

    elif 'AL' in self.loss_type:
        # angular loss: acos of the cosine similarity
        dot = torch.cosine_similarity(pred_norm, gt_norm, dim=1)

        # drop (near-)parallel pairs where acos is numerically unstable
        valid_mask = gt_norm_mask[:, 0, :, :].float() \
            * (dot.detach() < 0.999).float() \
            * (dot.detach() > -0.999).float()
        valid_mask = valid_mask > 0.0

        al = torch.acos(dot * valid_mask)
        loss = torch.mean(al, dim=[1, 2])

    elif 'CEL' in self.loss_type:
        # cosine-error loss: 1 - cos(angle)
        dot = torch.cosine_similarity(pred_norm, gt_norm, dim=1)

        valid_mask = gt_norm_mask[:, 0, :, :].float() \
            * (dot.detach() < 0.999).float() \
            * (dot.detach() > -0.999).float()
        valid_mask = valid_mask > 0.0

        al = 1 - dot * valid_mask
        loss = torch.mean(al, dim=[1, 2])

    elif 'NLL_vMF' in self.loss_type:
        # negative log-likelihood under a von Mises-Fisher distribution
        dot = torch.cosine_similarity(pred_norm, gt_norm, dim=1)

        valid_mask = gt_norm_mask[:, 0, :, :].float() \
            * (dot.detach() < 0.999).float() \
            * (dot.detach() > -0.999).float()
        valid_mask = valid_mask > 0.0

        dot = dot[valid_mask]
        kappa = pred_kappa[:, 0, :, :][valid_mask]

        loss_pixelwise = - torch.log(kappa) \
            - (kappa * (dot - 1)) \
            + torch.log(1 - torch.exp(- 2 * kappa))
        # BUGFIX: after boolean-mask indexing the tensors above are 1-D, so
        # torch.mean(..., dim=[1, 2]) raised an IndexError; reduce over all
        # remaining (valid) elements instead.
        loss = torch.mean(loss_pixelwise)

    elif 'NLL_ours' in self.loss_type:
        dot = torch.cosine_similarity(pred_norm, gt_norm, dim=1)

        valid_mask = gt_norm_mask[:, 0, :, :].float() \
            * (dot.detach() < 0.999).float() \
            * (dot.detach() > -0.999).float()
        valid_mask = valid_mask > 0.5

        # keep the spatial layout here (multiply instead of index) so the
        # reduction can stay per-sample
        dot = dot * valid_mask
        kappa = pred_kappa[:, 0, :, :] * valid_mask

        loss_pixelwise = - torch.log(torch.square(kappa) + 1) \
            + kappa * torch.acos(dot) \
            + torch.log(1 + torch.exp(-kappa * np.pi))
        loss = torch.mean(loss_pixelwise, dim=[1, 2])

    else:
        raise Exception('invalid loss type')

    return loss
|
external/Metric3D/training/mono/model/losses/NormalRegression.py
ADDED
|
@@ -0,0 +1,418 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from torch import nn
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
from .depth_to_normal import Depth2Normal
|
| 6 |
+
"""
|
| 7 |
+
Sampling strategies: RS (Random Sampling), EGS (Edge-Guided Sampling), and IGS (Instance-Guided Sampling)
|
| 8 |
+
"""
|
| 9 |
+
###########
|
| 10 |
+
# RANDOM SAMPLING
|
| 11 |
+
# input:
|
| 12 |
+
# inputs[i,:], targets[i, :], masks[i, :], self.mask_value, self.point_pairs
|
| 13 |
+
# return:
|
| 14 |
+
# inputs_A, inputs_B, targets_A, targets_B, consistent_masks_A, consistent_masks_B
|
| 15 |
+
###########
|
| 16 |
+
def randomSamplingNormal(inputs, targets, masks, sample_num):
    """Randomly draw `sample_num` (A, B) point pairs from the masked pixels.

    Args:
        inputs: (C, N) predicted normals, flattened over the spatial dims.
        targets: (C, N) ground-truth normals, same layout as `inputs`.
        masks: (N,) bool mask of pixels eligible for sampling.
        sample_num: number of pairs to draw (consumes 2 * sample_num pixels).

    Returns:
        inputs_A, inputs_B, targets_A, targets_B: (C, k) tensors with
        k <= sample_num; columns are aligned so column j of A and B forms
        one pair, and targets_* index the same pixels as inputs_*.
    """
    # find A-B point pairs from prediction
    # BUGFIX: torch.randperm requires a Python int, not the 0-dim tensor that
    # torch.sum returns; also follow the mask's device instead of assuming CUDA.
    num_effect_pixels = int(torch.sum(masks).item())
    shuffle_effect_pixels = torch.randperm(num_effect_pixels, device=masks.device)
    valid_inputs = inputs[:, masks]
    valid_targets = targets[:, masks]
    inputs_A = valid_inputs[:, shuffle_effect_pixels[0 : sample_num * 2 : 2]]
    inputs_B = valid_inputs[:, shuffle_effect_pixels[1 : sample_num * 2 : 2]]
    # find corresponding pairs from GT
    targets_A = valid_targets[:, shuffle_effect_pixels[0 : sample_num * 2 : 2]]
    targets_B = valid_targets[:, shuffle_effect_pixels[1 : sample_num * 2 : 2]]
    # when fewer than 2 * sample_num pixels are available the A/B slices can
    # differ in length by one; trim both sides to the common length
    if inputs_A.shape[1] != inputs_B.shape[1]:
        num_min = min(targets_A.shape[1], targets_B.shape[1])
        inputs_A = inputs_A[:, :num_min]
        inputs_B = inputs_B[:, :num_min]
        targets_A = targets_A[:, :num_min]
        targets_B = targets_B[:, :num_min]
    return inputs_A, inputs_B, targets_A, targets_B
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
###########
|
| 38 |
+
# EDGE-GUIDED SAMPLING
|
| 39 |
+
# input:
|
| 40 |
+
# inputs[i,:], targets[i, :], masks[i, :], edges_img[i], thetas_img[i], masks[i, :], h, w
|
| 41 |
+
# return:
|
| 42 |
+
# inputs_A, inputs_B, targets_A, targets_B, masks_A, masks_B
|
| 43 |
+
###########
|
| 44 |
+
def ind2sub(idx, cols):
    """Convert flat (row-major) indices into (row, col) pairs for a grid
    that is `cols` columns wide."""
    row = torch.div(idx, cols, rounding_mode='floor')
    col = idx - row * cols
    return row, col
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def sub2ind(r, c, cols):
    """Flatten (row, col) grid coordinates into row-major linear indices
    for a grid that is `cols` columns wide."""
    return r * cols + c
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def edgeGuidedSampling(inputs, targets, edges_img, thetas_img, masks, h, w):
    """Sample point pairs in the neighbourhood of image edges.

    All spatial inputs are flattened: `inputs`/`targets` are (C, H*W),
    `edges_img`/`thetas_img`/`masks` are (H*W,). For every randomly chosen
    edge anchor, four points (a, b, c, d) are placed along the gradient
    orientation at random distances on both sides of the anchor; the pairs
    (a-b, b-c, c-d) are returned for a pair-wise loss.

    Returns:
        inputs_A, inputs_B, targets_A, targets_B: (C, 3*sample_num) columns,
        masks_A, masks_B: (3*sample_num,) validity of each pair endpoint,
        sample_num: number of anchors used,
        row, col: (4, sample_num) integer coordinates of the four points.

    NOTE(review): tensors are created on a hard-coded "cuda" device, so the
    flattened inputs are presumed to live on GPU — verify before CPU use.
    """
    # find edges: a pixel counts as an edge if its response is >= 10% of the max
    edges_max = edges_img.max()
    edges_min = edges_img.min()  # currently unused, kept for parity
    edges_mask = edges_img.ge(edges_max * 0.1)
    edges_loc = edges_mask.nonzero(as_tuple=False)

    thetas_edge = torch.masked_select(thetas_img, edges_mask)
    minlen = thetas_edge.size()[0]

    # find anchor points (i.e. edge points), sampled with replacement
    sample_num = minlen
    index_anchors = torch.randint(0, minlen, (sample_num,), dtype=torch.long, device="cuda")
    theta_anchors = torch.gather(thetas_edge, 0, index_anchors)
    # recover (row, col) coordinates from the flat edge locations
    row_anchors, col_anchors = ind2sub(edges_loc[index_anchors].squeeze(1), w)
    ## compute the coordinates of the 4 points; distances drawn from [3, 20),
    ## with the sign flipped on the first two rows so two points lie on each side
    distance_matrix = torch.randint(3, 20, (4, sample_num), device="cuda")
    pos_or_neg = torch.ones(4, sample_num, device="cuda")
    pos_or_neg[:2, :] = -pos_or_neg[:2, :]
    distance_matrix = distance_matrix.float() * pos_or_neg
    # step along the gradient orientation theta from each anchor
    col = (
        col_anchors.unsqueeze(0).expand(4, sample_num).long()
        + torch.round(
            distance_matrix.float() * torch.abs(torch.cos(theta_anchors)).unsqueeze(0)
        ).long()
    )
    row = (
        row_anchors.unsqueeze(0).expand(4, sample_num).long()
        + torch.round(
            distance_matrix.float() * torch.abs(torch.sin(theta_anchors)).unsqueeze(0)
        ).long()
    )

    # constrain 0 <= col <= w-1, 0 <= row <= h-1
    # Note: index should minus 1
    col[col < 0] = 0
    col[col > w - 1] = w - 1
    row[row < 0] = 0
    row[row > h - 1] = h - 1

    # flatten back to linear indices and form the pairs a-b, b-c, c-d
    a = sub2ind(row[0, :], col[0, :], w)
    b = sub2ind(row[1, :], col[1, :], w)
    c = sub2ind(row[2, :], col[2, :], w)
    d = sub2ind(row[3, :], col[3, :], w)
    A = torch.cat((a, b, c), 0)
    B = torch.cat((b, c, d), 0)

    inputs_A = inputs[:, A]
    inputs_B = inputs[:, B]
    targets_A = targets[:, A]
    targets_B = targets[:, B]
    masks_A = torch.gather(masks, 0, A.long())
    masks_B = torch.gather(masks, 0, B.long())

    # NOTE: a commented-out snippet building A/B/C/D visualization masks
    # lived here in the original; removed for readability.
    return (
        inputs_A,
        inputs_B,
        targets_A,
        targets_B,
        masks_A,
        masks_B,
        sample_num,
        row,
        col,
    )
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
######################################################
|
| 140 |
+
# EdgeguidedNormalRankingLoss
|
| 141 |
+
#####################################################
|
| 142 |
+
class EdgeguidedNormalLoss(nn.Module):
    """Pair-wise surface-normal regression loss with edge-guided sampling.

    Point pairs are sampled along RGB image edges (see `edgeGuidedSampling`)
    and the loss pushes the inner product of the two predicted normals of
    each valid pair towards 1.
    """

    def __init__(
        self,
        point_pairs=10000,
        cos_theta1=0.25,
        cos_theta2=0.98,
        cos_theta3=0.5,
        cos_theta4=0.86,
        mask_value=1e-8,
        loss_weight=1.0,
        data_type=['stereo', 'denselidar', 'denselidar_nometric', 'denselidar_syn'],
        **kwargs
    ):
        super(EdgeguidedNormalLoss, self).__init__()
        self.point_pairs = point_pairs  # number of point pairs
        self.mask_value = mask_value
        self.cos_theta1 = cos_theta1  # 75 degree
        self.cos_theta2 = cos_theta2  # 10 degree
        self.cos_theta3 = cos_theta3  # 60 degree
        self.cos_theta4 = cos_theta4  # 30 degree
        self.depth2normal = Depth2Normal()
        self.loss_weight = loss_weight
        self.data_type = data_type
        self.eps = 1e-6  # guards the division by the number of valid samples

    def getEdge(self, images):
        """Sobel edge magnitude and orientation of `images`.

        For 3-channel input only the first channel is used. Returns
        (edges, thetas), both zero-padded back to the input spatial size.
        """
        n, c, h, w = images.size()
        # follow the input's device instead of the previous hard-coded "cuda"
        device = images.device
        a = (
            torch.tensor([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=torch.float32, device=device)
            .contiguous()
            .view((1, 1, 3, 3))
            .repeat(1, 1, 1, 1)
        )
        b = (
            torch.tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], dtype=torch.float32, device=device)
            .contiguous()
            .view((1, 1, 3, 3))
            .repeat(1, 1, 1, 1)
        )
        if c == 3:
            gradient_x = F.conv2d(images[:, 0, :, :].unsqueeze(1), a)
            gradient_y = F.conv2d(images[:, 0, :, :].unsqueeze(1), b)
        else:
            gradient_x = F.conv2d(images, a)
            gradient_y = F.conv2d(images, b)
        edges = torch.sqrt(torch.pow(gradient_x, 2) + torch.pow(gradient_y, 2))
        edges = F.pad(edges, (1, 1, 1, 1), "constant", 0)
        thetas = torch.atan2(gradient_y, gradient_x)
        thetas = F.pad(thetas, (1, 1, 1, 1), "constant", 0)
        return edges, thetas

    def getNormalEdge(self, normals):
        """Sobel edge magnitude/orientation of a normal map, averaged over
        its channels. Returns (edges, thetas) padded to the input size."""
        n, c, h, w = normals.size()
        device = normals.device
        # BUGFIX: the original used torch.Tensor(..., dtype=..., device=...),
        # which is a TypeError — the legacy constructor takes no keyword
        # arguments; torch.tensor is the correct factory.
        a = (
            torch.tensor([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=torch.float32, device=device)
            .contiguous()
            .view((1, 1, 3, 3))
            .repeat(3, 1, 1, 1)
        )
        b = (
            torch.tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], dtype=torch.float32, device=device)
            .contiguous()
            .view((1, 1, 3, 3))
            .repeat(3, 1, 1, 1)
        )
        gradient_x = torch.abs(F.conv2d(normals, a, groups=c))
        gradient_y = torch.abs(F.conv2d(normals, b, groups=c))
        gradient_x = gradient_x.mean(dim=1, keepdim=True)
        gradient_y = gradient_y.mean(dim=1, keepdim=True)
        edges = torch.sqrt(torch.pow(gradient_x, 2) + torch.pow(gradient_y, 2))
        edges = F.pad(edges, (1, 1, 1, 1), "constant", 0)
        thetas = torch.atan2(gradient_y, gradient_x)
        thetas = F.pad(thetas, (1, 1, 1, 1), "constant", 0)
        return edges, thetas

    def visual_check(self, rgb, samples):
        """Debug helper: overlay the four sampled point masks on the
        de-normalised RGB image and save it under ./test_ranking."""
        import os
        import matplotlib.pyplot as plt
        rgb = rgb.cpu().squeeze().numpy()

        # undo ImageNet-style normalisation
        mean = np.array([123.675, 116.28, 103.53])[:, np.newaxis, np.newaxis]
        std = np.array([58.395, 57.12, 57.375])[:, np.newaxis, np.newaxis]

        rgb = ((rgb * std) + mean).astype(np.uint8).transpose((1, 2, 0))
        mask_A, mask_B, mask_C, mask_D = samples
        # BUGFIX: np.bool was removed in NumPy 1.24; the builtin bool is the
        # documented replacement.
        rgb[mask_A.astype(bool)] = [255, 0, 0]
        rgb[mask_B.astype(bool)] = [0, 255, 0]
        rgb[mask_C.astype(bool)] = [0, 0, 255]
        rgb[mask_D.astype(bool)] = [255, 255, 0]

        filename = str(np.random.randint(10000))
        save_path = os.path.join('test_ranking', filename + '.png')
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        plt.imsave(save_path, rgb)

    def forward(self, prediction, target, mask, input, intrinsic, **kwargs):
        """Thin wrapper over `get_loss` (kept for interface parity)."""
        loss = self.get_loss(prediction, target, mask, input, intrinsic, **kwargs)
        return loss

    def get_loss(self, prediction, target, mask, input, intrinsic, **kwargs):
        """Compute the edge-guided pair-wise normal loss.

        Args:
            prediction: predicted depth map.
            target: ground-truth depth map.
            mask: valid-depth mask.
            input: RGB images (edges are extracted from them).
            intrinsic: camera intrinsics for depth-to-normal conversion.
            **kwargs: may carry precomputed 'predictions_normals',
                'targets_normals' and 'targets_normals_masks'.
        """
        gt_depths = target

        # derive normals from depth unless they were passed in precomputed
        if 'predictions_normals' not in kwargs:
            predictions_normals, _ = self.depth2normal(prediction, intrinsic, mask)
            targets_normals, targets_normals_masks = self.depth2normal(target, intrinsic, mask)
        else:
            predictions_normals = kwargs['predictions_normals']
            targets_normals = kwargs['targets_normals']
            targets_normals_masks = kwargs['targets_normals_masks']
        masks_normals = mask & targets_normals_masks

        # find edges from RGB
        edges_img, thetas_img = self.getEdge(input)
        # find edges from depth (computed for parity with the original code;
        # the depth-guided sampling branch below is disabled)
        edges_depth, thetas_depth = self.getEdge(gt_depths)

        # =============================
        n, c, h, w = targets_normals.size()

        predictions_normals = predictions_normals.contiguous().view(n, c, -1)
        targets_normals = targets_normals.contiguous().view(n, c, -1)
        masks_normals = masks_normals.contiguous().view(n, -1)
        edges_img = edges_img.contiguous().view(n, -1)
        thetas_img = thetas_img.contiguous().view(n, -1)
        edges_depth = edges_depth.contiguous().view(n, -1)
        thetas_depth = thetas_depth.contiguous().view(n, -1)

        losses = 0.0
        valid_samples = 0.0
        for i in range(n):
            # Edge-guided sampling of (A, B) point pairs along RGB edges.
            # (Depth-guided and normal-guided sampling variants existed as
            # commented-out code in the original and remain disabled.)
            (
                inputs_A,
                inputs_B,
                targets_A,
                targets_B,
                masks_A,
                masks_B,
                sample_num,
                row_img,
                col_img,
            ) = edgeGuidedSampling(
                predictions_normals[i, :],
                targets_normals[i, :],
                edges_img[i],
                thetas_img[i],
                masks_normals[i, :],
                h,
                w,
            )

            # only keep pairs where both endpoints have valid GT normals
            consistency_mask = masks_A & masks_B

            # GT ordinal relationship
            target_cos = torch.sum(targets_A * targets_B, dim=0)
            input_cos = torch.sum(inputs_A * inputs_B, dim=0)

            # penalise deviation of the predicted pair cosine from 1
            losses += torch.sum(torch.abs(torch.ones_like(target_cos) - input_cos) * consistency_mask.float())
            valid_samples += torch.sum(consistency_mask.float())

        loss = (losses / (valid_samples + self.eps)) * self.loss_weight
        if torch.isnan(loss).item() | torch.isinf(loss).item():
            # degrade gracefully: return a zero loss that keeps the graph alive
            loss = 0 * torch.sum(prediction)
            print(f'Pair-wise Normal Regression Loss NAN error, {loss}, valid pix: {valid_samples}')
        return loss
|
| 385 |
+
|
| 386 |
+
def tmp_check_normal(normals, masks, depth):
    """Debug helper: write normal/depth visualisations of the first two batch
    elements to ./test_normal under a random file name (not used in training).
    """
    import matplotlib.pyplot as plt
    import os
    import cv2
    from mono.utils.visualization import vis_surface_normal
    vis_normal1 = vis_surface_normal(normals[0, ...].permute(1, 2, 0).detach(), masks[0,...].detach().squeeze())
    vis_normal2 = vis_surface_normal(normals[1, ...].permute(1, 2, 0).detach(), masks[1,...].detach().squeeze())
    vis_depth1 = depth[0, ...].detach().cpu().squeeze().numpy()
    vis_depth2 = depth[1, ...].detach().cpu().squeeze().numpy()

    # random name avoids clobbering earlier dumps within the same run
    name = np.random.randint(100000)
    os.makedirs('test_normal', exist_ok=True)
    cv2.imwrite(f'test_normal/{name}.png', vis_normal1)
    cv2.imwrite(f'test_normal/{name + 1}.png', vis_normal2)
    plt.imsave(f'test_normal/{name}_d.png', vis_depth1)
    plt.imsave(f'test_normal/{name + 1}_d.png', vis_depth2)
|
| 402 |
+
|
| 403 |
+
if __name__ == '__main__':
    # Smoke test on random data (requires a CUDA device, since the sampling
    # helpers allocate on "cuda").
    ENL = EdgeguidedNormalLoss()
    depth = np.random.randn(2, 1, 20, 22)
    intrin = np.array([[300, 0, 10], [0, 300, 10], [0, 0, 1]])
    prediction = np.random.randn(2, 1, 20, 22)
    imgs = np.random.randn(2, 3, 20, 22)
    intrinsics = np.stack([intrin, intrin], axis=0)

    depth_t = torch.from_numpy(depth).cuda().float()
    prediction = torch.from_numpy(prediction).cuda().float()
    intrinsics = torch.from_numpy(intrinsics).cuda().float()
    imgs = torch.from_numpy(imgs).cuda().float()
    depth_t = -1 * torch.abs(depth_t)

    # BUGFIX: forward() takes (prediction, target, mask, input, intrinsic);
    # the previous call used keywords `masks=` / `images=`, which went into
    # **kwargs and raised a TypeError for the missing positional arguments.
    loss = ENL(prediction, depth_t, mask=depth_t > 0, input=imgs, intrinsic=intrinsics)
    print(loss)
|
external/Metric3D/training/mono/model/losses/PWN_Planes.py
ADDED
|
@@ -0,0 +1,291 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class PWNPlanesLoss(nn.Module):
|
| 7 |
+
"""
|
| 8 |
+
Virtual Normal Loss Function.
|
| 9 |
+
"""
|
| 10 |
+
def __init__(self, delta_cos=0.867, delta_diff_x=0.007,
             delta_diff_y=0.007, sample_groups=5000, loss_weight=1.0, data_type=['lidar', 'denselidar'], **kwargs):
    """
    Virtual normal planes loss, which constrains sampled point triplets to lie on the same 3D plane.
    :param delta_cos: cosine threshold used to reject (near-)collinear triplets
    :param delta_diff_x: threshold for the distance among three points along the x axis
    :param delta_diff_y: threshold for the distance among three points along the y axis
    :param sample_groups: number of sampled groups; each group of 3 points defines a plane
    :param loss_weight: scalar multiplier applied to the final loss
    :param data_type: dataset annotation types this loss is active for
    """
    super(PWNPlanesLoss, self).__init__()
    self.delta_cos = delta_cos
    self.delta_diff_x = delta_diff_x
    self.delta_diff_y = delta_diff_y
    self.sample_groups = sample_groups
    self.loss_weight = loss_weight
    self.data_type = data_type
|
| 29 |
+
|
| 30 |
+
def init_image_coor(self, B, H, W):
    """Cache homogeneous pixel coordinates on `self.uv` as a [B, 3, H, W] grid.

    NOTE(review): the first coordinate here iterates over H (rows) and the
    second over W (cols) — downstream users of `self.uv` must follow the same
    convention. The device is hard-coded to CUDA; presumably all inputs live
    on GPU — verify before CPU use.
    """
    u = torch.arange(0, H, dtype=torch.float32, device="cuda").contiguous().view(1, H, 1).expand(1, H, W) # [1, H, W]
    v = torch.arange(0, W, dtype=torch.float32, device="cuda").contiguous().view(1, 1, W).expand(1, H, W) # [1, H, W]
    ones = torch.ones((1, H, W), dtype=torch.float32, device="cuda")
    pixel_coords = torch.stack((u, v, ones), dim=1).expand(B, 3, H, W) # [B, 3, H, W]
    # stored as a plain attribute rather than a registered buffer so it can
    # be rebuilt freely per batch size
    # self.register_buffer('uv', pixel_coords, persistent=False)
    self.uv = pixel_coords
|
| 37 |
+
|
| 38 |
+
def upproj_pcd(self, depth, intrinsics_inv):
    """Back-project a depth map into camera-frame 3D points.

    Args:
        depth: depth maps -- [B, 1, H, W]
        intrinsics_inv: inverse intrinsics matrix for each element of batch -- [B, 3, 3]
    Returns:
        Camera-frame point cloud -- [B, 3, H, W]; each pixel's ray
        `K^-1 @ (u, v, 1)` scaled by its depth.
    """
    b, _, h, w = depth.size()
    # init_image_coor must have been called with this batch size beforehand
    assert self.uv.shape[0] == b
    current_pixel_coords = self.uv.reshape(b, 3, -1) # [B, 3, H*W]
    cam_coords = (intrinsics_inv @ current_pixel_coords)
    cam_coords = cam_coords.reshape(b, 3, h, w)
    out = depth * cam_coords
    return out
|
| 53 |
+
|
| 54 |
+
# def transfer_xyz(self, depth):
|
| 55 |
+
# x = self.u_u0 * torch.abs(depth) / self.focal_length
|
| 56 |
+
# y = self.v_v0 * torch.abs(depth) / self.focal_length
|
| 57 |
+
# z = depth
|
| 58 |
+
# pw = torch.cat([x, y, z], 1).permute(0, 2, 3, 1).contiguous() # [b, h, w, c]
|
| 59 |
+
# return pw
|
| 60 |
+
|
| 61 |
+
# def transfer_uvz(self, depth):
|
| 62 |
+
# max_uv = self.u_u0.max()
|
| 63 |
+
# u = self.u_u0.repeat((depth.shape[0], 1, 1, 1)) / max_uv
|
| 64 |
+
# v = self.v_v0.repeat((depth.shape[0], 1, 1, 1)) / max_uv
|
| 65 |
+
# z = depth
|
| 66 |
+
# pw = torch.cat([u, v, z], 1).permute(0, 2, 3, 1).contiguous() # [b, h, w, c]
|
| 67 |
+
# return pw
|
| 68 |
+
|
| 69 |
+
def select_index(self, mask_kp):
    """Sample 3 * sample_groups pixel indices per plane mask in `mask_kp`.

    Args:
        mask_kp: [x, 1, h, w] per-plane boolean masks.

    Returns:
        Dict with x/y coordinates for the three points of every group
        ('p1_x', 'p1_y', ..., 'p3_y', each [x, sample_groups]) plus a
        'valid_batch' flag [x, 1] marking mask channels that had enough
        valid pixels to sample from.

    NOTE(review): sampling tensors are created on a hard-coded "cuda" device.
    """
    x, _, h, w = mask_kp.shape

    select_size = int(3 * self.sample_groups)
    p1_x = []
    p1_y = []
    p2_x = []
    p2_y = []
    p3_x = []
    p3_y = []
    valid_batch = torch.ones((x, 1), dtype=torch.bool, device="cuda")
    for i in range(x):
        mask_kp_i = mask_kp[i, 0, :, :]
        valid_points = torch.nonzero(mask_kp_i)

        if valid_points.shape[0] < select_size * 0.6:
            # too few valid pixels: fall back to sampling from the complement
            # and mark this channel invalid so its loss is masked out later
            valid_points = torch.nonzero(~mask_kp_i.to(torch.uint8))
            valid_batch[i, :] = False
        elif valid_points.shape[0] < select_size:
            # slightly short of select_size: pad by repeating random valid points
            repeat_idx = torch.randperm(valid_points.shape[0], device="cuda")[:select_size - valid_points.shape[0]]
            valid_repeat = valid_points[repeat_idx]
            valid_points = torch.cat((valid_points, valid_repeat), 0)
        else:
            valid_points = valid_points
        """

        if valid_points.shape[0] <= select_size:
            valid_points = torch.nonzero(~mask_kp_i.to(torch.uint8))
            valid_batch[i, :] = False
        """
        # shuffle and strided-slice into three interleaved point sets
        select_indx = torch.randperm(valid_points.size(0), device="cuda")

        p1 = valid_points[select_indx[0:select_size:3]]
        p2 = valid_points[select_indx[1:select_size:3]]
        p3 = valid_points[select_indx[2:select_size:3]]

        # nonzero() rows are (row, col): column 1 is x, column 0 is y
        p1_x.append(p1[:, 1])
        p1_y.append(p1[:, 0])

        p2_x.append(p2[:, 1])
        p2_y.append(p2[:, 0])

        p3_x.append(p3[:, 1])
        p3_y.append(p3[:, 0])
    p123 = {'p1_x': torch.stack(p1_x), 'p1_y': torch.stack(p1_y),
            'p2_x': torch.stack(p2_x), 'p2_y': torch.stack(p2_y),
            'p3_x': torch.stack(p3_x), 'p3_y': torch.stack(p3_y),
            'valid_batch': valid_batch}
    return p123
|
| 118 |
+
|
| 119 |
+
def form_pw_groups(self, p123, pw):
    """
    Form 3D point groups, with 3 points in each group.
    :param p123: points index dict (see `select_index`)
    :param pw: 3D points, # [1, h, w, c]
    :return: pw_groups -- [B, N, 3(x,y,z), 3(p1,p2,p3)]
    """
    p1_x = p123['p1_x']
    p1_y = p123['p1_y']
    p2_x = p123['p2_x']
    p2_y = p123['p2_y']
    p3_x = p123['p3_x']
    p3_y = p123['p3_y']
    # advanced indexing: the [B, 1] batch column broadcasts against the
    # [B, N] coordinate grids, gathering one xyz vector per (batch, sample)
    batch_list = torch.arange(0, p1_x.shape[0], device="cuda")[:, None]
    pw = pw.repeat((p1_x.shape[0], 1, 1, 1))
    pw1 = pw[batch_list, p1_y, p1_x, :]
    pw2 = pw[batch_list, p2_y, p2_x, :]
    pw3 = pw[batch_list, p3_y, p3_x, :]

    # [B, N, 3(x,y,z), 3(p1,p2,p3)]
    pw_groups = torch.cat([pw1[:, :, :, None], pw2[:, :, :, None], pw3[:, :, :, None]], 3)
    return pw_groups
|
| 141 |
+
|
| 142 |
+
def filter_mask(self, pw_pred):
    """
    Reject degenerate point triplets.
    :param pw_pred: constructed 3d vector (x, y, disp), [B, N, 3(x,y,z), 3(p1,p2,p3)]
    :return: bool mask [B, N]; True where the triplet is neither
             (near-)collinear nor has two points (near-)coincident in x and y.
    """
    # pairwise xy difference vectors between the three points
    xy12 = pw_pred[:, :, 0:2, 1] - pw_pred[:, :, 0:2, 0]
    xy13 = pw_pred[:, :, 0:2, 2] - pw_pred[:, :, 0:2, 0]
    xy23 = pw_pred[:, :, 0:2, 2] - pw_pred[:, :, 0:2, 1]
    # Ignore linear
    xy_diff = torch.cat([xy12[:, :, :, np.newaxis], xy13[:, :, :, np.newaxis], xy23[:, :, :, np.newaxis]],
                        3) # [b, n, 2(xy), 3]
    m_batchsize, groups, coords, index = xy_diff.shape
    # cosine similarities between the three difference vectors via a Gram matrix
    proj_query = xy_diff.contiguous().view(m_batchsize * groups, -1, index).permute(0, 2, 1).contiguous() # [bn, 3(p123), 2(xy)]
    proj_key = xy_diff.contiguous().view(m_batchsize * groups, -1, index) # [bn, 2(xy), 3(p123)]
    q_norm = proj_query.norm(2, dim=2) # [bn, 3(p123)]
    nm = torch.bmm(q_norm.contiguous().view(m_batchsize * groups, index, 1), q_norm.contiguous().view(m_batchsize * groups, 1, index)) # []
    energy = torch.bmm(proj_query, proj_key) # transpose check [bn, 3(p123), 3(p123)]
    norm_energy = energy / (nm + 1e-8)
    norm_energy = norm_energy.contiguous().view(m_batchsize * groups, -1) # [bn, 9(p123)]
    # the 3 diagonal self-similarities are always 1; more than 3 entries with
    # |cos| above the threshold means the triplet is nearly collinear — ignore
    mask_cos = torch.sum((norm_energy > self.delta_cos) + (norm_energy < -self.delta_cos), 1) > 3 # ignore
    mask_cos = mask_cos.contiguous().view(m_batchsize, groups) # [b, n] # ignore

    # ignore triplets with two points (near-)coincident in both x and y
    mask_x = torch.sum(torch.abs(xy_diff[:, :, 0, :]) < self.delta_diff_x, 2) > 0
    mask_y = torch.sum(torch.abs(xy_diff[:, :, 1, :]) < self.delta_diff_y, 2) > 0
    mask_near = mask_x & mask_y
    mask_valid_pts = ~(mask_cos | mask_near)
    return mask_valid_pts
|
| 169 |
+
|
| 170 |
+
    def select_points_groups(self, pcd_bi, mask_kp):
        """Sample 3-point groups on each plane mask and filter degenerate ones.

        :param pcd_bi: point cloud of one image; permuted below from
            channel-first to [1, h, w, 3(xyz)] — assumes input is
            [1, 3, H, W] (TODO confirm against `upproj_pcd`)
        :param mask_kp: per-plane boolean masks, [x, 1, H, W] (x planes)
        :return: (groups_pred [x, N, 3(x,y,z), 3(p1,p2,p3)], mask_valid [x, N])
        """
        p123 = self.select_index(mask_kp)  # p1_x: [x, n]
        pcd_bi = pcd_bi.permute((0, 2, 3, 1)).contiguous()  #[1, h, w, 3(xyz)]
        groups_pred = self.form_pw_groups(p123, pcd_bi)  # [x, N, 3(x,y,z), 3(p1,p2,p3)]

        # mask:[x, n]
        # Drop collinear / too-close groups, then zero out every sample of
        # planes flagged invalid by the index selection (valid_batch).
        mask_valid_pts = (self.filter_mask(groups_pred)).to(torch.bool)  # [x, n]
        mask_valid_batch = p123['valid_batch'].repeat(1, mask_valid_pts.shape[1])  # [x, n]
        mask_valid = mask_valid_pts & mask_valid_batch  # [x, n]
        return groups_pred, mask_valid
|
| 180 |
+
|
| 181 |
+
    def constrain_a_plane_loss(self, pw_groups_pre_i, mask_valid_i):
        """Measure co-planarity of one instance plane via virtual-normal consistency.

        pw_groups_pre: selected points groups for the i-th plane, [N, 3(x,y,z), 3(p1,p2,p3)]
        :param mask_valid_i: boolean mask of usable groups, [N]
        :return: (loss_sum, valid_num) — summed cosine deviation from the mean
            normal, and the number of groups it was summed over
        """
        if torch.sum(mask_valid_i) < 2:
            # Fewer than two usable groups: nothing to compare. Return a
            # graph-connected zero so backward() still works.
            return 0.0 * torch.sum(pw_groups_pre_i), 0
        pw_groups_pred_i = pw_groups_pre_i[mask_valid_i]  # [n, 3, 3]
        # Normal of the triangle spanned by (p1, p2, p3).
        p12 = pw_groups_pred_i[:, :, 1] - pw_groups_pred_i[:, :, 0]
        p13 = pw_groups_pred_i[:, :, 2] - pw_groups_pred_i[:, :, 0]
        virtual_normal = torch.cross(p12, p13, dim=1)  # [n, 3]
        norm = torch.norm(virtual_normal, 2, dim=1, keepdim=True)
        virtual_normal = virtual_normal / (norm + 1e-8)

        # re-orient normals consistently: flip normals whose dot product with
        # the viewing ray (the point itself) is positive
        orient_mask = torch.sum(torch.squeeze(virtual_normal) * torch.squeeze(pw_groups_pred_i[:, :, 0]), dim=1) > 0
        virtual_normal[orient_mask] *= -1
        #direct = virtual_normal[:, 2] / torch.abs(virtual_normal[:, 2])
        #virtual_normal = virtual_normal / direct[:, None] # [n, 3]

        # Penalize each normal's deviation from the normalized mean normal.
        aver_normal = torch.sum(virtual_normal, dim=0)
        aver_norm = torch.norm(aver_normal, 2, dim=0, keepdim=True)
        aver_normal = aver_normal / (aver_norm + 1e-5)  # [3]

        # 1 - cos(angle between each normal and the mean normal).
        cos_diff = 1.0 - torch.sum(virtual_normal * aver_normal, dim=1)
        loss_sum = torch.sum(cos_diff, dim=0)
        valid_num = cos_diff.numel()
        return loss_sum, valid_num
|
| 208 |
+
|
| 209 |
+
    def get_loss(self, pred_depth, gt_depth, ins_planes_mask, intrinsic=None):
        """
        Co-plane loss. Enforce points residing on the same instance plane to be co-plane.
        :param pred_depth: predicted depth map, [B,C,H,W]
        :param gt_depth: ground-truth depth map, [B,C,H,W] (only normalized here, not read)
        :param ins_planes_mask: mask for planes, each plane is noted with a value, [B, C, H, W]
        :param intrinsic: camera intrinsics; its inverse is used to lift depth to 3D
        """
        # Normalize all inputs to 4-D [B, C, H, W].
        if pred_depth.ndim==3:
            pred_depth = pred_depth[None, ...]
        if gt_depth.ndim == 3:
            gt_depth = gt_depth[None, ...]
        if ins_planes_mask.ndim == 3:
            ins_planes_mask = ins_planes_mask[None, ...]

        B, _, H, W = pred_depth.shape
        # NOTE(review): device is hard-coded to "cuda" here (and in other
        # helpers of this class) — breaks CPU-only runs; confirm before porting.
        loss_sum = torch.tensor(0.0, device="cuda")
        valid_planes_num = 0

        #if 'uv' not in self._buffers or ('uv' in self._buffers and self.uv.shape[0] != B):
        self.init_image_coor(B, H, W)
        # Back-project predicted depth into a 3D point cloud with K^-1.
        pcd = self.upproj_pcd(pred_depth, intrinsic.inverse())

        for i in range(B):
            mask_i = ins_planes_mask[i, :][None, :, :]
            unique_planes = torch.unique(mask_i)
            # One boolean mask per plane id; id 0 marks "no plane".
            planes = [mask_i == m for m in unique_planes if m != 0] #[x, 1, h, w] x is the planes number
            if len(planes) == 0:
                continue
            mask_planes = torch.cat(planes, dim=0) #torch.stack(planes, dim=0) #
            pcd_grops_pred, mask_valid = self.select_points_groups(pcd[i, ...][None, :, :, :], mask_planes)  # [x, N, 3(x,y,z), 3(p1,p2,p3)]

            # Accumulate the normal-consistency loss of every plane.
            # NOTE(review): `numel()-1` assumes id 0 is present in mask_i;
            # if it is not, the last plane is silently skipped — confirm.
            for j in range(unique_planes.numel()-1):
                mask_valid_j = mask_valid[j, :]
                pcd_grops_pred_j = pcd_grops_pred[j, :]
                loss_tmp, valid_angles = self.constrain_a_plane_loss(pcd_grops_pred_j, mask_valid_j)
                valid_planes_num += valid_angles
                loss_sum += loss_tmp

        loss = loss_sum / (valid_planes_num + 1e-6) * self.loss_weight
        if torch.isnan(loss).item() | torch.isinf(loss).item():
            # Fall back to a graph-connected zero on numerical failure.
            loss = torch.sum(pred_depth) * 0
            print(f'PWNPlane NAN error, {loss}')
        return loss
|
| 252 |
+
|
| 253 |
+
    def forward(self, prediction, target, mask, intrinsic, **kwargs):  #gt_depth, pred_depth, select=True):
        """
        Virtual normal loss, applied only to Taskonomy samples.
        :param prediction: predicted depth map, [B,W,H,C]
        :param target: target label, ground truth depth, [B, W, H, C], padding region [padding_up, padding_down]
        :param mask: validity mask (unused here; plane masks come from kwargs['sem_mask'])
        :param intrinsic: per-sample camera intrinsics
        :return: scalar co-plane loss
        """
        # Only Taskonomy samples carry instance-plane annotations.
        dataset = kwargs['dataset']
        batch_mask = np.array(dataset) == 'Taskonomy'
        if np.sum(batch_mask) == 0:
            # No Taskonomy sample in this batch: return a zero that still
            # depends on the prediction so the autograd graph stays connected.
            return torch.sum(prediction) * 0.0
        ins_planes_mask = kwargs['sem_mask'] #
        assert ins_planes_mask.ndim == 4
        loss = self.get_loss(
            prediction[batch_mask],
            target[batch_mask],
            ins_planes_mask[batch_mask],
            intrinsic[batch_mask],
        )
        return loss
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
if __name__ == '__main__':
    # Smoke test: random depths, two synthetic plane masks, shared intrinsics.
    import cv2

    plane_loss = PWNPlanesLoss()

    depth_pred = torch.rand([2, 1, 385, 513]).cuda()
    depth_gt = torch.rand([2, 1, 385, 513]).cuda()
    depth_gt[:, :, 3:20, 40:60] = 0

    # Two "planes": id 1 where prediction is high, id 2 where it is low.
    high_region = depth_pred > 0.9
    low_region = depth_pred < 0.5
    plane_ids = torch.zeros_like(depth_gt, dtype=torch.uint8)
    plane_ids = 1 * high_region + 2 * low_region
    plane_ids[1, ...] = 0  # second sample carries no planes

    cam_K = torch.tensor([[100, 0, 50], [0, 100, 50,], [0,0,1]]).cuda().float()
    cam_Ks = torch.stack([cam_K, cam_K], dim=0)

    out = plane_loss(depth_gt, depth_gt, plane_ids, cam_Ks,
                     dataset=np.array(['Taskonomy', 'Taskonomy']))
    print(out)
|
external/Metric3D/training/mono/model/losses/Ranking.py
ADDED
|
@@ -0,0 +1,342 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from torch import nn
|
| 3 |
+
import numpy as np
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
import matplotlib.pyplot as plt
|
| 6 |
+
import os
|
| 7 |
+
|
| 8 |
+
"""
|
| 9 |
+
Sampling strategies: RS (Random Sampling), EGS (Edge-Guided Sampling), and IGS (Instance-Guided Sampling)
|
| 10 |
+
"""
|
| 11 |
+
###########
|
| 12 |
+
# RANDOM SAMPLING
|
| 13 |
+
# input:
|
| 14 |
+
# predictions[i,:], targets[i, :], masks[i, :], self.mask_value, self.point_pairs
|
| 15 |
+
# return:
|
| 16 |
+
# inputs_A, inputs_B, targets_A, targets_B, consistent_masks_A, consistent_masks_B
|
| 17 |
+
###########
|
| 18 |
+
def randomSampling(predictions, targets, masks, threshold, sample_num):
    """Randomly sample ``sample_num`` point pairs (A, B) from valid pixels.

    Pixels are valid where ``targets > threshold``; one shared random
    permutation indexes predictions, targets and masks so the three streams
    stay aligned pair-for-pair.

    :param predictions: flattened predicted depth, [N]
    :param targets: flattened GT depth, [N]
    :param masks: flattened validity mask, [N]
    :param threshold: minimum GT value for a pixel to be sampled
    :param sample_num: number of pairs to draw
    :return: inputs_A, inputs_B, targets_A, targets_B,
             consistent_masks_A, consistent_masks_B (all [<= sample_num])
    """
    # find A-B point pairs from predictions
    inputs_index = torch.masked_select(predictions, targets.gt(threshold))
    num_effect_pixels = len(inputs_index)
    # BUGFIX: derive the device from the input instead of hard-coding "cuda",
    # so sampling also works on CPU-only runs and non-default GPUs.
    shuffle_effect_pixels = torch.randperm(num_effect_pixels, device=predictions.device)
    inputs_A = inputs_index[shuffle_effect_pixels[0:sample_num*2:2]]
    inputs_B = inputs_index[shuffle_effect_pixels[1:sample_num*2:2]]
    # find corresponding pairs from GT
    target_index = torch.masked_select(targets, targets.gt(threshold))
    targets_A = target_index[shuffle_effect_pixels[0:sample_num*2:2]]
    targets_B = target_index[shuffle_effect_pixels[1:sample_num*2:2]]
    # only compute the losses of point pairs with valid GT
    consistent_masks_index = torch.masked_select(masks, targets.gt(threshold))
    consistent_masks_A = consistent_masks_index[shuffle_effect_pixels[0:sample_num*2:2]]
    consistent_masks_B = consistent_masks_index[shuffle_effect_pixels[1:sample_num*2:2]]

    # An odd number of valid pixels leaves A one element longer than B;
    # trim so both sides of every pair exist.
    if len(targets_A) > len(targets_B):
        targets_A = targets_A[:-1]
        inputs_A = inputs_A[:-1]
        consistent_masks_A = consistent_masks_A[:-1]

    return inputs_A, inputs_B, targets_A, targets_B, consistent_masks_A, consistent_masks_B
|
| 42 |
+
|
| 43 |
+
###########
|
| 44 |
+
# EDGE-GUIDED SAMPLING
|
| 45 |
+
# input:
|
| 46 |
+
# predictions[i,:], targets[i, :], masks[i, :], edges_img[i], thetas_img[i], masks[i, :], h, w
|
| 47 |
+
# return:
|
| 48 |
+
# inputs_A, inputs_B, targets_A, targets_B, masks_A, masks_B
|
| 49 |
+
###########
|
| 50 |
+
def ind2sub(idx, cols):
    """Convert flat indices into (row, col) pairs for a grid with `cols` columns."""
    rows = torch.div(idx, cols, rounding_mode='floor')
    columns = idx - rows * cols  # same as idx % cols under floor division
    return rows, columns
|
| 54 |
+
|
| 55 |
+
def sub2ind(r, c, cols):
    """Flatten (row, col) pairs into linear indices for a grid with `cols` columns."""
    return (c + cols * r).int()
|
| 58 |
+
|
| 59 |
+
def edgeGuidedSampling(predictions, targets, edges_img, thetas_img, masks, h, w):
    """Sample chained point pairs around image edges (edge-guided sampling).

    For each randomly chosen edge pixel (anchor), four points a, b, c, d are
    placed along the gradient direction at random distances in [2, 40), and
    the chained pairs (a-b, b-c, c-d) are returned.

    :param predictions: flattened predicted depth, [h*w]
    :param targets: flattened GT depth, [h*w]
    :param edges_img: flattened edge-magnitude map, [h*w]
    :param thetas_img: flattened edge-orientation map, [h*w]
    :param masks: flattened validity mask, [h*w]
    :param h: image height
    :param w: image width
    :return: inputs_A, inputs_B, targets_A, targets_B, masks_A, masks_B,
             sample_num (number of anchors; outputs have 3*sample_num entries)
    """
    # BUGFIX: derive the device from the input instead of hard-coding "cuda".
    device = predictions.device

    # find edges: keep pixels with at least 10% of the maximum edge response
    edges_max = edges_img.max()
    edges_mask = edges_img.ge(edges_max*0.1)
    edges_loc = edges_mask.nonzero()

    inputs_edge = torch.masked_select(predictions, edges_mask)
    thetas_edge = torch.masked_select(thetas_img, edges_mask)
    minlen = inputs_edge.size()[0]

    # find anchor points (i.e, edge points); sampled with replacement
    sample_num = minlen
    index_anchors = torch.randint(0, minlen, (sample_num,), dtype=torch.long, device=device)
    theta_anchors = torch.gather(thetas_edge, 0, index_anchors)
    row_anchors, col_anchors = ind2sub(edges_loc[index_anchors].squeeze(1), w)
    ## compute the coordinates of 4 points per anchor; distances drawn from [2, 40)
    distance_matrix = torch.randint(2, 40, (4, sample_num), device=device)
    # first two points on one side of the edge, last two on the other
    pos_or_neg = torch.ones(4, sample_num, device=device)
    pos_or_neg[:2, :] = -pos_or_neg[:2, :]
    distance_matrix = distance_matrix.float() * pos_or_neg
    col = col_anchors.unsqueeze(0).expand(4, sample_num).long() + torch.round(distance_matrix.float() * torch.abs(torch.cos(theta_anchors)).unsqueeze(0)).long()
    row = row_anchors.unsqueeze(0).expand(4, sample_num).long() + torch.round(distance_matrix.float() * torch.abs(torch.sin(theta_anchors)).unsqueeze(0)).long()

    # clamp to image bounds: 0 <= col <= w-1, 0 <= row <= h-1
    col[col < 0] = 0
    col[col > w-1] = w-1
    row[row < 0] = 0
    row[row > h-1] = h-1

    # chained pairs a-b, b-c, c-d
    a = sub2ind(row[0, :], col[0, :], w)
    b = sub2ind(row[1, :], col[1, :], w)
    c = sub2ind(row[2, :], col[2, :], w)
    d = sub2ind(row[3, :], col[3, :], w)
    A = torch.cat((a, b, c), 0)
    B = torch.cat((b, c, d), 0)

    inputs_A = torch.gather(predictions, 0, A.long())
    inputs_B = torch.gather(predictions, 0, B.long())
    targets_A = torch.gather(targets, 0, A.long())
    targets_B = torch.gather(targets, 0, B.long())
    masks_A = torch.gather(masks, 0, A.long())
    masks_B = torch.gather(masks, 0, B.long())

    return inputs_A, inputs_B, targets_A, targets_B, masks_A, masks_B, sample_num
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
######################################################
|
| 125 |
+
# Ranking loss (Random sampling)
|
| 126 |
+
#####################################################
|
| 127 |
+
class RankingLoss(nn.Module):
    """Pairwise ranking loss over randomly sampled point pairs.

    For each sampled pair, the GT depth ratio decides an ordinal label
    (closer / farther / roughly equal). Near-equal pairs are pulled together
    with a squared-difference term; ordered pairs are separated with a
    logistic ranking term.
    """
    def __init__(self, point_pairs=5000, sigma=0.03, alpha=1.0, mask_value=-1e-8, loss_weight=1, **kwargs):
        super(RankingLoss, self).__init__()
        self.point_pairs = point_pairs # number of point pairs
        self.sigma = sigma # used for determining the ordinal relationship between a selected pair
        self.alpha = alpha # used for balancing the effect of = and (<,>)
        self.mask_value = mask_value
        self.loss_weight = loss_weight
        self.eps = 1e-6

    def forward(self, prediction, target, mask=None, **kwargs):
        """Compute the ranking loss.

        :param prediction: predicted depth, [B, C, H, W]
        :param target: GT depth, [B, C, H, W]
        :param mask: optional validity mask; defaults to target > mask_value
        :return: scalar loss
        """
        n, c, h, w = target.size()
        # BUGFIX: use an identity check — `mask == None` on a tensor invokes
        # tensor comparison semantics instead of testing for "not provided".
        if mask is None:
            mask = target > self.mask_value
        if n != 1:
            prediction = prediction.view(n, -1)#.double()
            target = target.view(n, -1)#.double()
            mask = mask.view(n, -1)#.double()
        else:
            prediction = prediction.contiguous().view(1, -1)#.double()
            target = target.contiguous().view(1, -1)#.double()
            mask = mask.contiguous().view(1, -1)#.double()

        loss = 0.0 #torch.tensor([0.0]).cuda()
        valid_samples = 0
        for i in range(n):
            # find A-B point pairs
            inputs_A, inputs_B, targets_A, targets_B, consistent_masks_A, consistent_masks_B = randomSampling(
                prediction[i, :], target[i, :], mask[i, :], self.mask_value, self.point_pairs)

            # GT ordinal relationship
            target_ratio = torch.div(targets_A, targets_B + self.eps)
            # BUGFIX: combine boolean masks with `&` rather than `*`.
            mask_eq = target_ratio.lt(1.0 + self.sigma) & target_ratio.gt(1.0 / (1.0 + self.sigma))
            labels = torch.zeros_like(target_ratio)
            labels[target_ratio.ge(1.0 + self.sigma)] = 1
            labels[target_ratio.le(1.0 / (1.0 + self.sigma))] = -1

            # consider forward-backward consistency checking, only compute the
            # losses of point pairs with valid GT
            consistency_mask = consistent_masks_A & consistent_masks_B

            # compute loss: L2 pull for equal pairs, logistic push otherwise
            equal_loss = (inputs_A - inputs_B).pow(2)[mask_eq & consistency_mask]
            unequal_loss = torch.log(1 + torch.exp((-inputs_A + inputs_B) * labels))[(~mask_eq) & consistency_mask]

            loss = loss + self.alpha * equal_loss.sum() + unequal_loss.sum()
            valid_samples = valid_samples + unequal_loss.numel() + equal_loss.numel()
        loss = loss / (valid_samples + self.eps)
        if torch.isnan(loss).item() | torch.isinf(loss).item():
            raise RuntimeError(f'VNL error, {loss}')
        return loss * self.loss_weight
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
|
| 181 |
+
######################################################
|
| 182 |
+
# EdgeguidedRankingLoss (with regularization term)
|
| 183 |
+
# Please comment regularization_loss if you don't want to use multi-scale gradient matching term
|
| 184 |
+
#####################################################
|
| 185 |
+
class EdgeguidedRankingLoss(nn.Module):
    """Ranking loss with edge-guided plus random point-pair sampling.

    Point pairs are drawn both around RGB image edges — where depth ordering
    is most informative — and uniformly at random; the two pair sets are
    merged before the ordinal loss is computed.
    """
    def __init__(self, point_pairs=5000, sigma=0.03, alpha=1.0, mask_value=1e-6, loss_weight=1.0, data_type=['rel', 'sfm', 'stereo', 'lidar'], **kwargs):
        super(EdgeguidedRankingLoss, self).__init__()
        self.point_pairs = point_pairs # number of point pairs
        self.sigma = sigma # used for determining the ordinal relationship between a selected pair
        self.alpha = alpha # used for balancing the effect of = and (<,>)
        self.mask_value = mask_value
        self.loss_weight = loss_weight
        self.data_type = data_type
        self.eps = 1e-6

    def getEdge(self, images):
        """Return Sobel edge-magnitude and orientation maps, padded back to H x W."""
        n, c, h, w = images.size()
        # BUGFIX: build the Sobel kernels on the input's device instead of
        # hard-coding "cuda", so the loss also runs on CPU-only machines.
        device = images.device
        a = torch.tensor([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], dtype=torch.float32, device=device).view((1, 1, 3, 3)).repeat(1, 1, 1, 1)
        b = torch.tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]], dtype=torch.float32, device=device).view((1, 1, 3, 3)).repeat(1, 1, 1, 1)
        if c == 3:
            # RGB input: gradients computed on the first channel only.
            gradient_x = F.conv2d(images[:, 0, :, :].unsqueeze(1), a)
            gradient_y = F.conv2d(images[:, 0, :, :].unsqueeze(1), b)
        else:
            gradient_x = F.conv2d(images, a)
            gradient_y = F.conv2d(images, b)
        edges = torch.sqrt(torch.pow(gradient_x, 2) + torch.pow(gradient_y, 2))
        edges = F.pad(edges, (1, 1, 1, 1), "constant", 0)  # valid conv lost a 1-px border
        thetas = torch.atan2(gradient_y, gradient_x)
        thetas = F.pad(thetas, (1, 1, 1, 1), "constant", 0)

        return edges, thetas

    def visual_check(self, rgb, samples):
        """Debug helper: paint the sampled A/B/C/D point masks onto the RGB image."""
        rgb = rgb.cpu().squeeze().numpy()

        mean = np.array([123.675, 116.28, 103.53])[:, np.newaxis, np.newaxis]
        std = np.array([58.395, 57.12, 57.375])[:, np.newaxis, np.newaxis]

        rgb = ((rgb * std) + mean).astype(np.uint8).transpose((1, 2, 0))
        mask_A, mask_B, mask_C, mask_D = samples
        # BUGFIX: the `np.bool` alias was removed in NumPy >= 1.24 (raises
        # AttributeError); use the builtin bool instead.
        rgb[mask_A.astype(bool)] = [255, 0, 0]
        rgb[mask_B.astype(bool)] = [0, 255, 0]
        rgb[mask_C.astype(bool)] = [0, 0, 255]
        rgb[mask_D.astype(bool)] = [255, 255, 0]

        filename = str(np.random.randint(10000))
        save_path = os.path.join('test_ranking', filename + '.png')
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        plt.imsave(save_path, rgb)

    def forward(self, prediction, target, mask=None, input=None, **kwargs):
        """Compute the edge-guided ranking loss; `input` is the RGB image."""
        loss = self.get_loss(prediction, target, mask, input, **kwargs)
        return loss

    def get_loss(self, prediction, target, mask=None, input=None, **kwargs):
        # BUGFIX: identity check instead of `mask == None`.
        if mask is None:
            mask = target > self.mask_value
        # find edges from RGB
        edges_img, thetas_img = self.getEdge(input)
        # find edges from target depths (feeds the currently disabled
        # depth-edge-guided sampling branch noted below)
        edges_depth, thetas_depth = self.getEdge(target)

        #=============================
        n, c, h, w = target.size()
        if n != 1:
            prediction = prediction.view(n, -1)#.double()
            target = target.view(n, -1)#.double()
            mask = mask.view(n, -1)#.double()
            edges_img = edges_img.view(n, -1)#.double()
            thetas_img = thetas_img.view(n, -1)#.double()
            edges_depth = edges_depth.view(n, -1)#.double()
            thetas_depth = thetas_depth.view(n, -1)#.double()
        else:
            prediction = prediction.contiguous().view(1, -1)#.double()
            target = target.contiguous().view(1, -1)#.double()
            mask = mask.contiguous().view(1, -1)#.double()
            edges_img = edges_img.contiguous().view(1, -1)#.double()
            thetas_img = thetas_img.contiguous().view(1, -1)#.double()
            edges_depth = edges_depth.view(1, -1)#.double()
            thetas_depth = thetas_depth.view(1, -1)#.double()

        # initialization
        loss = 0.0 #torch.tensor([0.0]).cuda()
        valid_samples = 0

        for i in range(n):
            # Edge-Guided sampling from RGB edges
            inputs_A, inputs_B, targets_A, targets_B, masks_A, masks_B, sample_num = edgeGuidedSampling(
                prediction[i, :],
                target[i, :],
                edges_img[i],
                thetas_img[i],
                mask[i, :],
                h,
                w
            )
            # NOTE: a second edge-guided pass on depth edges
            # (edges_depth/thetas_depth) is intentionally disabled; re-enable
            # by sampling with those maps and concatenating the pairs below.

            # Random Sampling, matched in size to the edge-guided set
            random_sample_num = sample_num
            random_inputs_A, random_inputs_B, random_targets_A, random_targets_B, random_masks_A, random_masks_B = randomSampling(
                prediction[i, :],
                target[i, :],
                mask[i, :],
                self.mask_value,
                random_sample_num
            )

            # Combine EGS + RS
            inputs_A_merge = torch.cat((inputs_A, random_inputs_A,), 0)
            inputs_B_merge = torch.cat((inputs_B, random_inputs_B,), 0)
            targets_A_merge = torch.cat((targets_A, random_targets_A,), 0)
            targets_B_merge = torch.cat((targets_B, random_targets_B,), 0)
            masks_A_merge = torch.cat((masks_A, random_masks_A,), 0)
            masks_B_merge = torch.cat((masks_B, random_masks_B,), 0)

            # GT ordinal relationship
            target_ratio = torch.div(targets_A_merge + 1e-6, targets_B_merge + 1e-6)
            mask_eq = target_ratio.lt(1.0 + self.sigma) & target_ratio.gt(1.0 / (1.0 + self.sigma))
            labels = torch.zeros_like(target_ratio)
            labels[target_ratio.ge(1.0 + self.sigma)] = 1
            labels[target_ratio.le(1.0 / (1.0 + self.sigma))] = -1

            # consider forward-backward consistency checking, i.e, only compute
            # losses of point pairs with valid GT
            consistency_mask = masks_A_merge & masks_B_merge

            equal_loss = (inputs_A_merge - inputs_B_merge).pow(2)[mask_eq & consistency_mask]
            unequal_loss = torch.log(1 + torch.exp((-inputs_A_merge + inputs_B_merge) * labels))[(~mask_eq) & consistency_mask]

            loss = loss + self.alpha * torch.sum(equal_loss) + torch.sum(unequal_loss)
            valid_samples = valid_samples + equal_loss.numel()
            valid_samples = valid_samples + unequal_loss.numel()
        loss = loss / (valid_samples + self.eps)
        if torch.isnan(loss).item() | torch.isinf(loss).item():
            raise RuntimeError(f'VNL error, {loss}')
        return loss * self.loss_weight
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
if __name__ == '__main__':
    # Smoke test: identical prediction/target should yield (near) zero loss.
    import cv2

    criterion = EdgeguidedRankingLoss()

    depth_pred_np = np.random.randn(2, 1, 480, 640)
    depth_gt_np = np.ones((2, 1, 480, 640))

    depth_gt = torch.tensor(np.asarray(depth_gt_np, np.float32)).cuda()
    depth_pred = torch.tensor(np.asarray(depth_pred_np, np.float32)).cuda()

    rgb_np = np.random.randn(2, 3, 480, 640)
    rgb = torch.tensor(np.asarray(rgb_np, np.float32)).cuda()

    result = criterion(depth_gt, depth_gt, depth_gt > 0, input=rgb)
    print(result)
|
external/Metric3D/training/mono/model/losses/Regularization.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
class RegularizationLoss(nn.Module):
    """
    Enforce losses on pixels without any gts.

    Penalizes the reciprocal of the mean prediction over unlabeled pixels,
    which encourages the network not to collapse there.
    """
    def __init__(self, loss_weight=0.1, data_type=['sfm', 'stereo', 'lidar'], **kwargs):
        super(RegularizationLoss, self).__init__()
        self.loss_weight = loss_weight
        self.data_type = data_type
        self.eps = 1e-6

    def forward(self, prediction, target, mask=None, **kwargs):
        # Pixels outside the GT mask are the ones being regularized.
        unlabeled = prediction[~mask]
        mean_unlabeled = torch.sum(unlabeled) / (unlabeled.numel() + self.eps)
        return (1 / mean_unlabeled) * self.loss_weight
|
external/Metric3D/training/mono/model/losses/SSIL.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
class SSILoss(nn.Module):
    """
    Scale-shift invariant MAE loss.

    Each depth map is shifted by its median and scaled by its mean absolute
    deviation before the absolute difference is accumulated:
    loss = MAE((d - median(d))/s - (d' - median(d'))/s'), s = mean(|d - median(d)|)
    """
    def __init__(self, loss_weight=1, data_type=['sfm', 'stereo', 'lidar'], **kwargs):
        super(SSILoss, self).__init__()
        self.loss_weight = loss_weight
        self.data_type = data_type
        self.eps = 1e-6

    def ssi_mae(self, target, prediction, mask):
        """Return (summed SSI-MAE, valid pixel count) for one sample."""
        valid_pixes = torch.sum(mask) + self.eps

        def _normalize(depth):
            # Median shift + mean-absolute-deviation scale; guards empty input.
            med = torch.median(depth) if depth.numel() else 0
            spread = torch.abs(depth - med).sum() / valid_pixes
            return (depth - med) / (spread + self.eps)

        ssi_mae_sum = torch.sum(torch.abs(_normalize(target) - _normalize(prediction)))
        return ssi_mae_sum, valid_pixes

    def forward(self, prediction, target, mask=None, **kwargs):
        """
        Accumulate the per-sample SSI-MAE over the batch, averaged by the
        total number of valid pixels.
        """
        B, C, H, W = prediction.shape
        total = 0
        valid_pix = 0
        for b in range(B):
            sample_mask = mask[b, ...]
            sample_sum, sample_pix = self.ssi_mae(
                prediction[b, ...][sample_mask], target[b, ...][sample_mask], sample_mask)
            total += sample_sum
            valid_pix += sample_pix
        total /= (valid_pix + self.eps)
        return total * self.loss_weight
|
| 45 |
+
|
| 46 |
+
if __name__ == '__main__':
    # Deterministic smoke test with a partially invalidated GT region.
    torch.manual_seed(1)
    torch.cuda.manual_seed_all(1)

    criterion = SSILoss()
    pred = torch.rand((2, 1, 256, 256)).cuda()
    gt = torch.rand((2, 1, 256, 256)).cuda()
    gt[:, :, 100:256, 0:100] = -1  # mark a block as invalid
    mask = gt > 0
    result = criterion(pred, gt, mask)
    print(result)
|
external/Metric3D/training/mono/model/losses/ScaleAlignLoss.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
class ScaleAlignLoss(nn.Module):
|
| 5 |
+
"""
|
| 6 |
+
Loss function defined over sequence of depth predictions
|
| 7 |
+
"""
|
| 8 |
+
def __init__(self, data_type=['lidar', 'denselidar', 'stereo', 'denselidar_syn'], loss_weight=1.0, disable_dataset=['MapillaryPSD'], **kwargs):
|
| 9 |
+
super(ScaleAlignLoss, self).__init__()
|
| 10 |
+
self.loss_weight = loss_weight
|
| 11 |
+
self.data_type = data_type
|
| 12 |
+
self.disable_dataset = disable_dataset
|
| 13 |
+
|
| 14 |
+
def forward(self, prediction, target, mask, scale, **kwargs):
|
| 15 |
+
device = target.device
|
| 16 |
+
|
| 17 |
+
B, C, H, W = prediction.shape
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# median_pred, _ = torch.median(prediction.view(B, C*H*W), 1)
|
| 21 |
+
# median_pred = median_pred.detach()
|
| 22 |
+
|
| 23 |
+
# scale_factor = torch.zeros_like(scale).squeeze(3).squeeze(2).squeeze(1)
|
| 24 |
+
# for i in range(B):
|
| 25 |
+
# mask_i = mask[i, ...]
|
| 26 |
+
# if torch.sum(mask_i) > 10:
|
| 27 |
+
# scale_factor[i] = torch.median(target[i, ...][mask_i]) / (torch.median(prediction[i, ...][mask_i]) + 1e-8)
|
| 28 |
+
# else:
|
| 29 |
+
# scale_factor[i] = 0
|
| 30 |
+
|
| 31 |
+
# target_scale = (median_pred * scale_factor)
|
| 32 |
+
|
| 33 |
+
# batches_dataset = kwargs['dataset']
|
| 34 |
+
# self.batch_valid = torch.tensor([1 if batch_dataset not in self.disable_dataset else 0 \
|
| 35 |
+
# for batch_dataset in batches_dataset], device=device)
|
| 36 |
+
|
| 37 |
+
# batch_valid = self.batch_valid * (scale_factor > 1e-8)
|
| 38 |
+
|
| 39 |
+
# scale_diff = torch.abs(scale.squeeze(3).squeeze(2).squeeze(1) - scale_factor * median_pred)
|
| 40 |
+
|
| 41 |
+
batches_dataset = kwargs['dataset']
|
| 42 |
+
self.batch_valid = torch.tensor([1 if batch_dataset not in self.disable_dataset else 0 \
|
| 43 |
+
for batch_dataset in batches_dataset], device=device)
|
| 44 |
+
|
| 45 |
+
scale_tgt = torch.zeros_like(scale).squeeze(3).squeeze(2).squeeze(1)
|
| 46 |
+
for i in range(B):
|
| 47 |
+
mask_i = mask[i, ...]
|
| 48 |
+
if torch.sum(mask_i) > 10:
|
| 49 |
+
scale_tgt[i] = torch.median(target[i, ...][mask_i])
|
| 50 |
+
else:
|
| 51 |
+
scale_tgt[i] = 0
|
| 52 |
+
|
| 53 |
+
batch_valid = self.batch_valid * (scale_tgt > 1e-8)
|
| 54 |
+
scale_diff = torch.abs(scale.squeeze(3).squeeze(2).squeeze(1) - scale_tgt)
|
| 55 |
+
loss = torch.sum(scale_diff * batch_valid) / (torch.sum(batch_valid) + 1e-8)
|
| 56 |
+
|
| 57 |
+
return loss * self.loss_weight
|
external/Metric3D/training/mono/model/losses/ScaleInvL1.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
class ScaleInvL1Loss(nn.Module):
    """Scale-invariant L1 loss.

    The prediction is rescaled per sample so that its median over valid
    pixels matches the target's median, then an L1 loss is taken over the
    valid region. The alignment factor is computed on a detached copy, so
    gradients do not flow through the scale itself.
    """
    def __init__(self, loss_weight=1, data_type=['sfm', 'denselidar_nometric', 'denselidar_syn'], **kwargs):
        super(ScaleInvL1Loss, self).__init__()
        self.loss_weight = loss_weight
        self.data_type = data_type
        self.eps = 1e-6

    def forward(self, prediction, target, mask=None, **kwargs):
        batch = target.shape[0]

        # Per-sample medians over valid pixels only: invalid pixels become
        # NaN and are ignored by nanmedian.
        masked_target = target.clone()
        masked_target[~mask] = torch.nan
        target_median = torch.nanmedian(masked_target.view(batch, -1), dim=1)[0]

        masked_pred = prediction.clone().detach()
        masked_pred[~mask] = torch.nan
        pred_median = torch.nanmedian(masked_pred.view(batch, -1), dim=1)[0]

        # Align prediction to the target's scale; a sample with no valid
        # pixels yields NaN and is zeroed out (contributes nothing).
        scale = target_median / pred_median
        scale[torch.isnan(scale)] = 0
        aligned = prediction * scale[:, None, None, None]

        diff = torch.abs(aligned * mask - target * mask)
        # disp_diff = diff / (target_valid + self.eps)
        loss = torch.sum(diff) / (torch.sum(mask) + self.eps)
        if torch.isnan(loss).item() | torch.isinf(loss).item():
            # Degenerate batch: fall back to a graph-connected zero.
            loss = 0 * torch.sum(prediction)
            print(f'Scale-invariant L1 NAN error, {loss}')
        return loss * self.loss_weight
|
external/Metric3D/training/mono/model/losses/SiLog.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
class SilogLoss(nn.Module):
    """
    Compute SILog loss. See https://papers.nips.cc/paper/2014/file/7bccfde7714a1ebadf06c5f4cea752c1-Paper.pdf for
    more information about scale-invariant loss.

    loss = mean(d^2) - variance_focus * mean(d)^2, with d the log-depth
    residual over valid pixels. variance_focus=1 gives the fully
    scale-invariant form; smaller values mix in an absolute-scale term.
    """
    def __init__(self, variance_focus=0.5, loss_weight=1, data_type=['stereo', 'lidar'], **kwargs):
        super(SilogLoss, self).__init__()
        self.variance_focus = variance_focus
        self.loss_weight = loss_weight
        self.data_type = data_type
        self.eps = 1e-6

    def silog_loss(self, prediction, target, mask):
        # Log-space residuals over valid pixels only. Assumes prediction and
        # target are strictly positive inside the mask (log would give NaN/Inf
        # otherwise; the NaN check in forward() catches that).
        d = torch.log(prediction[mask]) - torch.log(target[mask])
        d_square_mean = torch.sum(d ** 2) / (d.numel() + self.eps)
        d_mean = torch.sum(d) / (d.numel() + self.eps)
        loss = d_square_mean - self.variance_focus * (d_mean ** 2)
        return loss

    def forward(self, prediction, target, mask=None, **kwargs):
        if target[mask].numel() > 0:
            loss = self.silog_loss(prediction, target, mask)
        else:
            # No valid pixels: zero loss that still keeps the autograd graph
            # connected to the prediction.
            loss = 0 * torch.sum(prediction)
        if torch.isnan(loss).item() | torch.isinf(loss).item():
            # BUGFIX: the original message referenced d_square_mean/d_mean,
            # which are locals of silog_loss() and not in scope here — the
            # error path itself raised NameError instead of the diagnostic.
            raise RuntimeError(f'Silog error, {loss}')
        return loss * self.loss_weight
|
| 31 |
+
|
| 32 |
+
if __name__ == '__main__':
    # Smoke test: an all-zero GT yields an empty mask, exercising the
    # no-valid-pixel branch (expected output: zero loss).
    criterion = SilogLoss()
    depth_pred = torch.rand((2, 3, 256, 256)).cuda()
    depth_gt = torch.zeros_like(depth_pred)
    valid = depth_gt > 0
    print(criterion(depth_pred, depth_gt, valid))
|
external/Metric3D/training/mono/model/losses/SkyRegularization.py
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
|
| 6 |
+
class SkyRegularizationLoss(nn.Module):
    """
    Enforce losses on pixels without any gts (sky regions).

    Sky pixels — identified by ``sky_id`` in the semantic mask — have no depth
    ground truth, so their predicted depth is regressed towards a fixed far
    value (``regress_value``); optionally their predicted normal is pulled
    towards a fixed direction (``normal_regress``).
    """
    def __init__(self, loss_weight=0.1, data_type=['sfm', 'stereo', 'lidar', 'denselidar', 'denselidar_nometric', 'denselidar_syn'], sky_id=142, sample_ratio=0.4, regress_value=1.8, normal_regress=None, normal_weight=1.0, **kwargs):
        super(SkyRegularizationLoss, self).__init__()
        self.loss_weight = loss_weight
        self.data_type = data_type
        self.sky_id = sky_id                  # semantic label id for "sky"
        self.sample_ratio = sample_ratio      # kept for interface compatibility (unused)
        self.eps = 1e-6
        self.regress_value = regress_value    # far depth sky is regressed to
        self.normal_regress = normal_regress  # target sky normal (len-3) or None
        self.normal_weight = normal_weight

    def loss1(self, pred_sky):
        # Alternative (currently unused by forward): push mean sky depth
        # towards infinity via exp(-mean).
        loss = 1 / torch.exp((torch.sum(pred_sky) / (pred_sky.numel() + self.eps)))
        return loss

    def loss2(self, pred_sky):
        # L1 regression of sky depth towards the fixed far value.
        loss = torch.sum(torch.abs(pred_sky - self.regress_value)) / (pred_sky.numel() + self.eps)
        return loss

    def loss_norm(self, pred_norm, sky_mask):
        # BUGFIX: build the target normal on the prediction's device instead
        # of hard-coded .cuda() — the original crashed on CPU and used the
        # wrong device under multi-GPU data parallelism.
        sky_norm = torch.tensor(self.normal_regress, dtype=torch.float32, device=pred_norm.device)
        sky_norm = sky_norm.unsqueeze(0).unsqueeze(2).unsqueeze(3)
        dot = torch.cosine_similarity(pred_norm[:, :3, :, :].clone(), sky_norm, dim=1)

        sky_mask_float = sky_mask.float().squeeze()
        # Exclude near-(anti)parallel pixels from the gradient for stability.
        valid_mask = sky_mask_float \
                    * (dot.detach() < 0.999).float() \
                    * (dot.detach() > -0.999).float()

        al = (1 - dot) * valid_mask
        loss = torch.sum(al) / (torch.sum(sky_mask_float) + self.eps)
        return loss

    def forward(self, prediction, target, prediction_normal=None, mask=None, sem_mask=None, **kwargs):
        sky_mask = sem_mask == self.sky_id
        pred_sky = prediction[sky_mask]

        # NOTE: the original drew np.random.choice samples here but never
        # used them — dead code that also perturbed the global NumPy RNG;
        # removed.
        if pred_sky.numel() > 0:
            loss = self.loss2(pred_sky)
            if (prediction_normal is not None) and (self.normal_regress is not None):
                loss_normal = self.loss_norm(prediction_normal, sky_mask)
                loss = loss + loss_normal * self.normal_weight
        else:
            # No sky pixels in this batch: graph-connected zero.
            loss = torch.sum(prediction) * 0
        if torch.isnan(loss).item() | torch.isinf(loss).item():
            loss = torch.sum(prediction) * 0
            print(f'SkyRegularization NAN error, {loss}')
            # raise RuntimeError(f'Sky Loss error, {loss}')

        return loss * self.loss_weight
|
| 67 |
+
|
| 68 |
+
if __name__ == '__main__':
    import cv2
    # Smoke test on GPU: zero GT depth produces an all-False mask.
    criterion = SkyRegularizationLoss()
    depth_pred = np.random.random([2, 1, 480, 640])
    depth_gt = np.zeros_like(depth_pred)
    intrinsics = [[[100, 0, 200], [0, 100, 200], [0, 0, 1]], [[100, 0, 200], [0, 100, 200], [0, 0, 1]],]
    depth_gt = torch.tensor(np.array(depth_gt, np.float32)).cuda()
    depth_pred = torch.tensor(np.array(depth_pred, np.float32)).cuda()
    intrinsics = torch.tensor(np.array(intrinsics, np.float32)).cuda()
    valid = depth_gt > 0
    result = criterion(depth_pred, depth_gt, valid, valid, intrinsics)
    print(result)
|
external/Metric3D/training/mono/model/losses/VNL.py
ADDED
|
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class VNLoss(nn.Module):
    """
    Virtual Normal Loss.

    Randomly samples triplets of valid pixels, back-projects them to 3D with
    the camera intrinsics, forms "virtual" plane normals from each triplet,
    and penalizes the L1 difference between normals computed from GT depth
    and from predicted depth. Degenerate triplets (near-collinear, too close
    together, or containing invalid depth) are filtered out.

    NOTE(review): several helpers hard-code device="cuda"; this class will
    not run on CPU-only machines as written — confirm before reuse.
    """
    def __init__(self,
                 delta_cos=0.867, delta_diff_x=0.01,
                 delta_diff_y=0.01, delta_diff_z=0.01,
                 delta_z=1e-5, sample_ratio=0.15,
                 loss_weight=1.0, data_type=['sfm', 'stereo', 'lidar', 'denselidar', 'denselidar_nometric', 'denselidar_syn'], **kwargs):
        super(VNLoss, self).__init__()
        self.delta_cos = delta_cos          # collinearity threshold (cosine)
        self.delta_diff_x = delta_diff_x    # min point separation along x
        self.delta_diff_y = delta_diff_y    # min point separation along y
        self.delta_diff_z = delta_diff_z    # min point separation along z
        self.delta_z = delta_z              # min valid depth
        self.sample_ratio = sample_ratio    # fraction of pixels sampled per image
        self.loss_weight = loss_weight
        self.data_type = data_type
        self.eps = 1e-6


    def init_image_coor(self, intrinsic, height, width):
        """Cache per-pixel offsets from the principal point as buffers.

        Buffers u_m_u0 / v_m_v0 have shape [B, 1, H, W] and are recomputed
        by get_loss() whenever the batch/image shape changes.
        """
        # x_row = torch.arange(0, W, device="cuda")
        # x = torch.tile(x_row, (H, 1))
        # x = x.to(torch.float32)
        # u_m_u0 = x[None, None, :, :] - u0
        # self.register_buffer('u_m_u0', u_m_u0, persistent=False)

        # y_col = torch.arange(0, H, device="cuda") # y_col = np.arange(0, height)
        # y = torch.transpose(torch.tile(y_col, (W, 1)), 1, 0)
        # y = y.to(torch.float32)
        # v_m_v0 = y[None, None, :, :] - v0
        # self.register_buffer('v_m_v0', v_m_v0, persistent=False)

        # pix_idx_mat = torch.arange(H*W, device="cuda").reshape((H, W))
        # self.register_buffer('pix_idx_mat', pix_idx_mat, persistent=False)
        #self.pix_idx_mat = torch.arange(height*width, device="cuda").reshape((height, width))

        u0 = intrinsic[:, 0, 2][:, None, None, None]
        v0 = intrinsic[:, 1, 2][:, None, None, None]
        y, x = torch.meshgrid([torch.arange(0, height, dtype=torch.float32, device="cuda"),
                               torch.arange(0, width, dtype=torch.float32, device="cuda")], indexing='ij')
        u_m_u0 = x[None, None, :, :] - u0
        v_m_v0 = y[None, None, :, :] - v0
        # return u_m_u0, v_m_v0
        # Non-persistent buffers: recomputed lazily, not saved in state_dict.
        self.register_buffer('v_m_v0', v_m_v0, persistent=False)
        self.register_buffer('u_m_u0', u_m_u0, persistent=False)

    def transfer_xyz(self, depth, focal_length, u_m_u0, v_m_v0):
        """Back-project a depth map to camera-space 3D points (pinhole model)."""
        x = u_m_u0 * depth / focal_length
        y = v_m_v0 * depth / focal_length
        z = depth
        pw = torch.cat([x, y, z], 1).permute(0, 2, 3, 1).contiguous() # [b, h, w, c]
        return pw

    def select_index(self, B, H, W, mask):
        """
        Sample three independent index sets (p1, p2, p3) of valid pixels per
        image. Each set holds int(H*W*sample_ratio) flat indices; when fewer
        valid pixels exist, the remainder is padded with index 0.
        Returns a dict of per-point x/y coordinates, each [B, N].
        """
        p1 = []
        p2 = []
        p3 = []
        pix_idx_mat = torch.arange(H*W, device="cuda").reshape((H, W))
        for i in range(B):
            inputs_index = torch.masked_select(pix_idx_mat, mask[i, ...].gt(self.eps))
            num_effect_pixels = len(inputs_index)

            intend_sample_num = int(H * W * self.sample_ratio)
            sample_num = intend_sample_num if num_effect_pixels >= intend_sample_num else num_effect_pixels

            # Fresh permutation per point set so the three sets are independent.
            shuffle_effect_pixels = torch.randperm(num_effect_pixels, device="cuda")
            p1i = inputs_index[shuffle_effect_pixels[:sample_num]]
            shuffle_effect_pixels = torch.randperm(num_effect_pixels, device="cuda")
            p2i = inputs_index[shuffle_effect_pixels[:sample_num]]
            shuffle_effect_pixels = torch.randperm(num_effect_pixels, device="cuda")
            p3i = inputs_index[shuffle_effect_pixels[:sample_num]]

            # Pad with index 0 so every image yields the same sample count.
            cat_null = torch.tensor(([0,] * (intend_sample_num - sample_num)), dtype=torch.long, device="cuda")
            p1i = torch.cat([p1i, cat_null])
            p2i = torch.cat([p2i, cat_null])
            p3i = torch.cat([p3i, cat_null])

            p1.append(p1i)
            p2.append(p2i)
            p3.append(p3i)

        p1 = torch.stack(p1, dim=0)
        p2 = torch.stack(p2, dim=0)
        p3 = torch.stack(p3, dim=0)

        # Convert flat indices back to (x, y) pixel coordinates.
        p1_x = p1 % W
        p1_y = torch.div(p1, W, rounding_mode='trunc').long() # p1 // W

        p2_x = p2 % W
        p2_y = torch.div(p2, W, rounding_mode='trunc').long() # p2 // W

        p3_x = p3 % W
        p3_y = torch.div(p3, W, rounding_mode='trunc').long() # p3 // W
        p123 = {'p1_x': p1_x, 'p1_y': p1_y, 'p2_x': p2_x, 'p2_y': p2_y, 'p3_x': p3_x, 'p3_y': p3_y}
        return p123

    def form_pw_groups(self, p123, pw):
        """
        Form 3D points groups, with 3 points in each grouup.
        :param p123: points index
        :param pw: 3D points
        :return: [B, N, 3(x,y,z), 3(p1,p2,p3)]
        """
        B, _, _, _ = pw.shape
        p1_x = p123['p1_x']
        p1_y = p123['p1_y']
        p2_x = p123['p2_x']
        p2_y = p123['p2_y']
        p3_x = p123['p3_x']
        p3_y = p123['p3_y']

        pw_groups = []
        for i in range(B):
            pw1 = pw[i, p1_y[i], p1_x[i], :]
            pw2 = pw[i, p2_y[i], p2_x[i], :]
            pw3 = pw[i, p3_y[i], p3_x[i], :]
            pw_bi = torch.stack([pw1, pw2, pw3], dim=2)
            pw_groups.append(pw_bi)
        # [B, N, 3(x,y,z), 3(p1,p2,p3)]
        pw_groups = torch.stack(pw_groups, dim=0)
        return pw_groups

    def filter_mask(self, p123, gt_xyz, delta_cos=0.867,
                    delta_diff_x=0.005,
                    delta_diff_y=0.005,
                    delta_diff_z=0.005):
        """Reject triplets that are near-collinear, too close together, or
        contain invalid (near-zero) depth. Returns (valid_mask [B, N], groups)."""
        pw = self.form_pw_groups(p123, gt_xyz)
        pw12 = pw[:, :, :, 1] - pw[:, :, :, 0]
        pw13 = pw[:, :, :, 2] - pw[:, :, :, 0]
        pw23 = pw[:, :, :, 2] - pw[:, :, :, 1]
        ###ignore linear
        pw_diff = torch.cat([pw12[:, :, :, np.newaxis], pw13[:, :, :, np.newaxis], pw23[:, :, :, np.newaxis]],
                            3)  # [b, n, 3, 3]
        m_batchsize, groups, coords, index = pw_diff.shape
        proj_query = pw_diff.view(m_batchsize * groups, -1, index).permute(0, 2, 1).contiguous() # (B* X CX(3)) [bn, 3(p123), 3(xyz)]
        proj_key = pw_diff.contiguous().view(m_batchsize * groups, -1, index) # B X (3)*C [bn, 3(xyz), 3(p123)]
        q_norm = proj_query.norm(2, dim=2)
        nm = torch.bmm(q_norm.contiguous().view(m_batchsize * groups, index, 1), q_norm.view(m_batchsize * groups, 1, index)) #[]
        energy = torch.bmm(proj_query, proj_key) # transpose check [bn, 3(p123), 3(p123)]
        # Pairwise cosines between the three edge vectors of each triplet.
        norm_energy = energy / (nm + self.eps)
        norm_energy = norm_energy.contiguous().view(m_batchsize * groups, -1)
        # A 3x3 cosine matrix always has 3 diagonal ones; more than 3 entries
        # above the threshold means some edge pair is near-(anti)parallel.
        mask_cos = torch.sum((norm_energy > delta_cos) + (norm_energy < -delta_cos), 1) > 3 # igonre
        mask_cos = mask_cos.contiguous().view(m_batchsize, groups)
        ##ignore padding and invilid depth
        mask_pad = torch.sum(pw[:, :, 2, :] > self.delta_z, 2) == 3

        ###ignore near
        mask_x = torch.sum(torch.abs(pw_diff[:, :, 0, :]) < delta_diff_x, 2) > 0
        mask_y = torch.sum(torch.abs(pw_diff[:, :, 1, :]) < delta_diff_y, 2) > 0
        mask_z = torch.sum(torch.abs(pw_diff[:, :, 2, :]) < delta_diff_z, 2) > 0

        mask_ignore = (mask_x & mask_y & mask_z) | mask_cos
        mask_near = ~mask_ignore
        mask = mask_pad & mask_near

        return mask, pw

    def select_points_groups(self, gt_depth, pred_depth, intrinsic, mask):
        """Sample triplets, back-project GT and prediction, and keep only
        triplets that pass filter_mask (filtering is decided on GT geometry)."""
        B, C, H, W = gt_depth.shape
        focal_length = intrinsic[:, 0, 0][:, None, None, None]
        u_m_u0, v_m_v0 = self.u_m_u0, self.v_m_v0 # self.init_image_coor(intrinsic, height=H, width=W)

        pw_gt = self.transfer_xyz(gt_depth, focal_length, u_m_u0, v_m_v0)
        pw_pred = self.transfer_xyz(pred_depth, focal_length, u_m_u0, v_m_v0)

        p123 = self.select_index(B, H, W, mask)
        # mask:[b, n], pw_groups_gt: [b, n, 3(x,y,z), 3(p1,p2,p3)]
        mask, pw_groups_gt = self.filter_mask(p123, pw_gt,
                                              delta_cos=0.867,
                                              delta_diff_x=0.005,
                                              delta_diff_y=0.005,
                                              delta_diff_z=0.005)

        # [b, n, 3, 3]
        pw_groups_pred = self.form_pw_groups(p123, pw_pred)
        # Avoid exact-zero predicted depth (division by zero downstream).
        pw_groups_pred[pw_groups_pred[:, :, 2, :] == 0] = 0.0001
        mask_broadcast = mask.repeat(1, 9).reshape(B, 3, 3, -1).permute(0, 3, 1, 2).contiguous()
        pw_groups_pred_not_ignore = pw_groups_pred[mask_broadcast].reshape(1, -1, 3, 3)
        pw_groups_gt_not_ignore = pw_groups_gt[mask_broadcast].reshape(1, -1, 3, 3)

        return pw_groups_gt_not_ignore, pw_groups_pred_not_ignore

    def forward(self, prediction, target, mask, intrinsic, select=True, **kwargs): #gt_depth, pred_depth, select=True):
        """
        Virtual normal loss.
        :param prediction: predicted depth map, [B,W,H,C]
        :param data: target label, ground truth depth, [B, W, H, C], padding region [padding_up, padding_down]
        :return: scalar loss tensor, weighted by loss_weight
        """
        loss = self.get_loss(prediction, target, mask, intrinsic, select, **kwargs)
        return loss


    def get_loss(self, prediction, target, mask, intrinsic, select=True, **kwargs):
        # configs for the cameras
        # focal_length = intrinsic[:, 0, 0][:, None, None, None]
        # u0 = intrinsic[:, 0, 2][:, None, None, None]
        # v0 = intrinsic[:, 1, 2][:, None, None, None]
        B, _, H, W = target.shape
        # Rebuild the cached pixel-offset buffers if missing or stale.
        if 'u_m_u0' not in self._buffers or 'v_m_v0' not in self._buffers \
            or self.u_m_u0.shape != torch.Size([B,1,H,W]) or self.v_m_v0.shape != torch.Size([B,1,H,W]):
            self.init_image_coor(intrinsic, H, W)


        gt_points, pred_points = self.select_points_groups(target, prediction, intrinsic, mask)

        # Virtual plane normals from each triplet (cross of two edge vectors).
        gt_p12 = gt_points[:, :, :, 1] - gt_points[:, :, :, 0]
        gt_p13 = gt_points[:, :, :, 2] - gt_points[:, :, :, 0]
        pred_p12 = pred_points[:, :, :, 1] - pred_points[:, :, :, 0]
        pred_p13 = pred_points[:, :, :, 2] - pred_points[:, :, :, 0]

        gt_normal = torch.cross(gt_p12, gt_p13, dim=2)
        pred_normal = torch.cross(pred_p12, pred_p13, dim=2)
        pred_norm = torch.norm(pred_normal, 2, dim=2, keepdim=True)
        gt_norm = torch.norm(gt_normal, 2, dim=2, keepdim=True)
        # Replace zero norms by eps so normalization never divides by zero.
        pred_mask = pred_norm == 0.0
        gt_mask = gt_norm == 0.0
        pred_mask = pred_mask.to(torch.float32)
        gt_mask = gt_mask.to(torch.float32)
        pred_mask *= self.eps
        gt_mask *= self.eps
        gt_norm = gt_norm + gt_mask
        pred_norm = pred_norm + pred_mask
        gt_normal = gt_normal / gt_norm
        pred_normal = pred_normal / pred_norm
        loss = torch.abs(gt_normal - pred_normal)
        loss = torch.sum(torch.sum(loss, dim=2), dim=0)
        if select:
            # Drop the easiest 25% of triplets (hard-example emphasis).
            loss, indices = torch.sort(loss, dim=0, descending=False)
            loss = loss[int(loss.size(0) * 0.25):]
        loss = torch.sum(loss) / (loss.numel() + self.eps)
        if torch.isnan(loss).item() | torch.isinf(loss).item():
            loss = 0 * torch.sum(prediction)
            print(f'VNL NAN error, {loss}')
        return loss * self.loss_weight
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
if __name__ == '__main__':
    import cv2
    # Smoke test on GPU: zero GT depth gives an empty validity mask; the
    # loss is evaluated twice to check it runs repeatedly.
    criterion = VNLoss()
    depth_pred = np.random.random([2, 1, 480, 640])
    depth_gt = np.zeros_like(depth_pred)
    intrinsics = [[[100, 0, 200], [0, 100, 200], [0, 0, 1]], [[100, 0, 200], [0, 100, 200], [0, 0, 1]],]
    depth_gt = torch.tensor(np.array(depth_gt, np.float32)).cuda()
    depth_pred = torch.tensor(np.array(depth_pred, np.float32)).cuda()
    intrinsics = torch.tensor(np.array(intrinsics, np.float32)).cuda()
    valid = depth_gt > 0
    first = criterion(depth_pred, depth_gt, valid, intrinsics)
    second = criterion(depth_pred, depth_gt, valid, intrinsics)
    print(first, second)
|
external/Metric3D/training/mono/model/losses/WCEL.py
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class WCELoss(nn.Module):
|
| 7 |
+
"""
|
| 8 |
+
Weighted Cross-entropy Loss Function.
|
| 9 |
+
"""
|
| 10 |
+
def __init__(self, depth_normalize, out_channel=200, loss_weight=1.0, data_type=['stereo', 'lidar'], **kwargs):
|
| 11 |
+
super(WCELoss, self).__init__()
|
| 12 |
+
self.loss_weight = loss_weight
|
| 13 |
+
self.depth_min = depth_normalize[0]
|
| 14 |
+
self.depth_max = depth_normalize[1]
|
| 15 |
+
self.bins_num = out_channel
|
| 16 |
+
self.depth_min_log = torch.log10(torch.tensor(self.depth_min))
|
| 17 |
+
|
| 18 |
+
self.alpha = 2 #0.2
|
| 19 |
+
self.config_bins()
|
| 20 |
+
self.noise_sample_ratio = 0.9 #kwargs['noise_sample_ratio'] if 'noise_sample_ratio' in kwargs else 1.0
|
| 21 |
+
self.data_type = data_type
|
| 22 |
+
self.eps = 1e-6
|
| 23 |
+
|
| 24 |
+
def config_bins(self):
|
| 25 |
+
# Modify some configs
|
| 26 |
+
self.depth_bins_interval = (torch.log10(torch.tensor(self.depth_max)) -
|
| 27 |
+
self.depth_min_log) / self.bins_num
|
| 28 |
+
bins_edges_in_log = self.depth_min_log + self.depth_bins_interval * torch.tensor(list(range(self.bins_num)) + [self.bins_num,])
|
| 29 |
+
#bins_edges_in_log = torch.from_numpy(bins_edges_in_log)
|
| 30 |
+
# The boundary of each bin
|
| 31 |
+
# bins_edges_in_log = np.array([self.depth_min_log + self.depth_bins_interval * (i + 0.5)
|
| 32 |
+
# for i in range(self.bins_num)])
|
| 33 |
+
bins_weight = torch.tensor([[np.exp(-self.alpha * (i - j) ** 2) for i in range(self.bins_num )]
|
| 34 |
+
for j in np.arange(self.bins_num )]).cuda()
|
| 35 |
+
self.register_buffer("bins_weight", bins_weight.float(), persistent=False)
|
| 36 |
+
self.register_buffer("bins_edges_in_log", bins_edges_in_log.float(), persistent=False)
|
| 37 |
+
|
| 38 |
+
def depth_to_bins_in_log(self, depth, mask):
|
| 39 |
+
"""
|
| 40 |
+
Discretize depth into depth bins. Predefined bins edges are in log space.
|
| 41 |
+
Mark invalid padding area as bins_num + 1
|
| 42 |
+
Args:
|
| 43 |
+
@depth: 1-channel depth, [B, 1, h, w]
|
| 44 |
+
return: depth bins [B, C, h, w]
|
| 45 |
+
"""
|
| 46 |
+
invalid_mask = ~mask
|
| 47 |
+
#depth[depth < self.depth_min] = self.depth_min
|
| 48 |
+
#depth[depth > self.depth_max] = self.depth_max
|
| 49 |
+
mask_lower = (depth <= self.depth_min)
|
| 50 |
+
mask_higher = (depth >= self.depth_max)
|
| 51 |
+
depth_bins_log = ((torch.log10(torch.abs(depth)) - self.depth_min_log) / self.depth_bins_interval).to(torch.int)
|
| 52 |
+
|
| 53 |
+
depth_bins_log[mask_lower] = 0
|
| 54 |
+
depth_bins_log[mask_higher] = self.bins_num - 1
|
| 55 |
+
depth_bins_log[depth_bins_log == self.bins_num] = self.bins_num - 1
|
| 56 |
+
|
| 57 |
+
depth_bins_log[invalid_mask] = self.bins_num + 1
|
| 58 |
+
return depth_bins_log
|
| 59 |
+
|
| 60 |
+
def depth_to_bins(self, depth, mask, depth_edges, size_limite=(300, 300)):
|
| 61 |
+
"""
|
| 62 |
+
Discretize depth into depth bins. Predefined bins edges are provided.
|
| 63 |
+
Mark invalid padding area as bins_num + 1
|
| 64 |
+
Args:
|
| 65 |
+
@depth: 1-channel depth, [B, 1, h, w]
|
| 66 |
+
return: depth bins [B, C, h, w]
|
| 67 |
+
"""
|
| 68 |
+
def _depth_to_bins_block_(depth, mask, depth_edges):
|
| 69 |
+
bins_id = torch.sum(depth_edges[:, None, None, None, :] < torch.abs(depth)[:, :, :, :, None], dim=-1)
|
| 70 |
+
bins_id = bins_id - 1
|
| 71 |
+
invalid_mask = ~mask
|
| 72 |
+
mask_lower = (depth <= self.depth_min)
|
| 73 |
+
mask_higher = (depth >= self.depth_max)
|
| 74 |
+
|
| 75 |
+
bins_id[mask_lower] = 0
|
| 76 |
+
bins_id[mask_higher] = self.bins_num - 1
|
| 77 |
+
bins_id[bins_id == self.bins_num] = self.bins_num - 1
|
| 78 |
+
|
| 79 |
+
bins_id[invalid_mask] = self.bins_num + 1
|
| 80 |
+
return bins_id
|
| 81 |
+
_, _, H, W = depth.shape
|
| 82 |
+
bins = mask.clone().long()
|
| 83 |
+
h_blocks = np.ceil(H / size_limite[0]).astype(np.int)
|
| 84 |
+
w_blocks = np.ceil(W/ size_limite[1]).astype(np.int)
|
| 85 |
+
for i in range(h_blocks):
|
| 86 |
+
for j in range(w_blocks):
|
| 87 |
+
h_start = i*size_limite[0]
|
| 88 |
+
h_end_proposal = (i + 1) * size_limite[0]
|
| 89 |
+
h_end = h_end_proposal if h_end_proposal < H else H
|
| 90 |
+
w_start = j*size_limite[1]
|
| 91 |
+
w_end_proposal = (j + 1) * size_limite[1]
|
| 92 |
+
w_end = w_end_proposal if w_end_proposal < W else W
|
| 93 |
+
bins_ij = _depth_to_bins_block_(
|
| 94 |
+
depth[:, :, h_start:h_end, w_start:w_end],
|
| 95 |
+
mask[:, :, h_start:h_end, w_start:w_end],
|
| 96 |
+
depth_edges
|
| 97 |
+
)
|
| 98 |
+
bins[:, :, h_start:h_end, w_start:w_end] = bins_ij
|
| 99 |
+
return bins
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
# def mask_maximum_loss(self, loss_pixels, mask):
|
| 103 |
+
# mask = mask.reshape(mask.size(0), -1)
|
| 104 |
+
# valid_pix_bt = torch.sum(mask, dim=1)
|
| 105 |
+
# mask_noise_num = (valid_pix_bt * self.noise_sample_ratio).int()
|
| 106 |
+
|
| 107 |
+
# loss_sample = []
|
| 108 |
+
# for i in range(loss_pixels.size(0)):
|
| 109 |
+
# sorted_losses, _ = torch.sort(loss_pixels[i, :][mask[i, ...]])
|
| 110 |
+
# loss_sample.append(torch.sum(sorted_losses[:mask_noise_num[i]]))
|
| 111 |
+
|
| 112 |
+
# return torch.tensor(loss_sample), mask_noise_num
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def forward(self, prediction, target, mask=None, pred_logit=None, **kwargs): #pred_logit, gt_bins, gt):
|
| 116 |
+
B, _, H, W = target.shape
|
| 117 |
+
if 'bins_edges' not in kwargs or kwargs['bins_edges'] is None:
|
| 118 |
+
# predefined depth bins in log space
|
| 119 |
+
gt_bins = self.depth_to_bins_in_log(target, mask)
|
| 120 |
+
else:
|
| 121 |
+
bins_edges = kwargs['bins_edges']
|
| 122 |
+
gt_bins = self.depth_to_bins(target, mask, bins_edges)
|
| 123 |
+
|
| 124 |
+
classes_range = torch.arange(self.bins_num, device=gt_bins.device, dtype=gt_bins.dtype)
|
| 125 |
+
log_pred = torch.nn.functional.log_softmax(pred_logit, 1)
|
| 126 |
+
log_pred = log_pred.reshape(B, log_pred.size(1), -1).permute((0, 2, 1))
|
| 127 |
+
gt_reshape = gt_bins.reshape((B, -1))[:, :, None]
|
| 128 |
+
one_hot = (gt_reshape == classes_range).to(dtype=torch.float, device=pred_logit.device)
|
| 129 |
+
weight = torch.matmul(one_hot, self.bins_weight)
|
| 130 |
+
weight_log_pred = weight * log_pred
|
| 131 |
+
loss_pixeles = - torch.sum(weight_log_pred, dim=2)
|
| 132 |
+
|
| 133 |
+
valid_pixels = torch.sum(mask).to(dtype=torch.float, device=pred_logit.device)
|
| 134 |
+
loss = torch.sum(loss_pixeles) / (valid_pixels + self.eps)
|
| 135 |
+
if torch.isnan(loss).item() | torch.isinf(loss).item():
|
| 136 |
+
raise RuntimeError(f'WCEL error, {loss}')
|
| 137 |
+
return loss * self.loss_weight
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
if __name__ == '__main__':
    import cv2
    # Smoke test on GPU: GT used as its own prediction; evaluated twice to
    # confirm the loss is repeatable.
    criterion = WCELoss((0.0004, 1))
    depth_pred = np.abs(np.random.random([2, 1, 480, 640]))
    logits = np.random.random([2, 200, 480, 640])
    depth_gt = np.random.random([2, 1, 480, 640]) - 0.5 #np.zeros_like(pred_depth) #
    intrinsics = [[100, 100, 200, 200], [200, 200, 300, 300]]
    depth_gt = torch.tensor(np.array(depth_gt, np.float32)).cuda()
    depth_pred = torch.tensor(np.array(depth_pred, np.float32)).cuda()
    intrinsics = torch.tensor(np.array(intrinsics, np.float32)).cuda()
    logits = torch.tensor(np.array(logits, np.float32)).cuda()

    valid = depth_gt > 0
    first = criterion(depth_gt, depth_gt, valid, intrinsic=intrinsics, pred_logit=logits)
    second = criterion(depth_gt, depth_gt, valid, intrinsic=intrinsics, pred_logit=logits)
    print(first, second)
|
external/Metric3D/training/mono/model/losses/__init__.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Loss registry for the monocular depth training pipeline. Importing this
# package re-exports every loss class; __all__ below mirrors the imports.
from .SiLog import SilogLoss
from .WCEL import WCELoss
from .VNL import VNLoss
from .Gradient import GradientLoss_Li, GradientLoss
from .Ranking import EdgeguidedRankingLoss, RankingLoss
from .Regularization import RegularizationLoss
from .SSIL import SSILoss
from .HDNL import HDNLoss
from .HDSNL import HDSNLoss
from .NormalRegression import EdgeguidedNormalLoss
from .depth_to_normal import Depth2Normal
from .photometric_loss_functions import PhotometricGeometricLoss
from .HDSNL_random import HDSNRandomLoss
from .HDNL_random import HDNRandomLoss
from .AdabinsLoss import AdabinsLoss
from .SkyRegularization import SkyRegularizationLoss
from .PWN_Planes import PWNPlanesLoss
from .L1 import L1Loss, L1DispLoss, L1InverseLoss
from .ConfidenceLoss import ConfidenceLoss
from .ScaleInvL1 import ScaleInvL1Loss
from .NormalBranchLoss import NormalBranchLoss, DeNoConsistencyLoss
from .GRUSequenceLoss import GRUSequenceLoss
from .ConfidenceGuideLoss import ConfidenceGuideLoss
from .ScaleAlignLoss import ScaleAlignLoss

__all__ = [
    'SilogLoss', 'WCELoss', 'VNLoss', 'GradientLoss_Li', 'GradientLoss', 'EdgeguidedRankingLoss',
    'RankingLoss', 'RegularizationLoss', 'SSILoss', 'HDNLoss', 'HDSNLoss', 'EdgeguidedNormalLoss', 'Depth2Normal',
    'PhotometricGeometricLoss', 'HDSNRandomLoss', 'HDNRandomLoss', 'AdabinsLoss', 'SkyRegularizationLoss',
    'PWNPlanesLoss', 'L1Loss',
    'ConfidenceLoss', 'ScaleInvL1Loss', 'L1DispLoss', 'NormalBranchLoss', 'L1InverseLoss', 'GRUSequenceLoss', 'ConfidenceGuideLoss', 'DeNoConsistencyLoss', 'ScaleAlignLoss'
]
|
external/Metric3D/training/mono/model/losses/depth_to_normal.py
ADDED
|
@@ -0,0 +1,302 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
|
| 5 |
+
class Backprojection(nn.Module):
    """Layer to backproject a depth image given the camera intrinsics

    Attributes
        xy (Nx3x(HxW)): homogeneous pixel coordinates on regular grid
    """
    def __init__(self, height, width):
        """
        Args:
            height (int): image height
            width (int): image width
        """
        super(Backprojection, self).__init__()

        self.height = height
        self.width = width

        # generate regular grid
        meshgrid = np.meshgrid(range(self.width), range(self.height), indexing='xy')
        id_coords = np.stack(meshgrid, axis=0).astype(np.float32)
        # NOTE(review): tensors are created directly on "cuda", so this module
        # cannot be constructed on a CPU-only machine -- confirm this is intended.
        id_coords = torch.tensor(id_coords, device="cuda")

        # generate homogeneous pixel coordinates
        # self.ones = nn.Parameter(torch.ones(1, 1, self.height * self.width),
        #                          requires_grad=False)
        ones = torch.ones(1, 1, self.height * self.width, device="cuda")
        xy = torch.unsqueeze(
            torch.stack([id_coords[0].view(-1), id_coords[1].view(-1)], 0),
            0
        )
        xy = torch.cat([xy, ones], 1)
        #self.xy = nn.Parameter(self.xy, requires_grad=False)
        # non-persistent buffers: they follow .to()/.cuda() moves but are not
        # serialized into the state_dict
        self.register_buffer('xy', xy, persistent=False)
        self.register_buffer('ones', ones, persistent=False)

        # for virtual camera only
        # Angle grid of a virtual spherical camera; the resulting unit viewing
        # directions are precomputed here and kept as a NumPy array.
        horizontal_angle_range=[195.0, -15.0]
        vertical_angle_range=[150.0, 0.0]

        horizontal_sample_num=641
        vertical_sample_num=481

        self.horizontal_angle_range = horizontal_angle_range
        self.vertical_angle_range = vertical_angle_range
        self.horizontal_sample_num = horizontal_sample_num
        self.vertical_sample_num = vertical_sample_num

        # angular step sizes (degrees per sample); ranges are descending so the
        # steps are negative, matching the np.arange calls below
        self.horizontal_step = (self.horizontal_angle_range[1] - self.horizontal_angle_range[0]) / (
            self.horizontal_sample_num - 1)
        self.vertical_step = (self.vertical_angle_range[1] - self.vertical_angle_range[0]) / (
            self.vertical_sample_num - 1)

        self.horizontal_samples = np.arange(self.horizontal_angle_range[0], self.horizontal_angle_range[1],
                                            self.horizontal_step)
        self.vertical_samples = np.arange(self.vertical_angle_range[0], self.vertical_angle_range[1],
                                          self.vertical_step)

        horizontal_samples_in_rad = self.horizontal_samples / 180.0 * np.pi
        vertical_samples_in_rad = self.vertical_samples / 180.0 * np.pi

        virt_H = len(self.vertical_samples)
        virt_W = len(self.horizontal_samples)

        self.virt_H, self.virt_W = virt_H, virt_W

        # per-pixel spherical-to-Cartesian viewing directions of the virtual camera
        cos_theta = np.tile(np.cos(vertical_samples_in_rad).reshape(-1, 1), (1, virt_W))
        sin_theta = np.tile(np.sin(vertical_samples_in_rad).reshape(-1, 1), (1, virt_W))
        cos_phi = np.tile(np.cos(horizontal_samples_in_rad).reshape(1, -1), (virt_H, 1))
        sin_phi = np.tile(np.sin(horizontal_samples_in_rad).reshape(1, -1), (virt_H, 1))

        x = (sin_theta * cos_phi).reshape(1, virt_H, virt_W)
        y = cos_theta.reshape(1, virt_H, virt_W)
        z = (sin_theta * sin_phi).reshape(1, virt_H, virt_W)

        # (3, virt_H*virt_W) direction matrix, one column per virtual pixel
        self.dir_in_virt_cam = np.concatenate((x, y, z), axis=0)
        self.dir_in_virt_cam = self.dir_in_virt_cam.reshape(3, self.virt_H * self.virt_W)


    def forward(self, depth, inv_K, img_like_out=False):
        """
        Args:
            depth (Nx1xHxW): depth map
            inv_K (Nx4x4): inverse camera intrinsics
            img_like_out (bool): if True, the output shape is Nx4xHxW; else Nx4x(HxW)
        Returns:
            points (Nx4x(HxW)): 3D points in homogeneous coordinates
        """
        depth = depth.contiguous()

        xy = self.xy.repeat(depth.shape[0], 1, 1)
        ones = self.ones.repeat(depth.shape[0],1,1)

        # back-project: X = d * K^-1 * [u, v, 1]^T, then append homogeneous 1s
        points = torch.matmul(inv_K[:, :3, :3], xy)
        points = depth.view(depth.shape[0], 1, -1) * points
        points = torch.cat([points, ones], 1)

        if img_like_out:
            points = points.reshape(depth.shape[0], 4, self.height, self.width)
        return points
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def get_surface_normalv2(xyz, patch_size=5, mask_valid=None):
    """Estimate per-pixel surface normals from a point map via local cross products.

    xyz: xyz coordinates, in [b, h, w, c]
    patch: [p1, p2, p3,
            p4, p5, p6,
            p7, p8, p9]
    surface_normal = [(p9-p1) x (p3-p7)] + [(p6-p4) - (p8-p2)]
    return: normals in [b, 3, h, w] (zeroed where invalid) and a boolean
            validity mask in [b, 1, h, w]
    """
    b, h, w, c = xyz.shape
    half_patch = patch_size // 2

    # default validity: points with positive depth
    if mask_valid is None:  # fix: identity check; `mask_valid == None` is ambiguous on tensors
        mask_valid = xyz[:, :, :, 2] > 0  # [b, h, w]
    # zero-pad both mask and points so border pixels can be sliced uniformly
    mask_pad = torch.zeros((b, h + patch_size - 1, w + patch_size - 1), device=mask_valid.device).bool()
    mask_pad[:, half_patch:-half_patch, half_patch:-half_patch] = mask_valid

    xyz_pad = torch.zeros((b, h + patch_size - 1, w + patch_size - 1, c), dtype=xyz.dtype, device=xyz.device)
    xyz_pad[:, half_patch:-half_patch, half_patch:-half_patch, :] = xyz

    # neighbour differences at the full patch radius
    xyz_left = xyz_pad[:, half_patch:half_patch + h, :w, :]  # p4
    xyz_right = xyz_pad[:, half_patch:half_patch + h, -w:, :]  # p6
    xyz_top = xyz_pad[:, :h, half_patch:half_patch + w, :]  # p2
    xyz_bottom = xyz_pad[:, -h:, half_patch:half_patch + w, :]  # p8
    xyz_horizon = xyz_left - xyz_right  # p4p6
    xyz_vertical = xyz_top - xyz_bottom  # p2p8

    # neighbour differences at an inner radius (one pixel in from the patch edge)
    xyz_left_in = xyz_pad[:, half_patch:half_patch + h, 1:w+1, :]  # p4
    xyz_right_in = xyz_pad[:, half_patch:half_patch + h, patch_size-1:patch_size-1+w, :]  # p6
    xyz_top_in = xyz_pad[:, 1:h+1, half_patch:half_patch + w, :]  # p2
    xyz_bottom_in = xyz_pad[:, patch_size-1:patch_size-1+h, half_patch:half_patch + w, :]  # p8
    xyz_horizon_in = xyz_left_in - xyz_right_in  # p4p6
    xyz_vertical_in = xyz_top_in - xyz_bottom_in  # p2p8

    n_img_1 = torch.cross(xyz_horizon_in, xyz_vertical_in, dim=3)
    n_img_2 = torch.cross(xyz_horizon, xyz_vertical, dim=3)

    # re-orient normals consistently (flip those facing away from the camera ray)
    orient_mask = torch.sum(n_img_1 * xyz, dim=3) > 0
    n_img_1[orient_mask] *= -1
    orient_mask = torch.sum(n_img_2 * xyz, dim=3) > 0
    n_img_2[orient_mask] *= -1

    # normalize each estimate; epsilons guard against zero-length vectors
    n_img1_L2 = torch.sqrt(torch.sum(n_img_1 ** 2, dim=3, keepdim=True) + 1e-4)
    n_img1_norm = n_img_1 / (n_img1_L2 + 1e-8)

    n_img2_L2 = torch.sqrt(torch.sum(n_img_2 ** 2, dim=3, keepdim=True) + 1e-4)
    n_img2_norm = n_img_2 / (n_img2_L2 + 1e-8)

    # average the two estimates, renormalize, then re-orient once more
    n_img_aver = n_img1_norm + n_img2_norm
    n_img_aver_L2 = torch.sqrt(torch.sum(n_img_aver ** 2, dim=3, keepdim=True) + 1e-4)
    n_img_aver_norm = n_img_aver / (n_img_aver_L2 + 1e-8)
    orient_mask = torch.sum(n_img_aver_norm * xyz, dim=3) > 0
    n_img_aver_norm[orient_mask] *= -1

    # a normal is valid only if both neighbour pairs used to build it are valid
    mask_p4p6 = mask_pad[:, half_patch:half_patch + h, :w] & mask_pad[:, half_patch:half_patch + h, -w:]
    mask_p2p8 = mask_pad[:, :h, half_patch:half_patch + w] & mask_pad[:, -h:, half_patch:half_patch + w]
    mask_normal = mask_p2p8 & mask_p4p6
    n_img_aver_norm[~mask_normal] = 0

    return n_img_aver_norm.permute(0, 3, 1, 2).contiguous(), mask_normal[:, None, :, :]  # [b, 3, h, w], [b, 1, h, w]
|
| 173 |
+
|
| 174 |
+
class Depth2Normal(nn.Module):
    """Layer to compute a surface-normal map from a depth map and camera intrinsics.

    The pixel grid is built lazily on the first forward pass (and rebuilt when
    the input resolution changes) and cached as a non-persistent buffer.
    """
    def __init__(self,):
        super(Depth2Normal, self).__init__()

    def init_img_coor(self, height, width, device=None):
        """Build and cache homogeneous pixel coordinates as buffer `xy` [1, 3, H*W].

        Args:
            height (int): image height
            width (int): image width
            device: device for the grid. Defaults to CUDA to stay
                backward-compatible with the previous hardcoded behaviour;
                pass e.g. ``depth.device`` to run on other devices.
        """
        if device is None:
            device = torch.device("cuda")
        y, x = torch.meshgrid([torch.arange(0, height, dtype=torch.float32, device=device),
                               torch.arange(0, width, dtype=torch.float32, device=device)], indexing='ij')
        meshgrid = torch.stack((x, y))

        # homogeneous pixel coordinates [u, v, 1]
        ones = torch.ones((1, 1, height * width), device=device)
        xy = meshgrid.reshape(2, -1).unsqueeze(0)
        xy = torch.cat([xy, ones], 1)

        # non-persistent: follows .to() moves, not serialized in state_dict
        self.register_buffer('xy', xy, persistent=False)

    def back_projection(self, depth, inv_K, img_like_out=False, scale=1.0):
        """
        Args:
            depth (Nx1xHxW): depth map
            inv_K (Nx3x3 or larger): inverse camera intrinsics (top-left 3x3 is used)
            img_like_out (bool): if True, the output shape is Nx3xHxW; else Nx3x(HxW)
            scale (float): depth scale factor; the z component is divided by it
        Returns:
            points: 3D points, Nx3x(HxW) (or Nx3xHxW when img_like_out)
        """
        B, C, H, W = depth.shape
        depth = depth.contiguous()
        xy = self.xy  # broadcast over the batch instead of repeating

        # X = d * K^-1 * [u, v, 1]^T
        points = torch.matmul(inv_K[:, :3, :3], xy)
        points = depth.view(depth.shape[0], 1, -1) * points
        # undo the depth scaling on the z component only
        depth_descale = points[:, 2:3, :] / scale
        points = torch.cat((points[:, 0:2, :], depth_descale), dim=1)

        if img_like_out:
            points = points.reshape(depth.shape[0], 3, H, W)
        return points

    def forward(self, depth, intrinsics, masks, scale):
        """
        Args:
            depth (Nx1xHxW): depth map
            intrinsics (Nx3x3 or Nx4x4): camera intrinsics
            masks (Nx1xHxW): boolean validity mask for the depth
            scale (float): depth scale factor applied in back-projection
        Returns:
            normals (Nx3xHxW): normalized surface normals (zero where invalid)
            normal_masks (Nx1xHxW): validity mask for the normals
        """
        B, C, H, W = depth.shape
        # lazily (re)build the pixel grid on resolution change, on the same
        # device as the input (fix: was always created on "cuda")
        if 'xy' not in self._buffers or self.xy.shape[-1] != H * W:
            self.init_img_coor(height=H, width=W, device=depth.device)
        inv_K = intrinsics.inverse()

        xyz = self.back_projection(depth, inv_K, scale=scale)  # [N, 3, HxW]
        xyz = xyz.view(depth.shape[0], 3, H, W)
        xyz = xyz[:, :3].permute(0, 2, 3, 1).contiguous()  # [b, h, w, c]

        # fix: squeeze only the channel axis -- a bare .squeeze() also dropped
        # the batch axis when B == 1
        normals, normal_masks = get_surface_normalv2(xyz, mask_valid=masks.squeeze(1))
        normal_masks = normal_masks & masks
        return normals, normal_masks
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
if __name__ == '__main__':
    # Smoke test: run Depth2Normal twice on random data and print the result.
    d2n = Depth2Normal()
    depth = np.random.randn(2, 1, 20, 22)
    intrin = np.array([[300, 0, 10], [0, 300, 10], [0, 0, 1]])
    intrinsics = np.stack([intrin, intrin], axis=0)

    depth_t = torch.from_numpy(depth).cuda().float()
    intrinsics = torch.from_numpy(intrinsics).cuda().float()
    # fix: forward() requires (depth, intrinsics, masks, scale); the original
    # call passed only two arguments and raised a TypeError
    masks = depth_t > 0
    normal, normal_mask = d2n(depth_t, intrinsics, masks, scale=1.0)
    normal2, _ = d2n(depth_t, intrinsics, masks, scale=1.0)
    print(normal)
|
external/Metric3D/training/mono/model/losses/photometric_loss_functions.py
ADDED
|
@@ -0,0 +1,300 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from torch import nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
from mono.utils.inverse_warp import inverse_warp2
|
| 7 |
+
|
| 8 |
+
#device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class SSIM(nn.Module):
    """Per-pixel SSIM-based dissimilarity between two images.

    Returns (1 - SSIM) / 2 clamped to [0, 1]; the output has the same spatial
    size as the input because reflection padding compensates for the 7x7
    averaging window.
    """
    def __init__(self):
        super(SSIM, self).__init__()
        window = 7
        # local means and (co)variances are estimated with 7x7 box filters
        self.mu_x_pool = nn.AvgPool2d(window, 1)
        self.mu_y_pool = nn.AvgPool2d(window, 1)
        self.sig_x_pool = nn.AvgPool2d(window, 1)
        self.sig_y_pool = nn.AvgPool2d(window, 1)
        self.sig_xy_pool = nn.AvgPool2d(window, 1)

        self.refl = nn.ReflectionPad2d(window // 2)

        # stabilisation constants (K1=0.01, K2=0.03 squared)
        self.C1 = 0.01 ** 2
        self.C2 = 0.03 ** 2

    def forward(self, x, y):
        xp = self.refl(x)
        yp = self.refl(y)

        mean_x = self.mu_x_pool(xp)
        mean_y = self.mu_y_pool(yp)

        var_x = self.sig_x_pool(xp * xp) - mean_x * mean_x
        var_y = self.sig_y_pool(yp * yp) - mean_y * mean_y
        cov_xy = self.sig_xy_pool(xp * yp) - mean_x * mean_y

        numerator = (2 * mean_x * mean_y + self.C1) * (2 * cov_xy + self.C2)
        denominator = (mean_x * mean_x + mean_y * mean_y + self.C1) * (var_x + var_y + self.C2)

        return torch.clamp((1 - numerator / denominator) / 2, 0, 1)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class PhotometricGeometricLoss(nn.Module):
    """Photometric (SSIM + L1) and geometric-consistency loss between a target
    frame and a set of reference frames, with per-pixel minimum-reprojection
    selection and auto-masking of stationary pixels.
    """
    def __init__(self, loss_weight=1.0, data_type=['sfm', 'stereo', 'lidar'], **kwargs):
        super(PhotometricGeometricLoss, self).__init__()
        self.no_min_optimize = False       # if True, skip per-pixel min over views
        self.no_auto_mask = False          # if True, skip the stationary-pixel auto mask
        self.return_dynamic_mask = True
        self.ssim_loss = SSIM()
        self.no_ssim = False
        self.no_dynamic_mask = False
        self.loss_weight_photo = 1.0
        self.loss_weight_geometry = 0.5
        self.total_loss_weight = loss_weight
        self.data_type = data_type

    def photo_and_geometry_loss(self, tgt_img, ref_imgs, tgt_depth, ref_depths, intrinsics, poses, poses_inv):
        """Accumulate pairwise losses over all reference frames (both warp
        directions), then reduce with min-reprojection and masking.

        Returns:
            photo_loss, geometry_loss (scalars) and dynamic_mask (Nx1xHxW or None).
        """
        diff_img_list = []
        diff_color_list = []
        diff_depth_list = []
        valid_mask_list = []
        auto_mask_list = []

        for ref_img, ref_depth, pose, pose_inv in zip(ref_imgs, ref_depths, poses, poses_inv):
            # tgt <- ref direction
            (
                diff_img_tmp1,
                diff_color_tmp1,
                diff_depth_tmp1,
                valid_mask_tmp1,
                auto_mask_tmp1
            ) = self.compute_pairwise_loss(
                tgt_img,
                ref_img,
                tgt_depth,
                ref_depth,
                pose,
                intrinsics,
            )

            # ref <- tgt direction
            (
                diff_img_tmp2,
                diff_color_tmp2,
                diff_depth_tmp2,
                valid_mask_tmp2,
                auto_mask_tmp2
            ) = self.compute_pairwise_loss(
                ref_img,
                tgt_img,
                ref_depth,
                tgt_depth,
                pose_inv,
                intrinsics,
            )

            diff_img_list += [diff_img_tmp1, diff_img_tmp2]
            diff_color_list += [diff_color_tmp1, diff_color_tmp2]
            diff_depth_list += [diff_depth_tmp1, diff_depth_tmp2]
            valid_mask_list += [valid_mask_tmp1, valid_mask_tmp2]
            auto_mask_list += [auto_mask_tmp1, auto_mask_tmp2]

        diff_img = torch.cat(diff_img_list, dim=1)
        diff_color = torch.cat(diff_color_list, dim=1)
        diff_depth = torch.cat(diff_depth_list, dim=1)
        valid_mask = torch.cat(valid_mask_list, dim=1)
        auto_mask = torch.cat(auto_mask_list, dim=1)

        # using photo loss to select best match in multiple views
        if not self.no_min_optimize:
            indices = torch.argmin(diff_color, dim=1, keepdim=True)

            diff_img = torch.gather(diff_img, 1, indices)
            diff_depth = torch.gather(diff_depth, 1, indices)
            valid_mask = torch.gather(valid_mask, 1, indices)
            auto_mask = torch.gather(auto_mask, 1, indices)

        if not self.no_auto_mask:
            photo_loss = self.mean_on_mask(diff_img, valid_mask * auto_mask)
            geometry_loss = self.mean_on_mask(diff_depth, valid_mask * auto_mask)
        else:
            photo_loss = self.mean_on_mask(diff_img, valid_mask)
            geometry_loss = self.mean_on_mask(diff_depth, valid_mask)

        dynamic_mask = None
        if self.return_dynamic_mask:
            # get dynamic mask for tgt image (even indices are tgt<-ref warps)
            dynamic_mask_list = []
            for i in range(0, len(diff_depth_list), 2):
                tmp = diff_depth_list[i]
                # NOTE(review): the fixed index [1] looks suspicious -- presumably
                # this should be valid_mask_list[i] so each diff is zeroed by its
                # own validity mask; confirm before changing.
                tmp[valid_mask_list[1]<1] = 0
                dynamic_mask_list += [1-tmp]

            dynamic_mask = torch.cat(dynamic_mask_list, dim=1).mean(dim=1, keepdim=True)

        return photo_loss, geometry_loss, dynamic_mask


    def compute_pairwise_loss(self, tgt_img, ref_img, tgt_depth, ref_depth, pose, intrinsic):
        """Warp ref into tgt and compute per-pixel photometric / depth
        discrepancies plus validity and auto masks.
        """
        ref_img_warped, projected_depth, computed_depth = inverse_warp2(ref_img, tgt_depth, ref_depth, pose, intrinsic, padding_mode='zeros')

        # symmetric relative depth inconsistency in [0, 1)
        diff_depth = (computed_depth-projected_depth).abs()/(computed_depth+projected_depth)

        # masking zero values (out-of-view warps and empty target pixels)
        valid_mask_ref = (ref_img_warped.abs().mean(dim=1, keepdim=True) > 1e-3).float()
        valid_mask_tgt = (tgt_img.abs().mean(dim=1, keepdim=True) > 1e-3).float()
        valid_mask = valid_mask_tgt * valid_mask_ref

        # auto-mask: keep only pixels where warping beats the identity reprojection
        diff_color = (tgt_img-ref_img_warped).abs().mean(dim=1, keepdim=True)
        identity_warp_err = (tgt_img-ref_img).abs().mean(dim=1, keepdim=True)
        auto_mask = (diff_color<identity_warp_err).float()

        diff_img = (tgt_img-ref_img_warped).abs().clamp(0,1)
        if not self.no_ssim:
            ssim_map = self.ssim_loss(tgt_img, ref_img_warped)
            diff_img = (0.15 * diff_img + 0.85 * ssim_map)
        diff_img = torch.mean(diff_img, dim=1, keepdim=True)

        # reduce photometric loss weight for dynamic regions
        if not self.no_dynamic_mask:
            weight_mask = (1-diff_depth)
            diff_img = diff_img * weight_mask

        return diff_img, diff_color, diff_depth, valid_mask, auto_mask

    # compute mean value on a binary mask
    def mean_on_mask(self, diff, valid_mask):
        """Mean of `diff` over pixels where `valid_mask` is set (eps-guarded)."""
        mask = valid_mask.expand_as(diff)
        mean_value = (diff * mask).sum() / (mask.sum() + 1e-6)
        return mean_value


    def forward(self, input, ref_input, prediction, ref_prediction, intrinsic, **kwargs):
        """Combine photometric and geometric terms; raises on NaN/Inf.

        kwargs must provide 'pose' and 'inv_pose' (lists aligned with ref_input).
        """
        photo_loss, geometry_loss, dynamic_mask = self.photo_and_geometry_loss(
            tgt_img=input,
            ref_imgs=ref_input,
            tgt_depth=prediction,
            ref_depths=ref_prediction,
            intrinsics=intrinsic,
            poses=kwargs['pose'],
            poses_inv=kwargs['inv_pose'])
        loss = self.loss_weight_geometry * geometry_loss + self.loss_weight_photo * photo_loss
        if torch.isnan(loss).item() | torch.isinf(loss).item():
            # fix: the message previously said 'VNL error' (copy-paste from another loss)
            raise RuntimeError(f'PhotometricGeometricLoss error, {loss}')
        return loss * self.total_loss_weight
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
# def compute_smooth_loss(tgt_depth, tgt_img):
|
| 204 |
+
# def get_smooth_loss(disp, img):
|
| 205 |
+
# """
|
| 206 |
+
# Computes the smoothness loss for a disparity image
|
| 207 |
+
# The color image is used for edge-aware smoothness
|
| 208 |
+
# """
|
| 209 |
+
|
| 210 |
+
# # normalize
|
| 211 |
+
# mean_disp = disp.mean(2, True).mean(3, True)
|
| 212 |
+
# norm_disp = disp / (mean_disp + 1e-7)
|
| 213 |
+
# disp = norm_disp
|
| 214 |
+
|
| 215 |
+
# grad_disp_x = torch.abs(disp[:, :, :, :-1] - disp[:, :, :, 1:])
|
| 216 |
+
# grad_disp_y = torch.abs(disp[:, :, :-1, :] - disp[:, :, 1:, :])
|
| 217 |
+
|
| 218 |
+
# grad_img_x = torch.mean(torch.abs(img[:, :, :, :-1] - img[:, :, :, 1:]), 1, keepdim=True)
|
| 219 |
+
# grad_img_y = torch.mean(torch.abs(img[:, :, :-1, :] - img[:, :, 1:, :]), 1, keepdim=True)
|
| 220 |
+
|
| 221 |
+
# grad_disp_x *= torch.exp(-grad_img_x)
|
| 222 |
+
# grad_disp_y *= torch.exp(-grad_img_y)
|
| 223 |
+
|
| 224 |
+
# return grad_disp_x.mean() + grad_disp_y.mean()
|
| 225 |
+
|
| 226 |
+
# loss = get_smooth_loss(tgt_depth, tgt_img)
|
| 227 |
+
|
| 228 |
+
# return loss
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
# @torch.no_grad()
|
| 232 |
+
# def compute_errors(gt, pred, dataset):
|
| 233 |
+
# # pred : b c h w
|
| 234 |
+
# # gt: b h w
|
| 235 |
+
|
| 236 |
+
# abs_diff = abs_rel = sq_rel = log10 = rmse = rmse_log = a1 = a2 = a3 = 0.0
|
| 237 |
+
|
| 238 |
+
# batch_size, h, w = gt.size()
|
| 239 |
+
|
| 240 |
+
# if pred.nelement() != gt.nelement():
|
| 241 |
+
# pred = F.interpolate(pred, [h,w], mode='bilinear', align_corners=False)
|
| 242 |
+
# # pred = F.interpolate(pred, [h,w], mode='nearest')
|
| 243 |
+
|
| 244 |
+
# pred = pred.view(batch_size, h, w)
|
| 245 |
+
|
| 246 |
+
# if dataset == 'kitti':
|
| 247 |
+
# crop_mask = gt[0] != gt[0]
|
| 248 |
+
# y1, y2 = int(0.40810811 * gt.size(1)), int(0.99189189 * gt.size(1))
|
| 249 |
+
# x1, x2 = int(0.03594771 * gt.size(2)), int(0.96405229 * gt.size(2))
|
| 250 |
+
# crop_mask[y1:y2, x1:x2] = 1
|
| 251 |
+
# max_depth = 80
|
| 252 |
+
|
| 253 |
+
# if dataset == 'cs':
|
| 254 |
+
# crop_mask = gt[0] != gt[0]
|
| 255 |
+
# crop_mask[256:, 192:1856] = 1
|
| 256 |
+
# max_depth = 80
|
| 257 |
+
|
| 258 |
+
# if dataset == 'nyu':
|
| 259 |
+
# crop_mask = gt[0] != gt[0]
|
| 260 |
+
# crop = np.array([45, 471, 41, 601]).astype(np.int32)
|
| 261 |
+
# crop_mask[crop[0]:crop[1], crop[2]:crop[3]] = 1
|
| 262 |
+
# max_depth = 10
|
| 263 |
+
|
| 264 |
+
# if dataset == 'bonn':
|
| 265 |
+
# crop_mask = gt[0] != gt[0]
|
| 266 |
+
# crop_mask[:,:] = 1
|
| 267 |
+
# max_depth = 10
|
| 268 |
+
|
| 269 |
+
# if dataset == 'ddad':
|
| 270 |
+
# crop_mask = gt[0] != gt[0]
|
| 271 |
+
# crop_mask[:,:] = 1
|
| 272 |
+
# max_depth = 200
|
| 273 |
+
|
| 274 |
+
# min_depth = 1e-3
|
| 275 |
+
# for current_gt, current_pred in zip(gt, pred):
|
| 276 |
+
# valid = (current_gt > min_depth) & (current_gt < max_depth)
|
| 277 |
+
# valid = valid & crop_mask
|
| 278 |
+
|
| 279 |
+
# valid_gt = current_gt[valid]
|
| 280 |
+
# valid_pred = current_pred[valid]
|
| 281 |
+
|
| 282 |
+
# # align scale
|
| 283 |
+
# valid_pred = valid_pred * torch.median(valid_gt)/torch.median(valid_pred)
|
| 284 |
+
|
| 285 |
+
# valid_pred = valid_pred.clamp(min_depth, max_depth)
|
| 286 |
+
|
| 287 |
+
# thresh = torch.max((valid_gt / valid_pred), (valid_pred / valid_gt))
|
| 288 |
+
# a1 += (thresh < 1.25).float().mean()
|
| 289 |
+
# a2 += (thresh < 1.25 ** 2).float().mean()
|
| 290 |
+
# a3 += (thresh < 1.25 ** 3).float().mean()
|
| 291 |
+
|
| 292 |
+
# diff_i = valid_gt - valid_pred
|
| 293 |
+
# abs_diff += torch.mean(torch.abs(diff_i))
|
| 294 |
+
# abs_rel += torch.mean(torch.abs(diff_i) / valid_gt)
|
| 295 |
+
# sq_rel += torch.mean(((diff_i)**2) / valid_gt)
|
| 296 |
+
# rmse += torch.sqrt(torch.mean(diff_i ** 2))
|
| 297 |
+
# rmse_log += torch.sqrt(torch.mean((torch.log(valid_gt) - torch.log(valid_pred)) ** 2))
|
| 298 |
+
# log10 += torch.mean(torch.abs((torch.log10(valid_gt) - torch.log10(valid_pred))))
|
| 299 |
+
|
| 300 |
+
# return [metric.item() / batch_size for metric in [abs_diff, abs_rel, sq_rel, log10, rmse, rmse_log, a1, a2, a3]]
|
external/Metric3D/training/mono/model/model_pipelines/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .model_pipeline import EncoderDecoder
|
| 2 |
+
from .dense_pipeline import DensePredModel
|
| 3 |
+
|
| 4 |
+
__all__ = [
|
| 5 |
+
'EncoderDecoder', 'DensePredModel'
|
| 6 |
+
]
|
external/Metric3D/training/mono/model/model_pipelines/dense_pipeline.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from mono.utils.comm import get_func
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class DensePredModel(nn.Module):
    """Encoder-decoder wrapper for dense prediction, assembled from a config.

    The backbone and the decode head are resolved dynamically from the dotted
    names declared in ``cfg.model.backbone`` / ``cfg.model.decode_head``.
    """
    def __init__(self, cfg):
        super(DensePredModel, self).__init__()

        backbone_cfg = cfg.model.backbone
        head_cfg = cfg.model.decode_head
        self.encoder = get_func('mono.model.' + backbone_cfg.prefix + backbone_cfg.type)(**backbone_cfg)
        self.decoder = get_func('mono.model.' + head_cfg.prefix + head_cfg.type)(cfg)

        self.training = True

    def forward(self, input, **kwargs):
        # encoder yields a multi-scale pyramid [f_32, f_16, f_8, f_4];
        # the decoder consumes it and returns [x_32, x_16, x_8, x_4, x, ...]
        encoded = self.encoder(input)
        return self.decoder(encoded, **kwargs)
|
external/Metric3D/training/mono/model/model_pipelines/model_pipeline.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from mono.utils.comm import get_func
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class EncoderDecoder(nn.Module):
    """Backbone encoder + decode head + depth output head in one module.

    Encoder and decoder classes are looked up from the config via
    ``mono.utils.comm.get_func``.
    """

    def __init__(self, cfg):
        super(EncoderDecoder, self).__init__()

        backbone_cfg = cfg.model.backbone
        head_cfg = cfg.model.decode_head
        self.encoder = get_func('mono.model.' + backbone_cfg.prefix + backbone_cfg.type)(**backbone_cfg)
        self.decoder = get_func('mono.model.' + head_cfg.prefix + head_cfg.type)(cfg)

        # NOTE(review): ``DepthOutHead`` is not imported anywhere in this
        # module as shown; constructing this class would raise NameError unless
        # the name is provided elsewhere — confirm the intended import.
        self.depth_out_head = DepthOutHead(method=cfg.model.depth_out_head.method, **cfg)
        self.training = True

    def forward(self, input, **kwargs):
        # Backbone feature pyramid, typically [f_32, f_16, f_8, f_4].
        features = self.encoder(input)
        # Decoder outputs, typically [x_32, x_16, x_8, x_4, x, ...].
        decode_list = self.decoder(features)

        # Only decoder output index 4 is passed on to the depth head.
        pred, conf, logit, bins_edges = self.depth_out_head([decode_list[4], ])

        return dict(
            prediction=pred[0],
            confidence=conf[0],
            pred_logit=logit[0],
            auxi_pred=None,          # no auxiliary predictions in this pipeline
            auxi_logit_list=None,    # ditto for auxiliary logits
            bins_edges=bins_edges[0],
        )
|
external/Metric3D/training/mono/model/monodepth_model.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
from mono.utils.comm import get_func
|
| 4 |
+
from .__base_model__ import BaseDepthModel
|
| 5 |
+
|
| 6 |
+
class DepthModel(BaseDepthModel):
    """Concrete depth model; all real behavior lives in ``BaseDepthModel``.

    This subclass is the configurable entry point constructed by
    ``get_monodepth_model``.
    """

    def __init__(self, cfg, criterions, **kwargs):
        """
        Args:
            cfg: model configuration forwarded to the base model.
            criterions: loss criterions forwarded to the base model.
            **kwargs: accepted for interface compatibility; unused here.
                (Renamed from the original ``**kwards`` typo — internal name
                only, so callers are unaffected.)
        """
        super(DepthModel, self).__init__(cfg, criterions)
        # The original also read ``cfg.model.type`` into an unused local;
        # dropped as dead code.
        # NOTE: nn.Module already initializes ``training`` to True; kept for
        # parity with the original code.
        self.training = True
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def get_monodepth_model(
    cfg : dict,
    criterions: dict,
    **kwargs
) -> nn.Module:
    """Construct the depth model from the given config and criterions."""
    model = DepthModel(cfg, criterions, **kwargs)
    # Sanity check: the factory must hand back a torch module.
    assert isinstance(model, nn.Module)
    return model
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def get_configured_monodepth_model(
    cfg: dict,
    criterions: dict,
) -> nn.Module:
    """Thin wrapper around ``get_monodepth_model``.

    Args:
        cfg: configuration for the network.
        criterions: loss criterions used by the model.

    Returns:
        The constructed depth model.
    """
    return get_monodepth_model(cfg, criterions)
|
| 44 |
+
|
| 45 |
+
|
external/Metric3D/training/mono/scripts/test_scripts/test_vit.sh
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Evaluate a small-ViT Metric3D checkpoint on the iBims-1 benchmark.
cd ../../../

# NOTE(review): this path references test_configs_vit_small/, but this commit
# adds configs under test_configs_vit/ — confirm the intended directory.
python mono/tools/test.py \
    mono/configs/test_configs_vit_small/ibims.vit.dpt.raft.py \
    --load-from vit_small_step00800000.pth
|
external/Metric3D/training/mono/scripts/train_scripts/train.sh
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Launch sanity-check training of the small ViT RAFT-decoder model
# through the SLURM launcher.
cd ../../../

python mono/tools/train.py \
    mono/configs/RAFTDecoder/vit.raft5.small.sanity_check.py \
    --use-tensorboard \
    --launcher slurm \
    --experiment_name set1
|
external/Metric3D/training/mono/scripts/train_scripts/train_kitti.sh
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Fine-tune the large ViT RAFT-decoder model on KITTI through SLURM,
# starting from an existing checkpoint.
cd ../../../

# Replace Path_to_Checkpoint.pth with a real checkpoint path before running.
python mono/tools/train.py \
    mono/configs/RAFTDecoder/vit.raft5.large.kitti.py \
    --use-tensorboard \
    --launcher slurm \
    --load-from Path_to_Checkpoint.pth \
    --experiment_name set1
|