Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- RAVE-main/annotator/midas/LICENSE +21 -0
- RAVE-main/annotator/midas/__init__.py +49 -0
- RAVE-main/annotator/midas/__pycache__/__init__.cpython-38.pyc +0 -0
- RAVE-main/annotator/midas/__pycache__/api.cpython-38.pyc +0 -0
- RAVE-main/annotator/midas/api.py +181 -0
- RAVE-main/annotator/midas/midas/__init__.py +0 -0
- RAVE-main/annotator/midas/midas/__pycache__/__init__.cpython-38.pyc +0 -0
- RAVE-main/annotator/midas/midas/__pycache__/base_model.cpython-38.pyc +0 -0
- RAVE-main/annotator/midas/midas/__pycache__/blocks.cpython-38.pyc +0 -0
- RAVE-main/annotator/midas/midas/__pycache__/dpt_depth.cpython-38.pyc +0 -0
- RAVE-main/annotator/midas/midas/__pycache__/midas_net.cpython-38.pyc +0 -0
- RAVE-main/annotator/midas/midas/__pycache__/midas_net_custom.cpython-38.pyc +0 -0
- RAVE-main/annotator/midas/midas/__pycache__/transforms.cpython-38.pyc +0 -0
- RAVE-main/annotator/midas/midas/__pycache__/vit.cpython-38.pyc +0 -0
- RAVE-main/annotator/midas/midas/base_model.py +16 -0
- RAVE-main/annotator/midas/midas/blocks.py +342 -0
- RAVE-main/annotator/midas/midas/dpt_depth.py +109 -0
- RAVE-main/annotator/midas/midas/midas_net.py +76 -0
- RAVE-main/annotator/midas/midas/midas_net_custom.py +128 -0
- RAVE-main/annotator/midas/midas/transforms.py +234 -0
- RAVE-main/annotator/midas/midas/vit.py +491 -0
- RAVE-main/annotator/midas/utils.py +189 -0
- RAVE-main/annotator/mmpkg/mmcv/__init__.py +15 -0
- RAVE-main/annotator/mmpkg/mmcv/engine/__init__.py +8 -0
- RAVE-main/annotator/mmpkg/mmcv/fileio/parse.py +97 -0
- RAVE-main/annotator/mmpkg/mmcv/parallel/data_container.py +89 -0
- RAVE-main/annotator/mmpkg/mmcv/parallel/distributed_deprecated.py +70 -0
- RAVE-main/annotator/mmpkg/mmcv/parallel/scatter_gather.py +59 -0
- RAVE-main/annotator/mmpkg/mmcv/parallel/utils.py +20 -0
- RAVE-main/annotator/mmpkg/mmcv/version.py +35 -0
- RAVE-main/annotator/oneformer/detectron2/export/README.md +15 -0
- RAVE-main/annotator/oneformer/detectron2/export/__init__.py +30 -0
- RAVE-main/annotator/oneformer/detectron2/export/api.py +230 -0
- RAVE-main/annotator/oneformer/detectron2/export/c10.py +557 -0
- RAVE-main/annotator/oneformer/detectron2/export/caffe2_export.py +203 -0
- RAVE-main/annotator/oneformer/detectron2/export/caffe2_inference.py +161 -0
- RAVE-main/annotator/oneformer/detectron2/export/caffe2_modeling.py +419 -0
- RAVE-main/annotator/oneformer/detectron2/export/caffe2_patch.py +152 -0
- RAVE-main/annotator/oneformer/detectron2/export/flatten.py +330 -0
- RAVE-main/annotator/oneformer/detectron2/export/shared.py +1039 -0
- RAVE-main/annotator/oneformer/detectron2/export/torchscript.py +132 -0
- RAVE-main/annotator/oneformer/detectron2/export/torchscript_patch.py +406 -0
- RAVE-main/annotator/oneformer/detectron2/layers/aspp.py +144 -0
- RAVE-main/annotator/oneformer/detectron2/layers/batch_norm.py +300 -0
- RAVE-main/annotator/oneformer/detectron2/layers/blocks.py +111 -0
- RAVE-main/annotator/oneformer/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h +35 -0
- RAVE-main/annotator/oneformer/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp +39 -0
- RAVE-main/annotator/oneformer/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu +130 -0
- RAVE-main/annotator/oneformer/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h +370 -0
- RAVE-main/annotator/oneformer/detectron2/layers/csrc/cocoeval/cocoeval.cpp +507 -0
RAVE-main/annotator/midas/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2019 Intel ISL (Intel Intelligent Systems Lab)
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
RAVE-main/annotator/midas/__init__.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
import torch
|
| 4 |
+
|
| 5 |
+
from einops import rearrange
|
| 6 |
+
from .api import MiDaSInference
|
| 7 |
+
from annotator.annotator_path import DEVICE
|
| 8 |
+
|
| 9 |
+
model = None
|
| 10 |
+
|
| 11 |
+
def unload_midas_model():
|
| 12 |
+
global model
|
| 13 |
+
if model is not None:
|
| 14 |
+
model = model.cpu()
|
| 15 |
+
|
| 16 |
+
def apply_midas(input_image, a=np.pi * 2.0, bg_th=0.1):
|
| 17 |
+
global model
|
| 18 |
+
if model is None:
|
| 19 |
+
model = MiDaSInference(model_type="dpt_hybrid")
|
| 20 |
+
if DEVICE.type != 'mps':
|
| 21 |
+
model = model.to(DEVICE)
|
| 22 |
+
|
| 23 |
+
assert input_image.ndim == 3
|
| 24 |
+
image_depth = input_image
|
| 25 |
+
with torch.no_grad():
|
| 26 |
+
image_depth = torch.from_numpy(image_depth).float()
|
| 27 |
+
if DEVICE.type != 'mps':
|
| 28 |
+
image_depth = image_depth.to(DEVICE)
|
| 29 |
+
image_depth = image_depth / 127.5 - 1.0
|
| 30 |
+
image_depth = rearrange(image_depth, 'h w c -> 1 c h w')
|
| 31 |
+
depth = model(image_depth)[0]
|
| 32 |
+
|
| 33 |
+
depth_pt = depth.clone()
|
| 34 |
+
depth_pt -= torch.min(depth_pt)
|
| 35 |
+
depth_pt /= torch.max(depth_pt)
|
| 36 |
+
depth_pt = depth_pt.cpu().numpy()
|
| 37 |
+
depth_image = (depth_pt * 255.0).clip(0, 255).astype(np.uint8)
|
| 38 |
+
|
| 39 |
+
depth_np = depth.cpu().numpy()
|
| 40 |
+
x = cv2.Sobel(depth_np, cv2.CV_32F, 1, 0, ksize=3)
|
| 41 |
+
y = cv2.Sobel(depth_np, cv2.CV_32F, 0, 1, ksize=3)
|
| 42 |
+
z = np.ones_like(x) * a
|
| 43 |
+
x[depth_pt < bg_th] = 0
|
| 44 |
+
y[depth_pt < bg_th] = 0
|
| 45 |
+
normal = np.stack([x, y, z], axis=2)
|
| 46 |
+
normal /= np.sum(normal ** 2.0, axis=2, keepdims=True) ** 0.5
|
| 47 |
+
normal_image = (normal * 127.5 + 127.5).clip(0, 255).astype(np.uint8)[:, :, ::-1]
|
| 48 |
+
|
| 49 |
+
return depth_image, normal_image
|
RAVE-main/annotator/midas/__pycache__/__init__.cpython-38.pyc
ADDED
|
Binary file (1.55 kB). View file
|
|
|
RAVE-main/annotator/midas/__pycache__/api.cpython-38.pyc
ADDED
|
Binary file (3.99 kB). View file
|
|
|
RAVE-main/annotator/midas/api.py
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# based on https://github.com/isl-org/MiDaS
|
| 2 |
+
|
| 3 |
+
import cv2
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
import os
|
| 7 |
+
from annotator.annotator_path import models_path
|
| 8 |
+
|
| 9 |
+
from torchvision.transforms import Compose
|
| 10 |
+
|
| 11 |
+
from .midas.dpt_depth import DPTDepthModel
|
| 12 |
+
from .midas.midas_net import MidasNet
|
| 13 |
+
from .midas.midas_net_custom import MidasNet_small
|
| 14 |
+
from .midas.transforms import Resize, NormalizeImage, PrepareForNet
|
| 15 |
+
|
| 16 |
+
base_model_path = os.path.join(models_path, "midas")
|
| 17 |
+
old_modeldir = os.path.dirname(os.path.realpath(__file__))
|
| 18 |
+
remote_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/dpt_hybrid-midas-501f0c75.pt"
|
| 19 |
+
|
| 20 |
+
ISL_PATHS = {
|
| 21 |
+
"dpt_large": os.path.join(base_model_path, "dpt_large-midas-2f21e586.pt"),
|
| 22 |
+
"dpt_hybrid": os.path.join(base_model_path, "dpt_hybrid-midas-501f0c75.pt"),
|
| 23 |
+
"midas_v21": "",
|
| 24 |
+
"midas_v21_small": "",
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
OLD_ISL_PATHS = {
|
| 28 |
+
"dpt_large": os.path.join(old_modeldir, "dpt_large-midas-2f21e586.pt"),
|
| 29 |
+
"dpt_hybrid": os.path.join(old_modeldir, "dpt_hybrid-midas-501f0c75.pt"),
|
| 30 |
+
"midas_v21": "",
|
| 31 |
+
"midas_v21_small": "",
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def disabled_train(self, mode=True):
|
| 36 |
+
"""Overwrite model.train with this function to make sure train/eval mode
|
| 37 |
+
does not change anymore."""
|
| 38 |
+
return self
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def load_midas_transform(model_type):
|
| 42 |
+
# https://github.com/isl-org/MiDaS/blob/master/run.py
|
| 43 |
+
# load transform only
|
| 44 |
+
if model_type == "dpt_large": # DPT-Large
|
| 45 |
+
net_w, net_h = 384, 384
|
| 46 |
+
resize_mode = "minimal"
|
| 47 |
+
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
|
| 48 |
+
|
| 49 |
+
elif model_type == "dpt_hybrid": # DPT-Hybrid
|
| 50 |
+
net_w, net_h = 384, 384
|
| 51 |
+
resize_mode = "minimal"
|
| 52 |
+
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
|
| 53 |
+
|
| 54 |
+
elif model_type == "midas_v21":
|
| 55 |
+
net_w, net_h = 384, 384
|
| 56 |
+
resize_mode = "upper_bound"
|
| 57 |
+
normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
|
| 58 |
+
|
| 59 |
+
elif model_type == "midas_v21_small":
|
| 60 |
+
net_w, net_h = 256, 256
|
| 61 |
+
resize_mode = "upper_bound"
|
| 62 |
+
normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
|
| 63 |
+
|
| 64 |
+
else:
|
| 65 |
+
assert False, f"model_type '{model_type}' not implemented, use: --model_type large"
|
| 66 |
+
|
| 67 |
+
transform = Compose(
|
| 68 |
+
[
|
| 69 |
+
Resize(
|
| 70 |
+
net_w,
|
| 71 |
+
net_h,
|
| 72 |
+
resize_target=None,
|
| 73 |
+
keep_aspect_ratio=True,
|
| 74 |
+
ensure_multiple_of=32,
|
| 75 |
+
resize_method=resize_mode,
|
| 76 |
+
image_interpolation_method=cv2.INTER_CUBIC,
|
| 77 |
+
),
|
| 78 |
+
normalization,
|
| 79 |
+
PrepareForNet(),
|
| 80 |
+
]
|
| 81 |
+
)
|
| 82 |
+
|
| 83 |
+
return transform
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def load_model(model_type):
|
| 87 |
+
# https://github.com/isl-org/MiDaS/blob/master/run.py
|
| 88 |
+
# load network
|
| 89 |
+
model_path = ISL_PATHS[model_type]
|
| 90 |
+
old_model_path = OLD_ISL_PATHS[model_type]
|
| 91 |
+
if model_type == "dpt_large": # DPT-Large
|
| 92 |
+
model = DPTDepthModel(
|
| 93 |
+
path=model_path,
|
| 94 |
+
backbone="vitl16_384",
|
| 95 |
+
non_negative=True,
|
| 96 |
+
)
|
| 97 |
+
net_w, net_h = 384, 384
|
| 98 |
+
resize_mode = "minimal"
|
| 99 |
+
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
|
| 100 |
+
|
| 101 |
+
elif model_type == "dpt_hybrid": # DPT-Hybrid
|
| 102 |
+
if os.path.exists(old_model_path):
|
| 103 |
+
model_path = old_model_path
|
| 104 |
+
elif not os.path.exists(model_path):
|
| 105 |
+
from basicsr.utils.download_util import load_file_from_url
|
| 106 |
+
load_file_from_url(remote_model_path, model_dir=base_model_path)
|
| 107 |
+
|
| 108 |
+
model = DPTDepthModel(
|
| 109 |
+
path=model_path,
|
| 110 |
+
backbone="vitb_rn50_384",
|
| 111 |
+
non_negative=True,
|
| 112 |
+
)
|
| 113 |
+
net_w, net_h = 384, 384
|
| 114 |
+
resize_mode = "minimal"
|
| 115 |
+
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
|
| 116 |
+
|
| 117 |
+
elif model_type == "midas_v21":
|
| 118 |
+
model = MidasNet(model_path, non_negative=True)
|
| 119 |
+
net_w, net_h = 384, 384
|
| 120 |
+
resize_mode = "upper_bound"
|
| 121 |
+
normalization = NormalizeImage(
|
| 122 |
+
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
|
| 123 |
+
)
|
| 124 |
+
|
| 125 |
+
elif model_type == "midas_v21_small":
|
| 126 |
+
model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True,
|
| 127 |
+
non_negative=True, blocks={'expand': True})
|
| 128 |
+
net_w, net_h = 256, 256
|
| 129 |
+
resize_mode = "upper_bound"
|
| 130 |
+
normalization = NormalizeImage(
|
| 131 |
+
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
|
| 132 |
+
)
|
| 133 |
+
|
| 134 |
+
else:
|
| 135 |
+
print(f"model_type '{model_type}' not implemented, use: --model_type large")
|
| 136 |
+
assert False
|
| 137 |
+
|
| 138 |
+
transform = Compose(
|
| 139 |
+
[
|
| 140 |
+
Resize(
|
| 141 |
+
net_w,
|
| 142 |
+
net_h,
|
| 143 |
+
resize_target=None,
|
| 144 |
+
keep_aspect_ratio=True,
|
| 145 |
+
ensure_multiple_of=32,
|
| 146 |
+
resize_method=resize_mode,
|
| 147 |
+
image_interpolation_method=cv2.INTER_CUBIC,
|
| 148 |
+
),
|
| 149 |
+
normalization,
|
| 150 |
+
PrepareForNet(),
|
| 151 |
+
]
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
return model.eval(), transform
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
class MiDaSInference(nn.Module):
|
| 158 |
+
MODEL_TYPES_TORCH_HUB = [
|
| 159 |
+
"DPT_Large",
|
| 160 |
+
"DPT_Hybrid",
|
| 161 |
+
"MiDaS_small"
|
| 162 |
+
]
|
| 163 |
+
MODEL_TYPES_ISL = [
|
| 164 |
+
"dpt_large",
|
| 165 |
+
"dpt_hybrid",
|
| 166 |
+
"midas_v21",
|
| 167 |
+
"midas_v21_small",
|
| 168 |
+
]
|
| 169 |
+
|
| 170 |
+
def __init__(self, model_type):
|
| 171 |
+
super().__init__()
|
| 172 |
+
assert (model_type in self.MODEL_TYPES_ISL)
|
| 173 |
+
model, _ = load_model(model_type)
|
| 174 |
+
self.model = model
|
| 175 |
+
self.model.train = disabled_train
|
| 176 |
+
|
| 177 |
+
def forward(self, x):
|
| 178 |
+
with torch.no_grad():
|
| 179 |
+
prediction = self.model(x)
|
| 180 |
+
return prediction
|
| 181 |
+
|
RAVE-main/annotator/midas/midas/__init__.py
ADDED
|
File without changes
|
RAVE-main/annotator/midas/midas/__pycache__/__init__.cpython-38.pyc
ADDED
|
Binary file (152 Bytes). View file
|
|
|
RAVE-main/annotator/midas/midas/__pycache__/base_model.cpython-38.pyc
ADDED
|
Binary file (678 Bytes). View file
|
|
|
RAVE-main/annotator/midas/midas/__pycache__/blocks.cpython-38.pyc
ADDED
|
Binary file (7.37 kB). View file
|
|
|
RAVE-main/annotator/midas/midas/__pycache__/dpt_depth.cpython-38.pyc
ADDED
|
Binary file (2.87 kB). View file
|
|
|
RAVE-main/annotator/midas/midas/__pycache__/midas_net.cpython-38.pyc
ADDED
|
Binary file (2.58 kB). View file
|
|
|
RAVE-main/annotator/midas/midas/__pycache__/midas_net_custom.cpython-38.pyc
ADDED
|
Binary file (3.7 kB). View file
|
|
|
RAVE-main/annotator/midas/midas/__pycache__/transforms.cpython-38.pyc
ADDED
|
Binary file (5.7 kB). View file
|
|
|
RAVE-main/annotator/midas/midas/__pycache__/vit.cpython-38.pyc
ADDED
|
Binary file (9.71 kB). View file
|
|
|
RAVE-main/annotator/midas/midas/base_model.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class BaseModel(torch.nn.Module):
|
| 5 |
+
def load(self, path):
|
| 6 |
+
"""Load model from file.
|
| 7 |
+
|
| 8 |
+
Args:
|
| 9 |
+
path (str): file path
|
| 10 |
+
"""
|
| 11 |
+
parameters = torch.load(path, map_location=torch.device('cpu'))
|
| 12 |
+
|
| 13 |
+
if "optimizer" in parameters:
|
| 14 |
+
parameters = parameters["model"]
|
| 15 |
+
|
| 16 |
+
self.load_state_dict(parameters)
|
RAVE-main/annotator/midas/midas/blocks.py
ADDED
|
@@ -0,0 +1,342 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
|
| 4 |
+
from .vit import (
|
| 5 |
+
_make_pretrained_vitb_rn50_384,
|
| 6 |
+
_make_pretrained_vitl16_384,
|
| 7 |
+
_make_pretrained_vitb16_384,
|
| 8 |
+
forward_vit,
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore",):
|
| 12 |
+
if backbone == "vitl16_384":
|
| 13 |
+
pretrained = _make_pretrained_vitl16_384(
|
| 14 |
+
use_pretrained, hooks=hooks, use_readout=use_readout
|
| 15 |
+
)
|
| 16 |
+
scratch = _make_scratch(
|
| 17 |
+
[256, 512, 1024, 1024], features, groups=groups, expand=expand
|
| 18 |
+
) # ViT-L/16 - 85.0% Top1 (backbone)
|
| 19 |
+
elif backbone == "vitb_rn50_384":
|
| 20 |
+
pretrained = _make_pretrained_vitb_rn50_384(
|
| 21 |
+
use_pretrained,
|
| 22 |
+
hooks=hooks,
|
| 23 |
+
use_vit_only=use_vit_only,
|
| 24 |
+
use_readout=use_readout,
|
| 25 |
+
)
|
| 26 |
+
scratch = _make_scratch(
|
| 27 |
+
[256, 512, 768, 768], features, groups=groups, expand=expand
|
| 28 |
+
) # ViT-H/16 - 85.0% Top1 (backbone)
|
| 29 |
+
elif backbone == "vitb16_384":
|
| 30 |
+
pretrained = _make_pretrained_vitb16_384(
|
| 31 |
+
use_pretrained, hooks=hooks, use_readout=use_readout
|
| 32 |
+
)
|
| 33 |
+
scratch = _make_scratch(
|
| 34 |
+
[96, 192, 384, 768], features, groups=groups, expand=expand
|
| 35 |
+
) # ViT-B/16 - 84.6% Top1 (backbone)
|
| 36 |
+
elif backbone == "resnext101_wsl":
|
| 37 |
+
pretrained = _make_pretrained_resnext101_wsl(use_pretrained)
|
| 38 |
+
scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand) # efficientnet_lite3
|
| 39 |
+
elif backbone == "efficientnet_lite3":
|
| 40 |
+
pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable)
|
| 41 |
+
scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand) # efficientnet_lite3
|
| 42 |
+
else:
|
| 43 |
+
print(f"Backbone '{backbone}' not implemented")
|
| 44 |
+
assert False
|
| 45 |
+
|
| 46 |
+
return pretrained, scratch
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def _make_scratch(in_shape, out_shape, groups=1, expand=False):
|
| 50 |
+
scratch = nn.Module()
|
| 51 |
+
|
| 52 |
+
out_shape1 = out_shape
|
| 53 |
+
out_shape2 = out_shape
|
| 54 |
+
out_shape3 = out_shape
|
| 55 |
+
out_shape4 = out_shape
|
| 56 |
+
if expand==True:
|
| 57 |
+
out_shape1 = out_shape
|
| 58 |
+
out_shape2 = out_shape*2
|
| 59 |
+
out_shape3 = out_shape*4
|
| 60 |
+
out_shape4 = out_shape*8
|
| 61 |
+
|
| 62 |
+
scratch.layer1_rn = nn.Conv2d(
|
| 63 |
+
in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
|
| 64 |
+
)
|
| 65 |
+
scratch.layer2_rn = nn.Conv2d(
|
| 66 |
+
in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
|
| 67 |
+
)
|
| 68 |
+
scratch.layer3_rn = nn.Conv2d(
|
| 69 |
+
in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
|
| 70 |
+
)
|
| 71 |
+
scratch.layer4_rn = nn.Conv2d(
|
| 72 |
+
in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
|
| 73 |
+
)
|
| 74 |
+
|
| 75 |
+
return scratch
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False):
|
| 79 |
+
efficientnet = torch.hub.load(
|
| 80 |
+
"rwightman/gen-efficientnet-pytorch",
|
| 81 |
+
"tf_efficientnet_lite3",
|
| 82 |
+
pretrained=use_pretrained,
|
| 83 |
+
exportable=exportable
|
| 84 |
+
)
|
| 85 |
+
return _make_efficientnet_backbone(efficientnet)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def _make_efficientnet_backbone(effnet):
|
| 89 |
+
pretrained = nn.Module()
|
| 90 |
+
|
| 91 |
+
pretrained.layer1 = nn.Sequential(
|
| 92 |
+
effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2]
|
| 93 |
+
)
|
| 94 |
+
pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3])
|
| 95 |
+
pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5])
|
| 96 |
+
pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9])
|
| 97 |
+
|
| 98 |
+
return pretrained
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def _make_resnet_backbone(resnet):
|
| 102 |
+
pretrained = nn.Module()
|
| 103 |
+
pretrained.layer1 = nn.Sequential(
|
| 104 |
+
resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1
|
| 105 |
+
)
|
| 106 |
+
|
| 107 |
+
pretrained.layer2 = resnet.layer2
|
| 108 |
+
pretrained.layer3 = resnet.layer3
|
| 109 |
+
pretrained.layer4 = resnet.layer4
|
| 110 |
+
|
| 111 |
+
return pretrained
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def _make_pretrained_resnext101_wsl(use_pretrained):
|
| 115 |
+
resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")
|
| 116 |
+
return _make_resnet_backbone(resnet)
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
class Interpolate(nn.Module):
|
| 121 |
+
"""Interpolation module.
|
| 122 |
+
"""
|
| 123 |
+
|
| 124 |
+
def __init__(self, scale_factor, mode, align_corners=False):
|
| 125 |
+
"""Init.
|
| 126 |
+
|
| 127 |
+
Args:
|
| 128 |
+
scale_factor (float): scaling
|
| 129 |
+
mode (str): interpolation mode
|
| 130 |
+
"""
|
| 131 |
+
super(Interpolate, self).__init__()
|
| 132 |
+
|
| 133 |
+
self.interp = nn.functional.interpolate
|
| 134 |
+
self.scale_factor = scale_factor
|
| 135 |
+
self.mode = mode
|
| 136 |
+
self.align_corners = align_corners
|
| 137 |
+
|
| 138 |
+
def forward(self, x):
|
| 139 |
+
"""Forward pass.
|
| 140 |
+
|
| 141 |
+
Args:
|
| 142 |
+
x (tensor): input
|
| 143 |
+
|
| 144 |
+
Returns:
|
| 145 |
+
tensor: interpolated data
|
| 146 |
+
"""
|
| 147 |
+
|
| 148 |
+
x = self.interp(
|
| 149 |
+
x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners
|
| 150 |
+
)
|
| 151 |
+
|
| 152 |
+
return x
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
class ResidualConvUnit(nn.Module):
|
| 156 |
+
"""Residual convolution module.
|
| 157 |
+
"""
|
| 158 |
+
|
| 159 |
+
def __init__(self, features):
|
| 160 |
+
"""Init.
|
| 161 |
+
|
| 162 |
+
Args:
|
| 163 |
+
features (int): number of features
|
| 164 |
+
"""
|
| 165 |
+
super().__init__()
|
| 166 |
+
|
| 167 |
+
self.conv1 = nn.Conv2d(
|
| 168 |
+
features, features, kernel_size=3, stride=1, padding=1, bias=True
|
| 169 |
+
)
|
| 170 |
+
|
| 171 |
+
self.conv2 = nn.Conv2d(
|
| 172 |
+
features, features, kernel_size=3, stride=1, padding=1, bias=True
|
| 173 |
+
)
|
| 174 |
+
|
| 175 |
+
self.relu = nn.ReLU(inplace=True)
|
| 176 |
+
|
| 177 |
+
def forward(self, x):
|
| 178 |
+
"""Forward pass.
|
| 179 |
+
|
| 180 |
+
Args:
|
| 181 |
+
x (tensor): input
|
| 182 |
+
|
| 183 |
+
Returns:
|
| 184 |
+
tensor: output
|
| 185 |
+
"""
|
| 186 |
+
out = self.relu(x)
|
| 187 |
+
out = self.conv1(out)
|
| 188 |
+
out = self.relu(out)
|
| 189 |
+
out = self.conv2(out)
|
| 190 |
+
|
| 191 |
+
return out + x
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
class FeatureFusionBlock(nn.Module):
|
| 195 |
+
"""Feature fusion block.
|
| 196 |
+
"""
|
| 197 |
+
|
| 198 |
+
def __init__(self, features):
|
| 199 |
+
"""Init.
|
| 200 |
+
|
| 201 |
+
Args:
|
| 202 |
+
features (int): number of features
|
| 203 |
+
"""
|
| 204 |
+
super(FeatureFusionBlock, self).__init__()
|
| 205 |
+
|
| 206 |
+
self.resConfUnit1 = ResidualConvUnit(features)
|
| 207 |
+
self.resConfUnit2 = ResidualConvUnit(features)
|
| 208 |
+
|
| 209 |
+
def forward(self, *xs):
|
| 210 |
+
"""Forward pass.
|
| 211 |
+
|
| 212 |
+
Returns:
|
| 213 |
+
tensor: output
|
| 214 |
+
"""
|
| 215 |
+
output = xs[0]
|
| 216 |
+
|
| 217 |
+
if len(xs) == 2:
|
| 218 |
+
output += self.resConfUnit1(xs[1])
|
| 219 |
+
|
| 220 |
+
output = self.resConfUnit2(output)
|
| 221 |
+
|
| 222 |
+
output = nn.functional.interpolate(
|
| 223 |
+
output, scale_factor=2, mode="bilinear", align_corners=True
|
| 224 |
+
)
|
| 225 |
+
|
| 226 |
+
return output
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
class ResidualConvUnit_custom(nn.Module):
|
| 232 |
+
"""Residual convolution module.
|
| 233 |
+
"""
|
| 234 |
+
|
| 235 |
+
def __init__(self, features, activation, bn):
|
| 236 |
+
"""Init.
|
| 237 |
+
|
| 238 |
+
Args:
|
| 239 |
+
features (int): number of features
|
| 240 |
+
"""
|
| 241 |
+
super().__init__()
|
| 242 |
+
|
| 243 |
+
self.bn = bn
|
| 244 |
+
|
| 245 |
+
self.groups=1
|
| 246 |
+
|
| 247 |
+
self.conv1 = nn.Conv2d(
|
| 248 |
+
features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
|
| 249 |
+
)
|
| 250 |
+
|
| 251 |
+
self.conv2 = nn.Conv2d(
|
| 252 |
+
features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
|
| 253 |
+
)
|
| 254 |
+
|
| 255 |
+
if self.bn==True:
|
| 256 |
+
self.bn1 = nn.BatchNorm2d(features)
|
| 257 |
+
self.bn2 = nn.BatchNorm2d(features)
|
| 258 |
+
|
| 259 |
+
self.activation = activation
|
| 260 |
+
|
| 261 |
+
self.skip_add = nn.quantized.FloatFunctional()
|
| 262 |
+
|
| 263 |
+
def forward(self, x):
|
| 264 |
+
"""Forward pass.
|
| 265 |
+
|
| 266 |
+
Args:
|
| 267 |
+
x (tensor): input
|
| 268 |
+
|
| 269 |
+
Returns:
|
| 270 |
+
tensor: output
|
| 271 |
+
"""
|
| 272 |
+
|
| 273 |
+
out = self.activation(x)
|
| 274 |
+
out = self.conv1(out)
|
| 275 |
+
if self.bn==True:
|
| 276 |
+
out = self.bn1(out)
|
| 277 |
+
|
| 278 |
+
out = self.activation(out)
|
| 279 |
+
out = self.conv2(out)
|
| 280 |
+
if self.bn==True:
|
| 281 |
+
out = self.bn2(out)
|
| 282 |
+
|
| 283 |
+
if self.groups > 1:
|
| 284 |
+
out = self.conv_merge(out)
|
| 285 |
+
|
| 286 |
+
return self.skip_add.add(out, x)
|
| 287 |
+
|
| 288 |
+
# return out + x
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
class FeatureFusionBlock_custom(nn.Module):
|
| 292 |
+
"""Feature fusion block.
|
| 293 |
+
"""
|
| 294 |
+
|
| 295 |
+
def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True):
|
| 296 |
+
"""Init.
|
| 297 |
+
|
| 298 |
+
Args:
|
| 299 |
+
features (int): number of features
|
| 300 |
+
"""
|
| 301 |
+
super(FeatureFusionBlock_custom, self).__init__()
|
| 302 |
+
|
| 303 |
+
self.deconv = deconv
|
| 304 |
+
self.align_corners = align_corners
|
| 305 |
+
|
| 306 |
+
self.groups=1
|
| 307 |
+
|
| 308 |
+
self.expand = expand
|
| 309 |
+
out_features = features
|
| 310 |
+
if self.expand==True:
|
| 311 |
+
out_features = features//2
|
| 312 |
+
|
| 313 |
+
self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
|
| 314 |
+
|
| 315 |
+
self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
|
| 316 |
+
self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)
|
| 317 |
+
|
| 318 |
+
self.skip_add = nn.quantized.FloatFunctional()
|
| 319 |
+
|
| 320 |
+
def forward(self, *xs):
|
| 321 |
+
"""Forward pass.
|
| 322 |
+
|
| 323 |
+
Returns:
|
| 324 |
+
tensor: output
|
| 325 |
+
"""
|
| 326 |
+
output = xs[0]
|
| 327 |
+
|
| 328 |
+
if len(xs) == 2:
|
| 329 |
+
res = self.resConfUnit1(xs[1])
|
| 330 |
+
output = self.skip_add.add(output, res)
|
| 331 |
+
# output += res
|
| 332 |
+
|
| 333 |
+
output = self.resConfUnit2(output)
|
| 334 |
+
|
| 335 |
+
output = nn.functional.interpolate(
|
| 336 |
+
output, scale_factor=2, mode="bilinear", align_corners=self.align_corners
|
| 337 |
+
)
|
| 338 |
+
|
| 339 |
+
output = self.out_conv(output)
|
| 340 |
+
|
| 341 |
+
return output
|
| 342 |
+
|
RAVE-main/annotator/midas/midas/dpt_depth.py
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
|
| 5 |
+
from .base_model import BaseModel
|
| 6 |
+
from .blocks import (
|
| 7 |
+
FeatureFusionBlock,
|
| 8 |
+
FeatureFusionBlock_custom,
|
| 9 |
+
Interpolate,
|
| 10 |
+
_make_encoder,
|
| 11 |
+
forward_vit,
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def _make_fusion_block(features, use_bn):
|
| 16 |
+
return FeatureFusionBlock_custom(
|
| 17 |
+
features,
|
| 18 |
+
nn.ReLU(False),
|
| 19 |
+
deconv=False,
|
| 20 |
+
bn=use_bn,
|
| 21 |
+
expand=False,
|
| 22 |
+
align_corners=True,
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class DPT(BaseModel):
    """Dense Prediction Transformer: ViT encoder + RefineNet-style fusion decoder.

    Args:
        head (nn.Module): task-specific output head applied to the fused features.
        features (int, optional): channel width of the fusion decoder. Defaults to 256.
        backbone (str, optional): ViT backbone variant; must be a key of the
            `hooks` table below. Defaults to "vitb_rn50_384".
        readout (str, optional): how the ViT readout token is folded into the
            patch tokens ("ignore", "add" or "project"). Defaults to "project".
        channels_last (bool, optional): convert the input to channels-last
            memory format before the encoder. Defaults to False.
        use_bn (bool, optional): use batch norm inside the fusion blocks.
            Defaults to False.
    """

    def __init__(
        self,
        head,
        features=256,
        backbone="vitb_rn50_384",
        readout="project",
        channels_last=False,
        use_bn=False,
    ):

        super(DPT, self).__init__()

        self.channels_last = channels_last

        # Transformer block indices at which activations are hooked, per backbone.
        hooks = {
            "vitb_rn50_384": [0, 1, 8, 11],
            "vitb16_384": [2, 5, 8, 11],
            "vitl16_384": [5, 11, 17, 23],
        }

        # Instantiate backbone and reassemble blocks
        self.pretrained, self.scratch = _make_encoder(
            backbone,
            features,
            False,  # Set to true of you want to train from scratch, uses ImageNet weights
            groups=1,
            expand=False,
            exportable=False,
            hooks=hooks[backbone],
            use_readout=readout,
        )

        self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet4 = _make_fusion_block(features, use_bn)

        self.scratch.output_conv = head

    def forward(self, x):
        """Encode `x` with the ViT, fuse multi-scale features coarse-to-fine,
        then apply the task head.

        Args:
            x (tensor): input image batch (N, C, H, W).

        Returns:
            tensor: output of `self.scratch.output_conv`.
        """
        if self.channels_last == True:
            # BUG FIX: Tensor.contiguous() returns a new tensor and is not
            # in-place; the original discarded the result, so the
            # channels_last conversion silently never took effect.
            x = x.contiguous(memory_format=torch.channels_last)

        layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)

        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)

        # Decode coarse-to-fine: each refinenet fuses the previous decoder path
        # with the next-higher-resolution encoder feature.
        path_4 = self.scratch.refinenet4(layer_4_rn)
        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)

        out = self.scratch.output_conv(path_1)

        return out
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class DPTDepthModel(DPT):
    """DPT variant with a monocular depth prediction head.

    Args:
        path (str, optional): checkpoint to load after construction. Defaults to None.
        non_negative (bool, optional): clamp the predicted depth to >= 0 with a
            final ReLU. Defaults to True.
        **kwargs: forwarded to the DPT base constructor.
    """

    def __init__(self, path=None, non_negative=True, **kwargs):
        features = kwargs.get("features", 256)

        # Depth head: halve channels, upsample x2, reduce to a single channel.
        head = nn.Sequential(
            nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
            Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
            nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
            nn.Identity(),
        )

        super().__init__(head, **kwargs)

        if path is not None:
            self.load(path)

    def forward(self, x):
        """Predict depth and drop the singleton channel dimension."""
        prediction = super().forward(x)
        return prediction.squeeze(dim=1)
|
| 109 |
+
|
RAVE-main/annotator/midas/midas/midas_net.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""MidashNet: Network for monocular depth estimation trained by mixing several datasets.
|
| 2 |
+
This file contains code that is adapted from
|
| 3 |
+
https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
|
| 4 |
+
"""
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
|
| 8 |
+
from .base_model import BaseModel
|
| 9 |
+
from .blocks import FeatureFusionBlock, Interpolate, _make_encoder
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class MidasNet(BaseModel):
    """Network for monocular depth estimation (ResNeXt101-WSL encoder)."""

    def __init__(self, path=None, features=256, non_negative=True):
        """Init.

        Args:
            path (str, optional): Path to saved model weights. Defaults to None.
            features (int, optional): Number of decoder feature channels.
                Defaults to 256.
            non_negative (bool, optional): Clamp the output to >= 0 with a
                final ReLU. Defaults to True.
        """
        print("Loading weights: ", path)

        super(MidasNet, self).__init__()

        # Use ImageNet-pretrained encoder weights only when no checkpoint is given.
        use_pretrained = path is not None

        self.pretrained, self.scratch = _make_encoder(
            backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained
        )

        self.scratch.refinenet4 = FeatureFusionBlock(features)
        self.scratch.refinenet3 = FeatureFusionBlock(features)
        self.scratch.refinenet2 = FeatureFusionBlock(features)
        self.scratch.refinenet1 = FeatureFusionBlock(features)

        # Output head: reduce channels, upsample x2, regress a single channel.
        self.scratch.output_conv = nn.Sequential(
            nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
            Interpolate(scale_factor=2, mode="bilinear"),
            nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
        )

        if path:
            self.load(path)

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input data (image)

        Returns:
            tensor: depth
        """
        # Run the four encoder stages, keeping every intermediate feature map.
        encodings = []
        feat = x
        for stage in (
            self.pretrained.layer1,
            self.pretrained.layer2,
            self.pretrained.layer3,
            self.pretrained.layer4,
        ):
            feat = stage(feat)
            encodings.append(feat)

        # Project each encoder feature to the common decoder width.
        reduced = [
            self.scratch.layer1_rn(encodings[0]),
            self.scratch.layer2_rn(encodings[1]),
            self.scratch.layer3_rn(encodings[2]),
            self.scratch.layer4_rn(encodings[3]),
        ]

        # Fuse coarse-to-fine.
        fused = self.scratch.refinenet4(reduced[3])
        fused = self.scratch.refinenet3(fused, reduced[2])
        fused = self.scratch.refinenet2(fused, reduced[1])
        fused = self.scratch.refinenet1(fused, reduced[0])

        out = self.scratch.output_conv(fused)

        return torch.squeeze(out, dim=1)
|
RAVE-main/annotator/midas/midas/midas_net_custom.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""MidashNet: Network for monocular depth estimation trained by mixing several datasets.
|
| 2 |
+
This file contains code that is adapted from
|
| 3 |
+
https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
|
| 4 |
+
"""
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
|
| 8 |
+
from .base_model import BaseModel
|
| 9 |
+
from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class MidasNet_small(BaseModel):
    """Network for monocular depth estimation (lightweight variant).

    Args:
        path (str, optional): Path to saved model weights. Defaults to None.
        features (int, optional): Base number of decoder channels. Defaults to 64.
        backbone (str, optional): Encoder backbone name. Defaults to
            "efficientnet_lite3".
        non_negative (bool, optional): Clamp output depth to >= 0. Defaults to True.
        exportable (bool, optional): Build an export-friendly encoder. Defaults to True.
        channels_last (bool, optional): Convert inputs to channels-last memory
            format in forward(). Defaults to False.
        align_corners (bool, optional): align_corners for decoder upsampling.
            Defaults to True.
        blocks (dict, optional): Decoder options; "expand" doubles channel width
            per stage. NOTE: mutable default argument — safe here because it is
            only read, never mutated.
    """

    def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True,
        blocks={'expand': True}):
        print("Loading weights: ", path)

        super(MidasNet_small, self).__init__()

        # Use pretrained encoder weights only when no checkpoint is provided.
        use_pretrained = False if path else True

        self.channels_last = channels_last
        self.blocks = blocks
        self.backbone = backbone

        self.groups = 1

        # Per-stage decoder widths; optionally expanded (x1, x2, x4, x8).
        features1 = features
        features2 = features
        features3 = features
        features4 = features
        self.expand = False
        if "expand" in self.blocks and self.blocks['expand'] == True:
            self.expand = True
            features1 = features
            features2 = features * 2
            features3 = features * 4
            features4 = features * 8

        self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable)

        self.scratch.activation = nn.ReLU(False)

        self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
        self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
        self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
        self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)

        # Output head: halve channels, upsample x2, regress a single channel.
        self.scratch.output_conv = nn.Sequential(
            nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1, groups=self.groups),
            Interpolate(scale_factor=2, mode="bilinear"),
            nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
            self.scratch.activation,
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
            nn.Identity(),
        )

        if path:
            self.load(path)

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input data (image)

        Returns:
            tensor: depth
        """
        if self.channels_last == True:
            print("self.channels_last = ", self.channels_last)
            # BUG FIX: Tensor.contiguous() is not in-place; the original
            # discarded the result, so channels_last never took effect.
            x = x.contiguous(memory_format=torch.channels_last)

        layer_1 = self.pretrained.layer1(x)
        layer_2 = self.pretrained.layer2(layer_1)
        layer_3 = self.pretrained.layer3(layer_2)
        layer_4 = self.pretrained.layer4(layer_3)

        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)

        # Fuse coarse-to-fine.
        path_4 = self.scratch.refinenet4(layer_4_rn)
        path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)

        out = self.scratch.output_conv(path_1)

        return torch.squeeze(out, dim=1)
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def fuse_model(m):
    """Fuse Conv2d+BatchNorm2d(+ReLU) runs in `m` in-place, for quantization.

    Walks the module tree in registration order with a two-step look-behind and
    calls torch.quantization.fuse_modules on each Conv->BN(->ReLU) run.
    The model should be in eval() mode (required by fuse_modules' defaults).

    Args:
        m (nn.Module): model to fuse; modified in place.
    """
    # BUG FIX: the look-behind state was initialized with nn.Identity()
    # *instances* but is compared against classes below (`== nn.Conv2d`), and
    # is otherwise always assigned type(module). Initialize with the class
    # object so the sentinel has the same kind as every later value.
    prev_previous_type = nn.Identity
    prev_previous_name = ''
    previous_type = nn.Identity
    previous_name = ''
    for name, module in m.named_modules():
        if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU:
            # print("FUSED ", prev_previous_name, previous_name, name)
            torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True)
        elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d:
            # print("FUSED ", prev_previous_name, previous_name)
            torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True)
        # elif previous_type == nn.Conv2d and type(module) == nn.ReLU:
        #    print("FUSED ", previous_name, name)
        #    torch.quantization.fuse_modules(m, [previous_name, name], inplace=True)

        prev_previous_type = previous_type
        prev_previous_name = previous_name
        previous_type = type(module)
        previous_name = name
|
RAVE-main/annotator/midas/midas/transforms.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import cv2
|
| 3 |
+
import math
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
    """Resize the sample so every dimension is at least `size`. Keeps aspect ratio.

    Resizes sample["image"], sample["disparity"] and sample["mask"] in place
    using the larger of the two scale factors, so both output dimensions are
    >= the requested minimum.

    Args:
        sample (dict): sample with "image", "disparity" and "mask" entries
            (HxW leading dimensions).
        size (tuple): minimum (height, width).

    Returns:
        tuple: the new (height, width) — BUT note the early-exit below returns
        the `sample` dict instead when no resize is needed.
        NOTE(review): inconsistent return types (dict vs tuple) look like an
        upstream quirk; callers may depend on it, so it is documented rather
        than changed — confirm before normalizing.
    """
    # Shape is taken from the disparity map, not the image.
    shape = list(sample["disparity"].shape)

    # Already large enough in both dimensions: nothing to do.
    if shape[0] >= size[0] and shape[1] >= size[1]:
        return sample

    scale = [0, 0]
    scale[0] = size[0] / shape[0]
    scale[1] = size[1] / shape[1]

    # Use the larger factor so both dimensions reach the minimum.
    scale = max(scale)

    shape[0] = math.ceil(scale * shape[0])
    shape[1] = math.ceil(scale * shape[1])

    # resize (cv2 expects (width, height), hence shape[::-1])
    sample["image"] = cv2.resize(
        sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
    )

    # Nearest-neighbor for disparity/mask to avoid blending label-like values.
    sample["disparity"] = cv2.resize(
        sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
    )
    sample["mask"] = cv2.resize(
        sample["mask"].astype(np.float32),
        tuple(shape[::-1]),
        interpolation=cv2.INTER_NEAREST,
    )
    sample["mask"] = sample["mask"].astype(bool)

    return tuple(shape)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
class Resize(object):
    """Resize sample to given size (width, height).

    Resizes sample["image"] and, optionally, the targets ("disparity",
    "depth", "mask") according to the configured resize method.
    """

    def __init__(
        self,
        width,
        height,
        resize_target=True,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resize_method="lower_bound",
        image_interpolation_method=cv2.INTER_AREA,
    ):
        """Init.

        Args:
            width (int): desired output width
            height (int): desired output height
            resize_target (bool, optional):
                True: Resize the full sample (image, mask, target).
                False: Resize image only.
                Defaults to True.
            keep_aspect_ratio (bool, optional):
                True: Keep the aspect ratio of the input sample.
                Output sample might not have the given width and height, and
                resize behaviour depends on the parameter 'resize_method'.
                Defaults to False.
            ensure_multiple_of (int, optional):
                Output width and height is constrained to be multiple of this parameter.
                Defaults to 1.
            resize_method (str, optional):
                "lower_bound": Output will be at least as large as the given size.
                "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
                "minimal": Scale as least as possible. (Output size might be smaller than given size.)
                Defaults to "lower_bound".
            image_interpolation_method (int, optional): cv2 interpolation flag
                used for the image (targets always use INTER_NEAREST).
        """
        self.__width = width
        self.__height = height

        self.__resize_target = resize_target
        self.__keep_aspect_ratio = keep_aspect_ratio
        self.__multiple_of = ensure_multiple_of
        self.__resize_method = resize_method
        self.__image_interpolation_method = image_interpolation_method

    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
        # Round to the nearest multiple of self.__multiple_of, then fall back to
        # floor (to respect max_val) or ceil (to respect min_val) if needed.
        y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)

        if max_val is not None and y > max_val:
            y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)

        if y < min_val:
            y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)

        return y

    def get_size(self, width, height):
        """Compute the output (width, height) for an input of the given size."""
        # determine new height and width
        scale_height = self.__height / height
        scale_width = self.__width / width

        if self.__keep_aspect_ratio:
            # Keep aspect ratio: use a single scale for both dimensions,
            # chosen according to the resize method.
            if self.__resize_method == "lower_bound":
                # scale such that output size is lower bound
                if scale_width > scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "upper_bound":
                # scale such that output size is upper bound
                if scale_width < scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "minimal":
                # scale as least as possbile
                if abs(1 - scale_width) < abs(1 - scale_height):
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            else:
                raise ValueError(
                    f"resize_method {self.__resize_method} not implemented"
                )

        # Apply the multiple-of constraint; min/max bounds keep lower_bound
        # outputs >= target and upper_bound outputs <= target.
        if self.__resize_method == "lower_bound":
            new_height = self.constrain_to_multiple_of(
                scale_height * height, min_val=self.__height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, min_val=self.__width
            )
        elif self.__resize_method == "upper_bound":
            new_height = self.constrain_to_multiple_of(
                scale_height * height, max_val=self.__height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, max_val=self.__width
            )
        elif self.__resize_method == "minimal":
            new_height = self.constrain_to_multiple_of(scale_height * height)
            new_width = self.constrain_to_multiple_of(scale_width * width)
        else:
            raise ValueError(f"resize_method {self.__resize_method} not implemented")

        return (new_width, new_height)

    def __call__(self, sample):
        """Resize sample["image"] (and targets if configured) in place."""
        width, height = self.get_size(
            sample["image"].shape[1], sample["image"].shape[0]
        )

        # resize sample
        sample["image"] = cv2.resize(
            sample["image"],
            (width, height),
            interpolation=self.__image_interpolation_method,
        )

        if self.__resize_target:
            # Targets use nearest-neighbor to avoid interpolating label-like values.
            if "disparity" in sample:
                sample["disparity"] = cv2.resize(
                    sample["disparity"],
                    (width, height),
                    interpolation=cv2.INTER_NEAREST,
                )

            if "depth" in sample:
                sample["depth"] = cv2.resize(
                    sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST
                )

            sample["mask"] = cv2.resize(
                sample["mask"].astype(np.float32),
                (width, height),
                interpolation=cv2.INTER_NEAREST,
            )
            sample["mask"] = sample["mask"].astype(bool)

        return sample
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
class NormalizeImage(object):
    """Normalize the "image" entry of a sample with a fixed mean and std."""

    def __init__(self, mean, std):
        self.__mean = mean
        self.__std = std

    def __call__(self, sample):
        # Standard (x - mean) / std normalization, applied in place.
        normalized = (sample["image"] - self.__mean) / self.__std
        sample["image"] = normalized
        return sample
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
class PrepareForNet(object):
    """Convert a sample's arrays into contiguous float32 network inputs."""

    def __init__(self):
        pass

    def __call__(self, sample):
        # Image: HWC -> CHW, contiguous, float32.
        chw = np.transpose(sample["image"], (2, 0, 1))
        sample["image"] = np.ascontiguousarray(chw).astype(np.float32)

        # Optional targets: cast to contiguous float32 when present.
        for key in ("mask", "disparity", "depth"):
            if key in sample:
                sample[key] = np.ascontiguousarray(sample[key].astype(np.float32))

        return sample
|
RAVE-main/annotator/midas/midas/vit.py
ADDED
|
@@ -0,0 +1,491 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import timm
|
| 4 |
+
import types
|
| 5 |
+
import math
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class Slice(nn.Module):
    """Drop the leading readout token(s) from a (batch, tokens, dim) sequence."""

    def __init__(self, start_index=1):
        super(Slice, self).__init__()
        self.start_index = start_index

    def forward(self, x):
        # Keep only the patch tokens.
        return x[:, self.start_index:]
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class AddReadout(nn.Module):
    """Fold the readout token(s) into every patch token by addition."""

    def __init__(self, start_index=1):
        super(AddReadout, self).__init__()
        self.start_index = start_index

    def forward(self, x):
        patches = x[:, self.start_index:]
        if self.start_index == 2:
            # Two readout tokens (e.g. cls + distillation): use their mean.
            readout = (x[:, 0] + x[:, 1]) / 2
        else:
            readout = x[:, 0]
        return patches + readout.unsqueeze(1)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class ProjectReadout(nn.Module):
    """Fold the readout token into each patch token via a learned projection.

    Concatenates [patch, readout] per token and projects 2*in_features back
    down to in_features with Linear + GELU.
    """

    def __init__(self, in_features, start_index=1):
        super(ProjectReadout, self).__init__()
        self.start_index = start_index

        self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())

    def forward(self, x):
        patches = x[:, self.start_index:]
        # Broadcast the readout token over all patch positions.
        readout = x[:, 0].unsqueeze(1).expand_as(patches)
        return self.project(torch.cat((patches, readout), -1))
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class Transpose(nn.Module):
    """nn.Module wrapper around Tensor.transpose, usable inside nn.Sequential."""

    def __init__(self, dim0, dim1):
        super(Transpose, self).__init__()
        self.dim0 = dim0
        self.dim1 = dim1

    def forward(self, x):
        return x.transpose(self.dim0, self.dim1)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def forward_vit(pretrained, x):
    """Run the hooked ViT encoder and return four reassembled feature maps.

    Args:
        pretrained: module wrapper holding `model` (the ViT with forward hooks
            registered), the shared `activations` dict, and the four
            `act_postprocessN` pipelines.
        x (tensor): input image batch (N, C, H, W).

    Returns:
        tuple: (layer_1, layer_2, layer_3, layer_4) spatial feature maps.
    """
    b, c, h, w = x.shape

    # The return value is unused; running the model populates
    # pretrained.activations via the registered forward hooks.
    glob = pretrained.model.forward_flex(x)

    layer_1 = pretrained.activations["1"]
    layer_2 = pretrained.activations["2"]
    layer_3 = pretrained.activations["3"]
    layer_4 = pretrained.activations["4"]

    # Stages 0-1 of each postprocess pipeline: readout fusion + transpose.
    layer_1 = pretrained.act_postprocess1[0:2](layer_1)
    layer_2 = pretrained.act_postprocess2[0:2](layer_2)
    layer_3 = pretrained.act_postprocess3[0:2](layer_3)
    layer_4 = pretrained.act_postprocess4[0:2](layer_4)

    # Reshape flat token sequences back into a 2D patch grid.
    unflatten = nn.Sequential(
        nn.Unflatten(
            2,
            torch.Size(
                [
                    h // pretrained.model.patch_size[1],
                    w // pretrained.model.patch_size[0],
                ]
            ),
        )
    )

    # Only token-shaped (3D) activations need unflattening; hybrid backbones
    # may already produce 4D spatial maps.
    if layer_1.ndim == 3:
        layer_1 = unflatten(layer_1)
    if layer_2.ndim == 3:
        layer_2 = unflatten(layer_2)
    if layer_3.ndim == 3:
        layer_3 = unflatten(layer_3)
    if layer_4.ndim == 3:
        layer_4 = unflatten(layer_4)

    # Remaining pipeline stages (index 3 onward: the unflatten stage at index 2
    # is skipped because it was handled dynamically above).
    layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1)
    layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2)
    layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3)
    layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4)

    return layer_1, layer_2, layer_3, layer_4
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def _resize_pos_embed(self, posemb, gs_h, gs_w):
|
| 101 |
+
posemb_tok, posemb_grid = (
|
| 102 |
+
posemb[:, : self.start_index],
|
| 103 |
+
posemb[0, self.start_index :],
|
| 104 |
+
)
|
| 105 |
+
|
| 106 |
+
gs_old = int(math.sqrt(len(posemb_grid)))
|
| 107 |
+
|
| 108 |
+
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
|
| 109 |
+
posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
|
| 110 |
+
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)
|
| 111 |
+
|
| 112 |
+
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
|
| 113 |
+
|
| 114 |
+
return posemb
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def forward_flex(self, x):
    """Flexible-resolution ViT forward pass (bound onto a timm ViT instance).

    Unlike the stock forward, the position embedding is resized to match the
    actual input resolution, so non-384x384 inputs work.

    Args:
        x (tensor): input image batch (N, C, H, W); H and W are expected to be
            divisible by the patch size.

    Returns:
        tensor: token sequence after all transformer blocks and final norm.
    """
    b, c, h, w = x.shape

    # Resize the learned position embedding to the input's patch grid.
    pos_embed = self._resize_pos_embed(
        self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
    )

    B = x.shape[0]

    # Hybrid models (e.g. ResNet50+ViT) first run a conv backbone.
    if hasattr(self.patch_embed, "backbone"):
        x = self.patch_embed.backbone(x)
        if isinstance(x, (list, tuple)):
            x = x[-1]  # last feature if backbone outputs list/tuple of features

    # Patchify: conv projection, then flatten spatial dims to a token sequence.
    x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)

    # Prepend the class token (and distillation token for DeiT-style models).
    if getattr(self, "dist_token", None) is not None:
        cls_tokens = self.cls_token.expand(
            B, -1, -1
        )  # stole cls_tokens impl from Phil Wang, thanks
        dist_token = self.dist_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, dist_token, x), dim=1)
    else:
        cls_tokens = self.cls_token.expand(
            B, -1, -1
        )  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)

    x = x + pos_embed
    x = self.pos_drop(x)

    for blk in self.blocks:
        x = blk(x)

    x = self.norm(x)

    return x
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
# Module-level store for hooked intermediate activations, keyed by probe name.
activations = {}


def get_activation(name):
    """Return a forward hook that records a module's output under `name`."""

    def hook(model, input, output):
        activations[name] = output

    return hook
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
def get_readout_oper(vit_features, features, use_readout, start_index=1):
    """Build one readout-token operation per feature level.

    Args:
        vit_features (int): transformer embedding dimension (needed by
            the "project" readout).
        features (list): pyramid feature specs; only its length is used.
        use_readout (str): "ignore" drops the readout token, "add" folds it
            into every patch token, "project" concatenates and projects.
        start_index (int): index of the first patch token.

    Returns:
        list: one readout module per entry in ``features``.

    Raises:
        ValueError: if ``use_readout`` is not one of the supported modes.
    """
    if use_readout == "ignore":
        return [Slice(start_index)] * len(features)
    if use_readout == "add":
        return [AddReadout(start_index)] * len(features)
    if use_readout == "project":
        return [ProjectReadout(vit_features, start_index) for _ in features]
    # Bug fix: the original used `assert False`, which is stripped under
    # `python -O` and would then fall through to an UnboundLocalError.
    raise ValueError(
        "wrong operation for readout token, use_readout can be "
        "'ignore', 'add', or 'project'"
    )
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
def _make_vit_b16_backbone(
    model,
    features=[96, 192, 384, 768],
    size=[384, 384],
    hooks=[2, 5, 8, 11],
    vit_features=768,
    use_readout="ignore",
    start_index=1,
):
    """Wrap a timm ViT-B/16-style model as a 4-level feature backbone.

    Forward hooks on the transformer blocks listed in ``hooks`` capture
    intermediate token activations into the module-level ``activations``
    dict; the ``act_postprocess*`` heads reshape those tokens into
    progressively coarser convolutional feature maps (DPT/MiDaS style).

    Args:
        model: timm VisionTransformer instance.
        features (list[int]): output channels of the four pyramid levels.
        size (list[int]): nominal input resolution used by ``nn.Unflatten``.
        hooks (list[int]): indices of the transformer blocks to tap.
        vit_features (int): transformer embedding dimension.
        use_readout (str): how the readout (cls) token is folded into the
            patch tokens ("ignore", "add" or "project").
        start_index (int): index of the first patch token (2 for distilled
            DeiT, which carries an extra distillation token).

    Returns:
        nn.Module: container exposing ``model``, ``activations`` and the
        four ``act_postprocess`` heads.
    """
    pretrained = nn.Module()

    pretrained.model = model
    # Tap four transformer blocks; their outputs land in `activations`
    # under keys "1".."4".
    pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
    pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
    pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
    pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))

    pretrained.activations = activations

    readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)

    # 32, 48, 136, 384
    # Level 1: tokens -> (vit_features, H/16, W/16), 1x1 conv, 4x upsample.
    pretrained.act_postprocess1 = nn.Sequential(
        readout_oper[0],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[0],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.ConvTranspose2d(
            in_channels=features[0],
            out_channels=features[0],
            kernel_size=4,
            stride=4,
            padding=0,
            bias=True,
            dilation=1,
            groups=1,
        ),
    )

    # Level 2: same layout, but only a 2x upsample.
    pretrained.act_postprocess2 = nn.Sequential(
        readout_oper[1],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[1],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.ConvTranspose2d(
            in_channels=features[1],
            out_channels=features[1],
            kernel_size=2,
            stride=2,
            padding=0,
            bias=True,
            dilation=1,
            groups=1,
        ),
    )

    # Level 3: kept at the native 1/16 resolution.
    pretrained.act_postprocess3 = nn.Sequential(
        readout_oper[2],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[2],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
    )

    # Level 4: strided 3x3 conv halves the resolution to 1/32.
    pretrained.act_postprocess4 = nn.Sequential(
        readout_oper[3],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[3],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.Conv2d(
            in_channels=features[3],
            out_channels=features[3],
            kernel_size=3,
            stride=2,
            padding=1,
        ),
    )

    pretrained.model.start_index = start_index
    pretrained.model.patch_size = [16, 16]

    # We inject this function into the VisionTransformer instances so that
    # we can use it with interpolated position embeddings without modifying the library source.
    pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
    pretrained.model._resize_pos_embed = types.MethodType(
        _resize_pos_embed, pretrained.model
    )

    return pretrained
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None):
    """Build a ViT-L/16 (384px) DPT backbone.

    Args:
        pretrained (bool): load ImageNet weights via timm.
        use_readout (str): readout-token handling ("ignore", "add", "project").
        hooks (list[int], optional): transformer blocks to tap;
            defaults to [5, 11, 17, 23].
    """
    model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)

    # Idiom fix: compare to None with `is`, not `==` (PEP 8).
    hooks = [5, 11, 17, 23] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model,
        features=[256, 512, 1024, 1024],
        hooks=hooks,
        vit_features=1024,
        use_readout=use_readout,
    )
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None):
    """Build a ViT-B/16 (384px) DPT backbone.

    Args:
        pretrained (bool): load ImageNet weights via timm.
        use_readout (str): readout-token handling ("ignore", "add", "project").
        hooks (list[int], optional): transformer blocks to tap;
            defaults to [2, 5, 8, 11].
    """
    model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)

    # Idiom fix: compare to None with `is`, not `==` (PEP 8).
    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
    )
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
def _make_pretrained_deitb16_384(pretrained, use_readout="ignore", hooks=None):
    """Build a DeiT-B/16 (384px) DPT backbone.

    Args:
        pretrained (bool): load ImageNet weights via timm.
        use_readout (str): readout-token handling ("ignore", "add", "project").
        hooks (list[int], optional): transformer blocks to tap;
            defaults to [2, 5, 8, 11].
    """
    model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained)

    # Idiom fix: compare to None with `is`, not `==` (PEP 8).
    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
    )
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
def _make_pretrained_deitb16_distil_384(pretrained, use_readout="ignore", hooks=None):
    """Build a distilled DeiT-B/16 (384px) DPT backbone.

    ``start_index=2`` skips both the class token and the extra
    distillation token carried by the distilled variant.

    Args:
        pretrained (bool): load ImageNet weights via timm.
        use_readout (str): readout-token handling ("ignore", "add", "project").
        hooks (list[int], optional): transformer blocks to tap;
            defaults to [2, 5, 8, 11].
    """
    model = timm.create_model(
        "vit_deit_base_distilled_patch16_384", pretrained=pretrained
    )

    # Idiom fix: compare to None with `is`, not `==` (PEP 8).
    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model,
        features=[96, 192, 384, 768],
        hooks=hooks,
        use_readout=use_readout,
        start_index=2,
    )
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
def _make_vit_b_rn50_backbone(
    model,
    features=[256, 512, 768, 768],
    size=[384, 384],
    hooks=[0, 1, 8, 11],
    vit_features=768,
    use_vit_only=False,
    use_readout="ignore",
    start_index=1,
):
    """Wrap a hybrid ViT + ResNet50 model as a 4-level feature backbone.

    The two finest pyramid levels come either from the ResNet stem stages
    (default) or, when ``use_vit_only`` is set, from early transformer
    blocks; the two coarsest levels always come from transformer blocks.

    Args:
        model: timm hybrid VisionTransformer instance.
        features (list[int]): output channels of the four pyramid levels.
        size (list[int]): nominal input resolution used by ``nn.Unflatten``.
        hooks (list[int]): indices of the transformer blocks to tap.
        vit_features (int): transformer embedding dimension.
        use_vit_only (bool): take levels 1-2 from the transformer instead of
            the ResNet stages.
        use_readout (str): readout-token handling ("ignore", "add", "project").
        start_index (int): index of the first patch token.

    Returns:
        nn.Module: container exposing ``model``, ``activations`` and the
        four ``act_postprocess`` heads.
    """
    pretrained = nn.Module()

    pretrained.model = model

    # Idiom fix: boolean flags are tested directly, not compared `== True`.
    if use_vit_only:
        pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
        pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
    else:
        # Tap the CNN stem stages for the two finest levels.
        pretrained.model.patch_embed.backbone.stages[0].register_forward_hook(
            get_activation("1")
        )
        pretrained.model.patch_embed.backbone.stages[1].register_forward_hook(
            get_activation("2")
        )

    pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
    pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))

    pretrained.activations = activations

    readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)

    if use_vit_only:
        # Tokens from early transformer blocks are upsampled 4x / 2x to
        # stand in for the finer CNN feature maps.
        pretrained.act_postprocess1 = nn.Sequential(
            readout_oper[0],
            Transpose(1, 2),
            nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
            nn.Conv2d(
                in_channels=vit_features,
                out_channels=features[0],
                kernel_size=1,
                stride=1,
                padding=0,
            ),
            nn.ConvTranspose2d(
                in_channels=features[0],
                out_channels=features[0],
                kernel_size=4,
                stride=4,
                padding=0,
                bias=True,
                dilation=1,
                groups=1,
            ),
        )

        pretrained.act_postprocess2 = nn.Sequential(
            readout_oper[1],
            Transpose(1, 2),
            nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
            nn.Conv2d(
                in_channels=vit_features,
                out_channels=features[1],
                kernel_size=1,
                stride=1,
                padding=0,
            ),
            nn.ConvTranspose2d(
                in_channels=features[1],
                out_channels=features[1],
                kernel_size=2,
                stride=2,
                padding=0,
                bias=True,
                dilation=1,
                groups=1,
            ),
        )
    else:
        # ResNet stage outputs are already spatial maps; pass them through.
        pretrained.act_postprocess1 = nn.Sequential(
            nn.Identity(), nn.Identity(), nn.Identity()
        )
        pretrained.act_postprocess2 = nn.Sequential(
            nn.Identity(), nn.Identity(), nn.Identity()
        )

    # Level 3: native 1/16 resolution.
    pretrained.act_postprocess3 = nn.Sequential(
        readout_oper[2],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[2],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
    )

    # Level 4: strided 3x3 conv halves the resolution to 1/32.
    pretrained.act_postprocess4 = nn.Sequential(
        readout_oper[3],
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
        nn.Conv2d(
            in_channels=vit_features,
            out_channels=features[3],
            kernel_size=1,
            stride=1,
            padding=0,
        ),
        nn.Conv2d(
            in_channels=features[3],
            out_channels=features[3],
            kernel_size=3,
            stride=2,
            padding=1,
        ),
    )

    pretrained.model.start_index = start_index
    pretrained.model.patch_size = [16, 16]

    # We inject this function into the VisionTransformer instances so that
    # we can use it with interpolated position embeddings without modifying the library source.
    pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)

    # We inject this function into the VisionTransformer instances so that
    # we can use it with interpolated position embeddings without modifying the library source.
    pretrained.model._resize_pos_embed = types.MethodType(
        _resize_pos_embed, pretrained.model
    )

    return pretrained
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
def _make_pretrained_vitb_rn50_384(
    pretrained, use_readout="ignore", hooks=None, use_vit_only=False
):
    """Build a hybrid ViT-B + ResNet50 (384px) DPT backbone.

    Args:
        pretrained (bool): load ImageNet weights via timm.
        use_readout (str): readout-token handling ("ignore", "add", "project").
        hooks (list[int], optional): transformer blocks to tap;
            defaults to [0, 1, 8, 11].
        use_vit_only (bool): take the two finest levels from the
            transformer instead of the ResNet stages.
    """
    model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)

    # Idiom fix: compare to None with `is`, not `==` (PEP 8).
    hooks = [0, 1, 8, 11] if hooks is None else hooks
    return _make_vit_b_rn50_backbone(
        model,
        features=[256, 512, 768, 768],
        size=[384, 384],
        hooks=hooks,
        use_vit_only=use_vit_only,
        use_readout=use_readout,
    )
|
RAVE-main/annotator/midas/utils.py
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Utils for monoDepth."""
|
| 2 |
+
import sys
|
| 3 |
+
import re
|
| 4 |
+
import numpy as np
|
| 5 |
+
import cv2
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def read_pfm(path):
    """Read pfm file.

    Args:
        path (str): path to file

    Returns:
        tuple: (data, scale)
    """
    with open(path, "rb") as pfm_file:
        # First header line: "PF" marks RGB data, "Pf" a single channel.
        magic = pfm_file.readline().rstrip().decode("ascii")
        if magic == "PF":
            num_channels = 3
        elif magic == "Pf":
            num_channels = 1
        else:
            raise Exception("Not a PFM file: " + path)

        # Second header line: "<width> <height>".
        dims = re.match(r"^(\d+)\s(\d+)\s$", pfm_file.readline().decode("ascii"))
        if not dims:
            raise Exception("Malformed PFM header.")
        width, height = (int(v) for v in dims.groups())

        # Third header line: scale factor; its sign encodes the byte order.
        scale = float(pfm_file.readline().decode("ascii").rstrip())
        if scale < 0:
            byte_order = "<"  # little-endian
            scale = -scale
        else:
            byte_order = ">"  # big-endian

        samples = np.fromfile(pfm_file, byte_order + "f")
        if num_channels == 3:
            samples = np.reshape(samples, (height, width, 3))
        else:
            samples = np.reshape(samples, (height, width))

        # PFM stores rows bottom-to-top; flip to the usual top-to-bottom order.
        return np.flipud(samples), scale
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def write_pfm(path, image, scale=1):
    """Write pfm file.

    Args:
        path (str): path to file
        image (array): float32 data, H x W x 3 (color), H x W x 1 or H x W
        scale (int, optional): Scale. Defaults to 1.

    Raises:
        Exception: if the dtype is not float32 or the shape is unsupported.
    """

    with open(path, "wb") as file:
        color = None

        if image.dtype.name != "float32":
            raise Exception("Image dtype must be float32.")

        # PFM stores rows bottom-to-top.
        image = np.flipud(image)

        if len(image.shape) == 3 and image.shape[2] == 3:  # color image
            color = True
        elif (
            len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
        ):  # greyscale
            color = False
        else:
            raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")

        # Bug fix: parentheses are required around the conditional -- the
        # original `"PF\n" if color else "Pf\n".encode()` only encoded the
        # greyscale branch, so writing a color image passed a str to a
        # binary-mode file and raised a TypeError.
        file.write(("PF\n" if color else "Pf\n").encode())
        file.write(b"%d %d\n" % (image.shape[1], image.shape[0]))

        # A negative scale marks little-endian sample data.
        endian = image.dtype.byteorder

        if endian == "<" or endian == "=" and sys.byteorder == "little":
            scale = -scale

        file.write(b"%f\n" % scale)

        image.tofile(file)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def read_image(path):
    """Read image and output RGB image (0-1).

    Args:
        path (str): path to file

    Returns:
        array: RGB image (0-1)
    """
    bgr = cv2.imread(path)

    # Promote single-channel images to 3 channels before the colorspace swap.
    if bgr.ndim == 2:
        bgr = cv2.cvtColor(bgr, cv2.COLOR_GRAY2BGR)

    # OpenCV loads BGR uint8; convert to RGB floats in [0, 1].
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB) / 255.0
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def resize_image(img):
    """Resize image and make it fit for network.

    Args:
        img (array): image

    Returns:
        tensor: data ready for network
    """
    orig_h = img.shape[0]
    orig_w = img.shape[1]

    # Scale so that the longer side becomes 384 pixels.
    if orig_w > orig_h:
        scale = orig_w / 384
    else:
        scale = orig_h / 384

    # Round each side up to the next multiple of 32 (network stride).
    new_h = (np.ceil(orig_h / scale / 32) * 32).astype(int)
    new_w = (np.ceil(orig_w / scale / 32) * 32).astype(int)

    resized = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_AREA)

    # HWC -> CHW float tensor with a leading batch dimension.
    tensor = torch.from_numpy(np.transpose(resized, (2, 0, 1))).contiguous().float()
    return tensor.unsqueeze(0)
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def resize_depth(depth, width, height):
    """Resize depth map and bring to CPU (numpy).

    Args:
        depth (tensor): depth
        width (int): image width
        height (int): image height

    Returns:
        array: processed depth
    """
    # Drop the batch dimension and move to CPU for numpy interop.
    depth_cpu = torch.squeeze(depth[0, :, :, :]).to("cpu")

    # Note: cv2.resize takes (width, height), not (height, width).
    return cv2.resize(
        depth_cpu.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
    )
|
| 164 |
+
|
| 165 |
+
def write_depth(path, depth, bits=1):
    """Write depth map to pfm and png file.

    Args:
        path (str): filepath without extension
        depth (array): depth
        bits (int, optional): bytes per png sample (1 -> uint8, 2 -> uint16).
    """
    write_pfm(path + ".pfm", depth.astype(np.float32))

    depth_min = depth.min()
    depth_max = depth.max()

    max_val = (2**(8*bits))-1

    if depth_max - depth_min > np.finfo("float").eps:
        # Normalize to the full range of the target bit depth.
        out = max_val * (depth - depth_min) / (depth_max - depth_min)
    else:
        # Bug fix: `depth.type` does not exist on numpy arrays, so constant
        # depth maps raised an AttributeError; `depth.dtype` is correct.
        out = np.zeros(depth.shape, dtype=depth.dtype)

    if bits == 1:
        cv2.imwrite(path + ".png", out.astype("uint8"))
    elif bits == 2:
        cv2.imwrite(path + ".png", out.astype("uint16"))

    return
|
RAVE-main/annotator/mmpkg/mmcv/__init__.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
# flake8: noqa
|
| 3 |
+
from .arraymisc import *
|
| 4 |
+
from .fileio import *
|
| 5 |
+
from .image import *
|
| 6 |
+
from .utils import *
|
| 7 |
+
from .version import *
|
| 8 |
+
from .video import *
|
| 9 |
+
from .visualization import *
|
| 10 |
+
|
| 11 |
+
# The following modules are not imported to this level, so mmcv may be used
|
| 12 |
+
# without PyTorch.
|
| 13 |
+
# - runner
|
| 14 |
+
# - parallel
|
| 15 |
+
# - op
|
RAVE-main/annotator/mmpkg/mmcv/engine/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .test import (collect_results_cpu, collect_results_gpu, multi_gpu_test,
|
| 3 |
+
single_gpu_test)
|
| 4 |
+
|
| 5 |
+
__all__ = [
|
| 6 |
+
'collect_results_cpu', 'collect_results_gpu', 'multi_gpu_test',
|
| 7 |
+
'single_gpu_test'
|
| 8 |
+
]
|
RAVE-main/annotator/mmpkg/mmcv/fileio/parse.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
|
| 3 |
+
from io import StringIO
|
| 4 |
+
|
| 5 |
+
from .file_client import FileClient
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def list_from_file(filename,
                   prefix='',
                   offset=0,
                   max_num=0,
                   encoding='utf-8',
                   file_client_args=None):
    """Load a text file and parse the content as a list of strings.

    Note:
        In v1.3.16 and later, ``list_from_file`` supports loading a text file
        which can be storaged in different backends and parsing the content as
        a list for strings.

    Args:
        filename (str): Filename.
        prefix (str): The prefix to be inserted to the beginning of each item.
        offset (int): The offset of lines.
        max_num (int): The maximum number of lines to be read,
            zeros and negatives mean no limitation.
        encoding (str): Encoding used to open the file. Default utf-8.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.

    Examples:
        >>> list_from_file('/path/of/your/file')  # disk
        ['hello', 'world']
        >>> list_from_file('s3://path/of/your/file')  # ceph or petrel
        ['hello', 'world']

    Returns:
        list[str]: A list of strings.
    """
    cnt = 0
    item_list = []
    # The file client abstracts over storage backends (disk, ceph, ...);
    # the whole file is fetched as text and parsed in memory.
    file_client = FileClient.infer_client(file_client_args, filename)
    with StringIO(file_client.get_text(filename, encoding)) as f:
        # Skip the first `offset` lines.
        for _ in range(offset):
            f.readline()
        for line in f:
            # Stop once `max_num` lines were collected (<=0 means unlimited).
            if 0 < max_num <= cnt:
                break
            item_list.append(prefix + line.rstrip('\n\r'))
            cnt += 1
    return item_list
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def dict_from_file(filename,
                   key_type=str,
                   encoding='utf-8',
                   file_client_args=None):
    """Load a text file and parse the content as a dict.

    Each line of the text file will be two or more columns split by
    whitespaces or tabs. The first column will be parsed as dict keys, and
    the following columns will be parsed as dict values.

    Note:
        In v1.3.16 and later, ``dict_from_file`` supports loading a text file
        which can be storaged in different backends and parsing the content as
        a dict.

    Args:
        filename(str): Filename.
        key_type(type): Type of the dict keys. str is user by default and
            type conversion will be performed if specified.
        encoding (str): Encoding used to open the file. Default utf-8.
        file_client_args (dict, optional): Arguments to instantiate a
            FileClient. See :class:`mmcv.fileio.FileClient` for details.
            Default: None.

    Examples:
        >>> dict_from_file('/path/of/your/file')  # disk
        {'key1': 'value1', 'key2': 'value2'}
        >>> dict_from_file('s3://path/of/your/file')  # ceph or petrel
        {'key1': 'value1', 'key2': 'value2'}

    Returns:
        dict: The parsed contents.
    """
    mapping = {}
    # The file client abstracts over storage backends (disk, ceph, ...);
    # the whole file is fetched as text and parsed in memory.
    file_client = FileClient.infer_client(file_client_args, filename)
    with StringIO(file_client.get_text(filename, encoding)) as f:
        for line in f:
            items = line.rstrip('\n').split()
            # Every line must contain at least a key and one value.
            assert len(items) >= 2
            key = key_type(items[0])
            # A single value is stored as a scalar, several as a list.
            val = items[1:] if len(items) > 2 else items[1]
            mapping[key] = val
    return mapping
|
RAVE-main/annotator/mmpkg/mmcv/parallel/data_container.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import functools
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def assert_tensor_type(func):
    """Decorator restricting ``func`` to containers wrapping a tensor.

    Raises:
        AttributeError: if ``args[0].data`` is not a ``torch.Tensor``.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        container = args[0]
        if not isinstance(container.data, torch.Tensor):
            raise AttributeError(
                f'{container.__class__.__name__} has no attribute '
                f'{func.__name__} for type {container.datatype}')
        return func(*args, **kwargs)

    return wrapper
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class DataContainer:
    """A container for any type of objects.

    Typically tensors will be stacked in the collate function and sliced along
    some dimension in the scatter function. This behavior has some limitations.
    1. All tensors have to be the same size.
    2. Types are limited (numpy array or Tensor).

    We design `DataContainer` and `MMDataParallel` to overcome these
    limitations. The behavior can be either of the following.

    - copy to GPU, pad all tensors to the same size and stack them
    - copy to GPU without stacking
    - leave the objects as is and pass it to the model
    - pad_dims specifies the number of last few dimensions to do padding
    """

    def __init__(self, data, stack=False, padding_value=0, cpu_only=False,
                 pad_dims=2):
        self._data = data
        self._cpu_only = cpu_only
        self._stack = stack
        self._padding_value = padding_value
        assert pad_dims in [None, 1, 2, 3]
        self._pad_dims = pad_dims

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__, repr(self.data))

    def __len__(self):
        return len(self._data)

    @property
    def data(self):
        # The wrapped payload.
        return self._data

    @property
    def datatype(self):
        # Tensor type string for tensors, the python type otherwise.
        if isinstance(self.data, torch.Tensor):
            return self.data.type()
        return type(self.data)

    @property
    def cpu_only(self):
        return self._cpu_only

    @property
    def stack(self):
        return self._stack

    @property
    def padding_value(self):
        return self._padding_value

    @property
    def pad_dims(self):
        return self._pad_dims

    @assert_tensor_type
    def size(self, *args, **kwargs):
        # Only meaningful for tensor payloads (enforced by the decorator).
        return self.data.size(*args, **kwargs)

    @assert_tensor_type
    def dim(self):
        return self.data.dim()
|
RAVE-main/annotator/mmpkg/mmcv/parallel/distributed_deprecated.py
ADDED
|
@@ -0,0 +1,70 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
import torch.distributed as dist
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
from torch._utils import (_flatten_dense_tensors, _take_tensors,
|
| 6 |
+
_unflatten_dense_tensors)
|
| 7 |
+
|
| 8 |
+
from annotator.mmpkg.mmcv.utils import TORCH_VERSION, digit_version
|
| 9 |
+
from .registry import MODULE_WRAPPERS
|
| 10 |
+
from .scatter_gather import scatter_kwargs
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@MODULE_WRAPPERS.register_module()
class MMDistributedDataParallel(nn.Module):
    """Deprecated hand-rolled distributed wrapper.

    Broadcasts the wrapped module's parameters (and optionally buffers)
    from rank 0 at construction time and scatters inputs to the current
    CUDA device before each call. Gradient synchronization is not handled
    here.
    """

    def __init__(self,
                 module,
                 dim=0,
                 broadcast_buffers=True,
                 bucket_cap_mb=25):
        # module: model to wrap; dim: scatter dimension;
        # bucket_cap_mb: coalescing bucket size for broadcasts, in MB.
        super(MMDistributedDataParallel, self).__init__()
        self.module = module
        self.dim = dim
        self.broadcast_buffers = broadcast_buffers

        self.broadcast_bucket_size = bucket_cap_mb * 1024 * 1024
        self._sync_params()

    def _dist_broadcast_coalesced(self, tensors, buffer_size):
        # Broadcast tensors from rank 0 in flattened buckets of at most
        # `buffer_size` bytes, then copy the synced values back in place.
        for tensors in _take_tensors(tensors, buffer_size):
            flat_tensors = _flatten_dense_tensors(tensors)
            dist.broadcast(flat_tensors, 0)
            for tensor, synced in zip(
                    tensors, _unflatten_dense_tensors(flat_tensors, tensors)):
                tensor.copy_(synced)

    def _sync_params(self):
        # Broadcast all state_dict values (and optionally buffers) from
        # rank 0 so every process starts from identical weights.
        module_states = list(self.module.state_dict().values())
        if len(module_states) > 0:
            self._dist_broadcast_coalesced(module_states,
                                           self.broadcast_bucket_size)
        if self.broadcast_buffers:
            # PyTorch < 1.0 (non-parrots) exposed buffers via the private
            # `_all_buffers()` instead of `buffers()`.
            if (TORCH_VERSION != 'parrots'
                    and digit_version(TORCH_VERSION) < digit_version('1.0')):
                buffers = [b.data for b in self.module._all_buffers()]
            else:
                buffers = [b.data for b in self.module.buffers()]
            if len(buffers) > 0:
                self._dist_broadcast_coalesced(buffers,
                                               self.broadcast_bucket_size)

    def scatter(self, inputs, kwargs, device_ids):
        # DataContainer-aware scatter (see scatter_gather.scatter_kwargs).
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def forward(self, *inputs, **kwargs):
        # Scatter onto the current CUDA device only (one device per process).
        inputs, kwargs = self.scatter(inputs, kwargs,
                                      [torch.cuda.current_device()])
        return self.module(*inputs[0], **kwargs[0])

    def train_step(self, *inputs, **kwargs):
        # Mirrors forward() for the runner's train_step protocol.
        inputs, kwargs = self.scatter(inputs, kwargs,
                                      [torch.cuda.current_device()])
        output = self.module.train_step(*inputs[0], **kwargs[0])
        return output

    def val_step(self, *inputs, **kwargs):
        # Mirrors forward() for the runner's val_step protocol.
        inputs, kwargs = self.scatter(inputs, kwargs,
                                      [torch.cuda.current_device()])
        output = self.module.val_step(*inputs[0], **kwargs[0])
        return output
|
RAVE-main/annotator/mmpkg/mmcv/parallel/scatter_gather.py
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
import torch
|
| 3 |
+
from torch.nn.parallel._functions import Scatter as OrigScatter
|
| 4 |
+
|
| 5 |
+
from ._functions import Scatter
|
| 6 |
+
from .data_container import DataContainer
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def scatter(inputs, target_gpus, dim=0):
    """Scatter inputs to target gpus.

    The only difference from original :func:`scatter` is to add support for
    :type:`~mmcv.parallel.DataContainer`.
    """

    def scatter_map(obj):
        if isinstance(obj, torch.Tensor):
            if target_gpus == [-1]:
                # for CPU inference we use self-implemented scatter
                return Scatter.forward(target_gpus, obj)
            return OrigScatter.apply(target_gpus, None, dim, obj)
        if isinstance(obj, DataContainer):
            # CPU-only containers are passed through untouched.
            if obj.cpu_only:
                return obj.data
            return Scatter.forward(target_gpus, obj.data)
        # Recurse into (non-empty) tuples/lists/dicts, regrouping the
        # per-element scatter results into one collection per target.
        if isinstance(obj, tuple) and obj:
            return list(zip(*map(scatter_map, obj)))
        if isinstance(obj, list) and obj:
            return [list(group) for group in zip(*map(scatter_map, obj))]
        if isinstance(obj, dict) and obj:
            return [
                type(obj)(items)
                for items in zip(*map(scatter_map, obj.items()))
            ]
        # Anything else is replicated verbatim, once per target.
        return [obj for _ in target_gpus]

    # After scatter_map is called, a scatter_map cell will exist. This cell
    # has a reference to the actual function scatter_map, which has references
    # to a closure that has a reference to the scatter_map cell (because the
    # fn is recursive). To avoid this reference cycle, we set the function to
    # None, clearing the cell
    try:
        return scatter_map(inputs)
    finally:
        scatter_map = None
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
    """Scatter with support for kwargs dictionary."""
    scattered_args = scatter(inputs, target_gpus, dim) if inputs else []
    scattered_kw = scatter(kwargs, target_gpus, dim) if kwargs else []
    # Pad the shorter side so both sequences have one entry per target.
    shortfall = len(scattered_kw) - len(scattered_args)
    if shortfall > 0:
        scattered_args += [() for _ in range(shortfall)]
    elif shortfall < 0:
        scattered_kw += [{} for _ in range(-shortfall)]
    return tuple(scattered_args), tuple(scattered_kw)
|
RAVE-main/annotator/mmpkg/mmcv/parallel/utils.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
from .registry import MODULE_WRAPPERS
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def is_module_wrapper(module):
    """Check if a module is a module wrapper.

    The following 3 modules in MMCV (and their subclasses) are regarded as
    module wrappers: DataParallel, DistributedDataParallel,
    MMDistributedDataParallel (the deprecated version). You may add your own
    module wrapper by registering it to mmcv.parallel.MODULE_WRAPPERS.

    Args:
        module (nn.Module): The module to be checked.

    Returns:
        bool: True if the input module is a module wrapper.
    """
    # Every registered wrapper class (and its subclasses) counts.
    wrapper_types = tuple(MODULE_WRAPPERS.module_dict.values())
    return isinstance(module, wrapper_types)
|
RAVE-main/annotator/mmpkg/mmcv/version.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) OpenMMLab. All rights reserved.
|
| 2 |
+
__version__ = '1.3.17'
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def parse_version_info(version_str: str, length: int = 4) -> tuple:
    """Parse a version string into a tuple.

    Args:
        version_str (str): The version string.
        length (int): The maximum number of version levels. Default: 4.

    Returns:
        tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
            (1, 3, 0, 0, 0, 0), and "2.0.0rc1" is parsed into
            (2, 0, 0, 0, 'rc', 1) (when length is set to 4).
    """
    from packaging.version import parse
    version = parse(version_str)
    assert version.release, f'failed to parse version {version_str}'
    # Truncate to `length` numeric components, zero-padding if shorter.
    release = list(version.release)
    release = release[:length]
    if len(release) < length:
        release = release + [0] * (length - len(release))
    if version.is_prerelease:
        # `Version.pre` is a ('rc', 1)-style tuple; append both parts.
        release.extend(list(version.pre))
    elif version.is_postrelease:
        # BUGFIX: `Version.post` is a plain int, not an iterable, so the
        # previous `list(version.post)` raised TypeError for versions like
        # "1.0.0.post1". Mirror the ('rc', 1) shape used for pre-releases.
        release.extend(['post', version.post])
    else:
        # Final release: pad with a neutral (0, 0) segment so the tuple
        # length matches pre/post-release results.
        release.extend([0, 0])
    return tuple(release)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
# Numeric (major, minor, patch) tuple derived from ``__version__``.
version_info = tuple(map(int, __version__.split('.')[:3]))

__all__ = ['__version__', 'version_info', 'parse_version_info']
|
RAVE-main/annotator/oneformer/detectron2/export/README.md
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
This directory contains code to prepare a detectron2 model for deployment.
|
| 3 |
+
Currently it supports exporting a detectron2 model to TorchScript, ONNX, or (deprecated) Caffe2 format.
|
| 4 |
+
|
| 5 |
+
Please see [documentation](https://detectron2.readthedocs.io/tutorials/deployment.html) for its usage.
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
### Acknowledgements
|
| 9 |
+
|
| 10 |
+
Thanks to Mobile Vision team at Facebook for developing the Caffe2 conversion tools.
|
| 11 |
+
|
| 12 |
+
Thanks to Computing Platform Department - PAI team at Alibaba Group (@bddpqq, @chenbohua3) who
helped export Detectron2 models to TorchScript.
|
| 14 |
+
|
| 15 |
+
Thanks to ONNX Converter team at Microsoft who helped export Detectron2 models to ONNX.
|
RAVE-main/annotator/oneformer/detectron2/export/__init__.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
|
| 3 |
+
import warnings
|
| 4 |
+
|
| 5 |
+
from .flatten import TracingAdapter
|
| 6 |
+
from .torchscript import dump_torchscript_IR, scripting_with_instances
|
| 7 |
+
|
| 8 |
+
try:
|
| 9 |
+
from caffe2.proto import caffe2_pb2 as _tmp
|
| 10 |
+
from caffe2.python import core
|
| 11 |
+
|
| 12 |
+
# caffe2 is optional
|
| 13 |
+
except ImportError:
|
| 14 |
+
pass
|
| 15 |
+
else:
|
| 16 |
+
from .api import *
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# TODO: Update ONNX Opset version and run tests when a newer PyTorch is supported
|
| 20 |
+
STABLE_ONNX_OPSET_VERSION = 11
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def add_export_config(cfg):
    """Deprecated no-op kept for backward compatibility.

    Historically this prepared a config for export; that step is no longer
    needed, so the config is returned unchanged (a DeprecationWarning is
    emitted).
    """
    warnings.warn(
        "add_export_config has been deprecated and behaves as no-op function.", DeprecationWarning
    )
    return cfg
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
__all__ = [k for k in globals().keys() if not k.startswith("_")]
|
RAVE-main/annotator/oneformer/detectron2/export/api.py
ADDED
|
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
import copy
|
| 3 |
+
import logging
|
| 4 |
+
import os
|
| 5 |
+
import torch
|
| 6 |
+
from caffe2.proto import caffe2_pb2
|
| 7 |
+
from torch import nn
|
| 8 |
+
|
| 9 |
+
from annotator.oneformer.detectron2.config import CfgNode
|
| 10 |
+
from annotator.oneformer.detectron2.utils.file_io import PathManager
|
| 11 |
+
|
| 12 |
+
from .caffe2_inference import ProtobufDetectionModel
|
| 13 |
+
from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format
|
| 14 |
+
from .shared import get_pb_arg_vali, get_pb_arg_vals, save_graph
|
| 15 |
+
|
| 16 |
+
__all__ = [
|
| 17 |
+
"Caffe2Model",
|
| 18 |
+
"Caffe2Tracer",
|
| 19 |
+
]
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class Caffe2Tracer:
    """
    Make a detectron2 model traceable with Caffe2 operators.
    This class creates a traceable version of a detectron2 model which:

    1. Rewrite parts of the model using ops in Caffe2. Note that some ops do
       not have GPU implementation in Caffe2.
    2. Remove post-processing and only produce raw layer outputs

    After making a traceable model, the class provide methods to export such a
    model to different deployment formats.
    Exported graph produced by this class take two input tensors:

    1. (1, C, H, W) float "data" which is an image (usually in [0, 255]).
       (H, W) often has to be padded to multiple of 32 (depend on the model
       architecture).
    2. 1x3 float "im_info", each row of which is (height, width, 1.0).
       Height and width are true image shapes before padding.

    The class currently only supports models using builtin meta architectures.
    Batch inference is not supported, and contributions are welcome.
    """

    def __init__(self, cfg: CfgNode, model: nn.Module, inputs):
        """
        Args:
            cfg (CfgNode): a detectron2 config used to construct caffe2-compatible model.
            model (nn.Module): An original pytorch model. Must be among a few official models
                in detectron2 that can be converted to become caffe2-compatible automatically.
                Weights have to be already loaded to this model.
            inputs: sample inputs that the given model takes for inference.
                Will be used to trace the model. For most models, random inputs with
                no detected objects will not work as they lead to wrong traces.
        """
        assert isinstance(cfg, CfgNode), cfg
        assert isinstance(model, torch.nn.Module), type(model)

        # TODO make it support custom models, by passing in c2 model directly
        # Look up the caffe2-compatible wrapper class for this meta architecture.
        C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[cfg.MODEL.META_ARCHITECTURE]
        # Deep-copy so rewriting the model for caffe2 never mutates the caller's model.
        self.traceable_model = C2MetaArch(cfg, copy.deepcopy(model))
        self.inputs = inputs
        # Inputs converted to the C2-style (data, im_info) tensor format used for tracing.
        self.traceable_inputs = self.traceable_model.get_caffe2_inputs(inputs)

    def export_caffe2(self):
        """
        Export the model to Caffe2's protobuf format.
        The returned object can be saved with its :meth:`.save_protobuf()` method.
        The result can be loaded and executed using Caffe2 runtime.

        Returns:
            :class:`Caffe2Model`
        """
        # Imported lazily: caffe2 export machinery is optional.
        from .caffe2_export import export_caffe2_detection_model

        predict_net, init_net = export_caffe2_detection_model(
            self.traceable_model, self.traceable_inputs
        )
        return Caffe2Model(predict_net, init_net)

    def export_onnx(self):
        """
        Export the model to ONNX format.
        Note that the exported model contains custom ops only available in caffe2, therefore it
        cannot be directly executed by other runtime (such as onnxruntime or TensorRT).
        Post-processing or transformation passes may be applied on the model to accommodate
        different runtimes, but we currently do not provide support for them.

        Returns:
            onnx.ModelProto: an onnx model.
        """
        from .caffe2_export import export_onnx_model as export_onnx_model_impl

        return export_onnx_model_impl(self.traceable_model, (self.traceable_inputs,))

    def export_torchscript(self):
        """
        Export the model to a ``torch.jit.TracedModule`` by tracing.
        The returned object can be saved to a file by ``.save()``.

        Returns:
            torch.jit.TracedModule: a torch TracedModule
        """
        logger = logging.getLogger(__name__)
        logger.info("Tracing the model with torch.jit.trace ...")
        # no_grad: tracing is inference-only; avoids recording autograd state.
        with torch.no_grad():
            return torch.jit.trace(self.traceable_model, (self.traceable_inputs,))
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
class Caffe2Model(nn.Module):
    """
    A wrapper around the traced model in Caffe2's protobuf format.
    The exported graph has different inputs/outputs from the original Pytorch
    model, as explained in :class:`Caffe2Tracer`. This class wraps around the
    exported graph to simulate the same interface as the original Pytorch model.
    It also provides functions to save/load models in Caffe2's format.

    Examples:
    ::
        c2_model = Caffe2Tracer(cfg, torch_model, inputs).export_caffe2()

        inputs = [{"image": img_tensor_CHW}]
        outputs = c2_model(inputs)
        orig_outputs = torch_model(inputs)
    """

    def __init__(self, predict_net, init_net):
        super().__init__()
        self.eval()  # always in eval mode
        # caffe2 NetDef protobufs: graph definition and parameter-init graph.
        self._predict_net = predict_net
        self._init_net = init_net
        # Lazily-created ProtobufDetectionModel; built on first __call__.
        self._predictor = None

    __init__.__HIDE_SPHINX_DOC__ = True

    @property
    def predict_net(self):
        """
        caffe2.core.Net: the underlying caffe2 predict net
        """
        return self._predict_net

    @property
    def init_net(self):
        """
        caffe2.core.Net: the underlying caffe2 init net
        """
        return self._init_net

    def save_protobuf(self, output_dir):
        """
        Save the model as caffe2's protobuf format.
        It saves the following files:

            * "model.pb": definition of the graph. Can be visualized with
              tools like `netron <https://github.com/lutzroeder/netron>`_.
            * "model_init.pb": model parameters
            * "model.pbtxt": human-readable definition of the graph. Not
              needed for deployment.

        Args:
            output_dir (str): the output directory to save protobuf files.
        """
        logger = logging.getLogger(__name__)
        logger.info("Saving model to {} ...".format(output_dir))
        if not PathManager.exists(output_dir):
            PathManager.mkdirs(output_dir)

        # Binary graph definition.
        with PathManager.open(os.path.join(output_dir, "model.pb"), "wb") as f:
            f.write(self._predict_net.SerializeToString())
        # Human-readable text dump of the same graph.
        with PathManager.open(os.path.join(output_dir, "model.pbtxt"), "w") as f:
            f.write(str(self._predict_net))
        # Binary parameter-initialization net (the weights).
        with PathManager.open(os.path.join(output_dir, "model_init.pb"), "wb") as f:
            f.write(self._init_net.SerializeToString())

    def save_graph(self, output_file, inputs=None):
        """
        Save the graph as SVG format.

        Args:
            output_file (str): a SVG file
            inputs: optional inputs given to the model.
                If given, the inputs will be used to run the graph to record
                shape of every tensor. The shape information will be
                saved together with the graph.
        """
        from .caffe2_export import run_and_save_graph

        if inputs is None:
            save_graph(self._predict_net, output_file, op_only=False)
        else:
            # Run the graph with real inputs so tensor shapes can be recorded.
            size_divisibility = get_pb_arg_vali(self._predict_net, "size_divisibility", 0)
            device = get_pb_arg_vals(self._predict_net, "device", b"cpu").decode("ascii")
            inputs = convert_batched_inputs_to_c2_format(inputs, size_divisibility, device)
            inputs = [x.cpu().numpy() for x in inputs]
            run_and_save_graph(self._predict_net, self._init_net, inputs, output_file)

    @staticmethod
    def load_protobuf(dir):
        """
        Args:
            dir (str): a directory used to save Caffe2Model with
                :meth:`save_protobuf`.
                The files "model.pb" and "model_init.pb" are needed.

        Returns:
            Caffe2Model: the caffe2 model loaded from this directory.
        """
        predict_net = caffe2_pb2.NetDef()
        with PathManager.open(os.path.join(dir, "model.pb"), "rb") as f:
            predict_net.ParseFromString(f.read())

        init_net = caffe2_pb2.NetDef()
        with PathManager.open(os.path.join(dir, "model_init.pb"), "rb") as f:
            init_net.ParseFromString(f.read())

        return Caffe2Model(predict_net, init_net)

    def __call__(self, inputs):
        """
        An interface that wraps around a Caffe2 model and mimics detectron2's models'
        input/output format. See details about the format at :doc:`/tutorials/models`.
        This is used to compare the outputs of caffe2 model with its original torch model.

        Due to the extra conversion between Pytorch/Caffe2, this method is not meant for
        benchmark. Because of the conversion, this method also has dependency
        on detectron2 in order to convert to detectron2's output format.
        """
        # Build the protobuf-backed predictor once and cache it.
        if self._predictor is None:
            self._predictor = ProtobufDetectionModel(self._predict_net, self._init_net)
        return self._predictor(inputs)
|
RAVE-main/annotator/oneformer/detectron2/export/c10.py
ADDED
|
@@ -0,0 +1,557 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
|
| 3 |
+
import math
|
| 4 |
+
from typing import Dict
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn.functional as F
|
| 7 |
+
|
| 8 |
+
from annotator.oneformer.detectron2.layers import ShapeSpec, cat
|
| 9 |
+
from annotator.oneformer.detectron2.layers.roi_align_rotated import ROIAlignRotated
|
| 10 |
+
from annotator.oneformer.detectron2.modeling import poolers
|
| 11 |
+
from annotator.oneformer.detectron2.modeling.proposal_generator import rpn
|
| 12 |
+
from annotator.oneformer.detectron2.modeling.roi_heads.mask_head import mask_rcnn_inference
|
| 13 |
+
from annotator.oneformer.detectron2.structures import Boxes, ImageList, Instances, Keypoints, RotatedBoxes
|
| 14 |
+
|
| 15 |
+
from .shared import alias, to_device
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
"""
|
| 19 |
+
This file contains caffe2-compatible implementation of several detectron2 components.
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class Caffe2Boxes(Boxes):
    """
    Representing a list of detectron2.structures.Boxes from minibatch, each box
    is represented by a 5d vector (batch index + 4 coordinates), or a 6d vector
    (batch index + 5 coordinates) for RotatedBoxes.
    """

    def __init__(self, tensor):
        assert isinstance(tensor, torch.Tensor)
        # 4 = plain boxes; 5 = batch idx + boxes (or rotated boxes without
        # batch idx); 6 = batch idx + rotated boxes.
        assert tensor.dim() == 2 and tensor.size(-1) in [4, 5, 6], tensor.size()
        # TODO: make tensor immutable when dim is Nx5 for Boxes,
        # and Nx6 for RotatedBoxes?
        self.tensor = tensor
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
# TODO clean up this class, maybe just extend Instances
class InstancesList(object):
    """
    Tensor representation of a list of Instances object for a batch of images.

    When dealing with a batch of images with Caffe2 ops, a list of bboxes
    (instances) are usually represented by single Tensor with size
    (sigma(Ni), 5) or (sigma(Ni), 4) plus a batch split Tensor. This class is
    for providing common functions to convert between these two representations.
    """

    def __init__(self, im_info, indices, extra_fields=None):
        # [N, 3] -> (H, W, Scale)
        self.im_info = im_info
        # [N,] -> indice of batch to which the instance belongs
        self.indices = indices
        # [N, ...] per-instance fields, keyed by name
        self.batch_extra_fields = extra_fields or {}

        self.image_size = self.im_info

    def get_fields(self):
        """like `get_fields` in the Instances object,
        but return each field in tensor representations"""
        ret = {}
        for k, v in self.batch_extra_fields.items():
            # if isinstance(v, torch.Tensor):
            #     tensor_rep = v
            # elif isinstance(v, (Boxes, Keypoints)):
            #     tensor_rep = v.tensor
            # else:
            #     raise ValueError("Can't find tensor representation for: {}".format())
            ret[k] = v
        return ret

    def has(self, name):
        # True if a field with this name has been set.
        return name in self.batch_extra_fields

    def set(self, name, value):
        # len(tensor) is a bad practice that generates ONNX constants during tracing.
        # Although not a problem for the `assert` statement below, torch ONNX exporter
        # still raises a misleading warning, as it does not know this call comes
        # from `assert`.
        if isinstance(value, Boxes):
            data_len = value.tensor.shape[0]
        elif isinstance(value, torch.Tensor):
            data_len = value.shape[0]
        else:
            data_len = len(value)
        # All fields must describe the same number of instances.
        if len(self.batch_extra_fields):
            assert (
                len(self) == data_len
            ), "Adding a field of length {} to a Instances of length {}".format(data_len, len(self))
        self.batch_extra_fields[name] = value

    def __getattr__(self, name):
        # Fields are exposed as attributes, mirroring Instances.
        if name not in self.batch_extra_fields:
            raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
        return self.batch_extra_fields[name]

    def __len__(self):
        return len(self.indices)

    def flatten(self):
        # Return all fields as a flat list of tensors (unwrapping Boxes/Keypoints).
        ret = []
        for _, v in self.batch_extra_fields.items():
            if isinstance(v, (Boxes, Keypoints)):
                ret.append(v.tensor)
            else:
                ret.append(v)
        return ret

    @staticmethod
    def to_d2_instances_list(instances_list):
        """
        Convert InstancesList to List[Instances]. The input `instances_list` can
        also be a List[Instances], in this case this method is a non-op.
        """
        if not isinstance(instances_list, InstancesList):
            assert all(isinstance(x, Instances) for x in instances_list)
            return instances_list

        ret = []
        for i, info in enumerate(instances_list.im_info):
            instances = Instances(torch.Size([int(info[0].item()), int(info[1].item())]))

            # Select only the instances belonging to image i.
            ids = instances_list.indices == i
            for k, v in instances_list.batch_extra_fields.items():
                if isinstance(v, torch.Tensor):
                    instances.set(k, v[ids])
                    continue
                elif isinstance(v, Boxes):
                    # Drop the leading batch-index column, keep the 4 box coords.
                    instances.set(k, v[ids, -4:])
                    continue

                # Otherwise the field is stored as (target_type, tensor) pair.
                target_type, tensor_source = v
                assert isinstance(tensor_source, torch.Tensor)
                assert tensor_source.shape[0] == instances_list.indices.shape[0]
                tensor_source = tensor_source[ids]

                if issubclass(target_type, Boxes):
                    instances.set(k, Boxes(tensor_source[:, -4:]))
                elif issubclass(target_type, Keypoints):
                    instances.set(k, Keypoints(tensor_source))
                elif issubclass(target_type, torch.Tensor):
                    instances.set(k, tensor_source)
                else:
                    # BUGFIX: corrected typo "targe type" in the error message.
                    raise ValueError("Can't handle target type: {}".format(target_type))

            ret.append(instances)
        return ret
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
class Caffe2Compatible(object):
    """
    A model can inherit this class to indicate that it can be traced and deployed with caffe2.
    """

    @property
    def tensor_mode(self):
        """If true, the model expects C2-style tensor only inputs/outputs format."""
        return self._tensor_mode

    @tensor_mode.setter
    def tensor_mode(self, v):
        self._tensor_mode = v
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
class Caffe2RPN(Caffe2Compatible, rpn.RPN):
    # Caffe2-traceable RPN: proposal generation is delegated to the
    # torch.ops._caffe2.GenerateProposals / CollectRpnProposals operators.

    @classmethod
    def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
        ret = super(Caffe2Compatible, cls).from_config(cfg, input_shape)
        # The caffe2 GenerateProposals op assumes unit box-regression weights;
        # reject configs that use anything else.
        assert tuple(cfg.MODEL.RPN.BBOX_REG_WEIGHTS) == (1.0, 1.0, 1.0, 1.0) or tuple(
            cfg.MODEL.RPN.BBOX_REG_WEIGHTS
        ) == (1.0, 1.0, 1.0, 1.0, 1.0)
        return ret

    def _generate_proposals(
        self, images, objectness_logits_pred, anchor_deltas_pred, gt_instances=None
    ):
        assert isinstance(images, ImageList)
        if self.tensor_mode:
            # In tensor mode, image_sizes is already the C2-style im_info tensor.
            im_info = images.image_sizes
        else:
            # Build a [N, 3] (height, width, scale=1.0) tensor from python sizes.
            im_info = torch.tensor([[im_sz[0], im_sz[1], 1.0] for im_sz in images.image_sizes]).to(
                images.tensor.device
            )
        assert isinstance(im_info, torch.Tensor)

        rpn_rois_list = []
        rpn_roi_probs_list = []
        # One GenerateProposals call per feature level.
        for scores, bbox_deltas, cell_anchors_tensor, feat_stride in zip(
            objectness_logits_pred,
            anchor_deltas_pred,
            [b for (n, b) in self.anchor_generator.cell_anchors.named_buffers()],
            self.anchor_generator.strides,
        ):
            scores = scores.detach()
            bbox_deltas = bbox_deltas.detach()

            rpn_rois, rpn_roi_probs = torch.ops._caffe2.GenerateProposals(
                scores,
                bbox_deltas,
                im_info,
                cell_anchors_tensor,
                spatial_scale=1.0 / feat_stride,
                pre_nms_topN=self.pre_nms_topk[self.training],
                post_nms_topN=self.post_nms_topk[self.training],
                nms_thresh=self.nms_thresh,
                min_size=self.min_box_size,
                # correct_transform_coords=True,  # deprecated argument
                angle_bound_on=True,  # Default
                angle_bound_lo=-180,
                angle_bound_hi=180,
                clip_angle_thresh=1.0,  # Default
                legacy_plus_one=False,
            )
            rpn_rois_list.append(rpn_rois)
            rpn_roi_probs_list.append(rpn_roi_probs)

        # For FPN in D2, in RPN all proposals from different levels are concated
        # together, ranked and picked by top post_nms_topk. Then in ROIPooler
        # it calculates level_assignments and calls the RoIAlign from
        # the corresponding level.

        if len(objectness_logits_pred) == 1:
            rpn_rois = rpn_rois_list[0]
            rpn_roi_probs = rpn_roi_probs_list[0]
        else:
            assert len(rpn_rois_list) == len(rpn_roi_probs_list)
            rpn_post_nms_topN = self.post_nms_topk[self.training]

            device = rpn_rois_list[0].device
            # CollectRpnProposals is CPU-only; move inputs over and back.
            input_list = [to_device(x, "cpu") for x in (rpn_rois_list + rpn_roi_probs_list)]

            # TODO remove this after confirming rpn_max_level/rpn_min_level
            # is not needed in CollectRpnProposals.
            feature_strides = list(self.anchor_generator.strides)
            rpn_min_level = int(math.log2(feature_strides[0]))
            rpn_max_level = int(math.log2(feature_strides[-1]))
            assert (rpn_max_level - rpn_min_level + 1) == len(
                rpn_rois_list
            ), "CollectRpnProposals requires continuous levels"

            rpn_rois = torch.ops._caffe2.CollectRpnProposals(
                input_list,
                # NOTE: in current implementation, rpn_max_level and rpn_min_level
                # are not needed, only the subtraction of two matters and it
                # can be infer from the number of inputs. Keep them now for
                # consistency.
                rpn_max_level=2 + len(rpn_rois_list) - 1,
                rpn_min_level=2,
                rpn_post_nms_topN=rpn_post_nms_topN,
            )
            rpn_rois = to_device(rpn_rois, device)
            rpn_roi_probs = []

        proposals = self.c2_postprocess(im_info, rpn_rois, rpn_roi_probs, self.tensor_mode)
        return proposals, {}

    def forward(self, images, features, gt_instances=None):
        # Inference-only: training would require the original (non-caffe2) RPN.
        assert not self.training
        features = [features[f] for f in self.in_features]
        objectness_logits_pred, anchor_deltas_pred = self.rpn_head(features)
        return self._generate_proposals(
            images,
            objectness_logits_pred,
            anchor_deltas_pred,
            gt_instances,
        )

    @staticmethod
    def c2_postprocess(im_info, rpn_rois, rpn_roi_probs, tensor_mode):
        # Wrap raw op outputs into an InstancesList; column 0 of rpn_rois is
        # the batch index, the remaining columns are the box coordinates.
        proposals = InstancesList(
            im_info=im_info,
            indices=rpn_rois[:, 0],
            extra_fields={
                "proposal_boxes": Caffe2Boxes(rpn_rois),
                "objectness_logits": (torch.Tensor, rpn_roi_probs),
            },
        )
        if not tensor_mode:
            proposals = InstancesList.to_d2_instances_list(proposals)
        else:
            proposals = [proposals]
        return proposals
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
class Caffe2ROIPooler(Caffe2Compatible, poolers.ROIPooler):
|
| 288 |
+
@staticmethod
|
| 289 |
+
def c2_preprocess(box_lists):
|
| 290 |
+
assert all(isinstance(x, Boxes) for x in box_lists)
|
| 291 |
+
if all(isinstance(x, Caffe2Boxes) for x in box_lists):
|
| 292 |
+
# input is pure-tensor based
|
| 293 |
+
assert len(box_lists) == 1
|
| 294 |
+
pooler_fmt_boxes = box_lists[0].tensor
|
| 295 |
+
else:
|
| 296 |
+
pooler_fmt_boxes = poolers.convert_boxes_to_pooler_format(box_lists)
|
| 297 |
+
return pooler_fmt_boxes
|
| 298 |
+
|
| 299 |
+
def forward(self, x, box_lists):
|
| 300 |
+
assert not self.training
|
| 301 |
+
|
| 302 |
+
pooler_fmt_boxes = self.c2_preprocess(box_lists)
|
| 303 |
+
num_level_assignments = len(self.level_poolers)
|
| 304 |
+
|
| 305 |
+
if num_level_assignments == 1:
|
| 306 |
+
if isinstance(self.level_poolers[0], ROIAlignRotated):
|
| 307 |
+
c2_roi_align = torch.ops._caffe2.RoIAlignRotated
|
| 308 |
+
aligned = True
|
| 309 |
+
else:
|
| 310 |
+
c2_roi_align = torch.ops._caffe2.RoIAlign
|
| 311 |
+
aligned = self.level_poolers[0].aligned
|
| 312 |
+
|
| 313 |
+
x0 = x[0]
|
| 314 |
+
if x0.is_quantized:
|
| 315 |
+
x0 = x0.dequantize()
|
| 316 |
+
|
| 317 |
+
out = c2_roi_align(
|
| 318 |
+
x0,
|
| 319 |
+
pooler_fmt_boxes,
|
| 320 |
+
order="NCHW",
|
| 321 |
+
spatial_scale=float(self.level_poolers[0].spatial_scale),
|
| 322 |
+
pooled_h=int(self.output_size[0]),
|
| 323 |
+
pooled_w=int(self.output_size[1]),
|
| 324 |
+
sampling_ratio=int(self.level_poolers[0].sampling_ratio),
|
| 325 |
+
aligned=aligned,
|
| 326 |
+
)
|
| 327 |
+
return out
|
| 328 |
+
|
| 329 |
+
device = pooler_fmt_boxes.device
|
| 330 |
+
assert (
|
| 331 |
+
self.max_level - self.min_level + 1 == 4
|
| 332 |
+
), "Currently DistributeFpnProposals only support 4 levels"
|
| 333 |
+
fpn_outputs = torch.ops._caffe2.DistributeFpnProposals(
|
| 334 |
+
to_device(pooler_fmt_boxes, "cpu"),
|
| 335 |
+
roi_canonical_scale=self.canonical_box_size,
|
| 336 |
+
roi_canonical_level=self.canonical_level,
|
| 337 |
+
roi_max_level=self.max_level,
|
| 338 |
+
roi_min_level=self.min_level,
|
| 339 |
+
legacy_plus_one=False,
|
| 340 |
+
)
|
| 341 |
+
fpn_outputs = [to_device(x, device) for x in fpn_outputs]
|
| 342 |
+
|
| 343 |
+
rois_fpn_list = fpn_outputs[:-1]
|
| 344 |
+
rois_idx_restore_int32 = fpn_outputs[-1]
|
| 345 |
+
|
| 346 |
+
roi_feat_fpn_list = []
|
| 347 |
+
for roi_fpn, x_level, pooler in zip(rois_fpn_list, x, self.level_poolers):
|
| 348 |
+
if isinstance(pooler, ROIAlignRotated):
|
| 349 |
+
c2_roi_align = torch.ops._caffe2.RoIAlignRotated
|
| 350 |
+
aligned = True
|
| 351 |
+
else:
|
| 352 |
+
c2_roi_align = torch.ops._caffe2.RoIAlign
|
| 353 |
+
aligned = bool(pooler.aligned)
|
| 354 |
+
|
| 355 |
+
if x_level.is_quantized:
|
| 356 |
+
x_level = x_level.dequantize()
|
| 357 |
+
|
| 358 |
+
roi_feat_fpn = c2_roi_align(
|
| 359 |
+
x_level,
|
| 360 |
+
roi_fpn,
|
| 361 |
+
order="NCHW",
|
| 362 |
+
spatial_scale=float(pooler.spatial_scale),
|
| 363 |
+
pooled_h=int(self.output_size[0]),
|
| 364 |
+
pooled_w=int(self.output_size[1]),
|
| 365 |
+
sampling_ratio=int(pooler.sampling_ratio),
|
| 366 |
+
aligned=aligned,
|
| 367 |
+
)
|
| 368 |
+
roi_feat_fpn_list.append(roi_feat_fpn)
|
| 369 |
+
|
| 370 |
+
roi_feat_shuffled = cat(roi_feat_fpn_list, dim=0)
|
| 371 |
+
assert roi_feat_shuffled.numel() > 0 and rois_idx_restore_int32.numel() > 0, (
|
| 372 |
+
"Caffe2 export requires tracing with a model checkpoint + input that can produce valid"
|
| 373 |
+
" detections. But no detections were obtained with the given checkpoint and input!"
|
| 374 |
+
)
|
| 375 |
+
roi_feat = torch.ops._caffe2.BatchPermutation(roi_feat_shuffled, rois_idx_restore_int32)
|
| 376 |
+
return roi_feat
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
class Caffe2FastRCNNOutputsInference:
|
| 380 |
+
def __init__(self, tensor_mode):
|
| 381 |
+
self.tensor_mode = tensor_mode # whether the output is caffe2 tensor mode
|
| 382 |
+
|
| 383 |
+
def __call__(self, box_predictor, predictions, proposals):
|
| 384 |
+
"""equivalent to FastRCNNOutputLayers.inference"""
|
| 385 |
+
num_classes = box_predictor.num_classes
|
| 386 |
+
score_thresh = box_predictor.test_score_thresh
|
| 387 |
+
nms_thresh = box_predictor.test_nms_thresh
|
| 388 |
+
topk_per_image = box_predictor.test_topk_per_image
|
| 389 |
+
is_rotated = len(box_predictor.box2box_transform.weights) == 5
|
| 390 |
+
|
| 391 |
+
if is_rotated:
|
| 392 |
+
box_dim = 5
|
| 393 |
+
assert box_predictor.box2box_transform.weights[4] == 1, (
|
| 394 |
+
"The weights for Rotated BBoxTransform in C2 have only 4 dimensions,"
|
| 395 |
+
+ " thus enforcing the angle weight to be 1 for now"
|
| 396 |
+
)
|
| 397 |
+
box2box_transform_weights = box_predictor.box2box_transform.weights[:4]
|
| 398 |
+
else:
|
| 399 |
+
box_dim = 4
|
| 400 |
+
box2box_transform_weights = box_predictor.box2box_transform.weights
|
| 401 |
+
|
| 402 |
+
class_logits, box_regression = predictions
|
| 403 |
+
if num_classes + 1 == class_logits.shape[1]:
|
| 404 |
+
class_prob = F.softmax(class_logits, -1)
|
| 405 |
+
else:
|
| 406 |
+
assert num_classes == class_logits.shape[1]
|
| 407 |
+
class_prob = F.sigmoid(class_logits)
|
| 408 |
+
# BoxWithNMSLimit will infer num_classes from the shape of the class_prob
|
| 409 |
+
# So append a zero column as placeholder for the background class
|
| 410 |
+
class_prob = torch.cat((class_prob, torch.zeros(class_prob.shape[0], 1)), dim=1)
|
| 411 |
+
|
| 412 |
+
assert box_regression.shape[1] % box_dim == 0
|
| 413 |
+
cls_agnostic_bbox_reg = box_regression.shape[1] // box_dim == 1
|
| 414 |
+
|
| 415 |
+
input_tensor_mode = proposals[0].proposal_boxes.tensor.shape[1] == box_dim + 1
|
| 416 |
+
|
| 417 |
+
proposal_boxes = proposals[0].proposal_boxes
|
| 418 |
+
if isinstance(proposal_boxes, Caffe2Boxes):
|
| 419 |
+
rois = Caffe2Boxes.cat([p.proposal_boxes for p in proposals])
|
| 420 |
+
elif isinstance(proposal_boxes, RotatedBoxes):
|
| 421 |
+
rois = RotatedBoxes.cat([p.proposal_boxes for p in proposals])
|
| 422 |
+
elif isinstance(proposal_boxes, Boxes):
|
| 423 |
+
rois = Boxes.cat([p.proposal_boxes for p in proposals])
|
| 424 |
+
else:
|
| 425 |
+
raise NotImplementedError(
|
| 426 |
+
'Expected proposals[0].proposal_boxes to be type "Boxes", '
|
| 427 |
+
f"instead got {type(proposal_boxes)}"
|
| 428 |
+
)
|
| 429 |
+
|
| 430 |
+
device, dtype = rois.tensor.device, rois.tensor.dtype
|
| 431 |
+
if input_tensor_mode:
|
| 432 |
+
im_info = proposals[0].image_size
|
| 433 |
+
rois = rois.tensor
|
| 434 |
+
else:
|
| 435 |
+
im_info = torch.tensor(
|
| 436 |
+
[[sz[0], sz[1], 1.0] for sz in [x.image_size for x in proposals]]
|
| 437 |
+
)
|
| 438 |
+
batch_ids = cat(
|
| 439 |
+
[
|
| 440 |
+
torch.full((b, 1), i, dtype=dtype, device=device)
|
| 441 |
+
for i, b in enumerate(len(p) for p in proposals)
|
| 442 |
+
],
|
| 443 |
+
dim=0,
|
| 444 |
+
)
|
| 445 |
+
rois = torch.cat([batch_ids, rois.tensor], dim=1)
|
| 446 |
+
|
| 447 |
+
roi_pred_bbox, roi_batch_splits = torch.ops._caffe2.BBoxTransform(
|
| 448 |
+
to_device(rois, "cpu"),
|
| 449 |
+
to_device(box_regression, "cpu"),
|
| 450 |
+
to_device(im_info, "cpu"),
|
| 451 |
+
weights=box2box_transform_weights,
|
| 452 |
+
apply_scale=True,
|
| 453 |
+
rotated=is_rotated,
|
| 454 |
+
angle_bound_on=True,
|
| 455 |
+
angle_bound_lo=-180,
|
| 456 |
+
angle_bound_hi=180,
|
| 457 |
+
clip_angle_thresh=1.0,
|
| 458 |
+
legacy_plus_one=False,
|
| 459 |
+
)
|
| 460 |
+
roi_pred_bbox = to_device(roi_pred_bbox, device)
|
| 461 |
+
roi_batch_splits = to_device(roi_batch_splits, device)
|
| 462 |
+
|
| 463 |
+
nms_outputs = torch.ops._caffe2.BoxWithNMSLimit(
|
| 464 |
+
to_device(class_prob, "cpu"),
|
| 465 |
+
to_device(roi_pred_bbox, "cpu"),
|
| 466 |
+
to_device(roi_batch_splits, "cpu"),
|
| 467 |
+
score_thresh=float(score_thresh),
|
| 468 |
+
nms=float(nms_thresh),
|
| 469 |
+
detections_per_im=int(topk_per_image),
|
| 470 |
+
soft_nms_enabled=False,
|
| 471 |
+
soft_nms_method="linear",
|
| 472 |
+
soft_nms_sigma=0.5,
|
| 473 |
+
soft_nms_min_score_thres=0.001,
|
| 474 |
+
rotated=is_rotated,
|
| 475 |
+
cls_agnostic_bbox_reg=cls_agnostic_bbox_reg,
|
| 476 |
+
input_boxes_include_bg_cls=False,
|
| 477 |
+
output_classes_include_bg_cls=False,
|
| 478 |
+
legacy_plus_one=False,
|
| 479 |
+
)
|
| 480 |
+
roi_score_nms = to_device(nms_outputs[0], device)
|
| 481 |
+
roi_bbox_nms = to_device(nms_outputs[1], device)
|
| 482 |
+
roi_class_nms = to_device(nms_outputs[2], device)
|
| 483 |
+
roi_batch_splits_nms = to_device(nms_outputs[3], device)
|
| 484 |
+
roi_keeps_nms = to_device(nms_outputs[4], device)
|
| 485 |
+
roi_keeps_size_nms = to_device(nms_outputs[5], device)
|
| 486 |
+
if not self.tensor_mode:
|
| 487 |
+
roi_class_nms = roi_class_nms.to(torch.int64)
|
| 488 |
+
|
| 489 |
+
roi_batch_ids = cat(
|
| 490 |
+
[
|
| 491 |
+
torch.full((b, 1), i, dtype=dtype, device=device)
|
| 492 |
+
for i, b in enumerate(int(x.item()) for x in roi_batch_splits_nms)
|
| 493 |
+
],
|
| 494 |
+
dim=0,
|
| 495 |
+
)
|
| 496 |
+
|
| 497 |
+
roi_class_nms = alias(roi_class_nms, "class_nms")
|
| 498 |
+
roi_score_nms = alias(roi_score_nms, "score_nms")
|
| 499 |
+
roi_bbox_nms = alias(roi_bbox_nms, "bbox_nms")
|
| 500 |
+
roi_batch_splits_nms = alias(roi_batch_splits_nms, "batch_splits_nms")
|
| 501 |
+
roi_keeps_nms = alias(roi_keeps_nms, "keeps_nms")
|
| 502 |
+
roi_keeps_size_nms = alias(roi_keeps_size_nms, "keeps_size_nms")
|
| 503 |
+
|
| 504 |
+
results = InstancesList(
|
| 505 |
+
im_info=im_info,
|
| 506 |
+
indices=roi_batch_ids[:, 0],
|
| 507 |
+
extra_fields={
|
| 508 |
+
"pred_boxes": Caffe2Boxes(roi_bbox_nms),
|
| 509 |
+
"scores": roi_score_nms,
|
| 510 |
+
"pred_classes": roi_class_nms,
|
| 511 |
+
},
|
| 512 |
+
)
|
| 513 |
+
|
| 514 |
+
if not self.tensor_mode:
|
| 515 |
+
results = InstancesList.to_d2_instances_list(results)
|
| 516 |
+
batch_splits = roi_batch_splits_nms.int().tolist()
|
| 517 |
+
kept_indices = list(roi_keeps_nms.to(torch.int64).split(batch_splits))
|
| 518 |
+
else:
|
| 519 |
+
results = [results]
|
| 520 |
+
kept_indices = [roi_keeps_nms]
|
| 521 |
+
|
| 522 |
+
return results, kept_indices
|
| 523 |
+
|
| 524 |
+
|
| 525 |
+
class Caffe2MaskRCNNInference:
|
| 526 |
+
def __call__(self, pred_mask_logits, pred_instances):
|
| 527 |
+
"""equivalent to mask_head.mask_rcnn_inference"""
|
| 528 |
+
if all(isinstance(x, InstancesList) for x in pred_instances):
|
| 529 |
+
assert len(pred_instances) == 1
|
| 530 |
+
mask_probs_pred = pred_mask_logits.sigmoid()
|
| 531 |
+
mask_probs_pred = alias(mask_probs_pred, "mask_fcn_probs")
|
| 532 |
+
pred_instances[0].set("pred_masks", mask_probs_pred)
|
| 533 |
+
else:
|
| 534 |
+
mask_rcnn_inference(pred_mask_logits, pred_instances)
|
| 535 |
+
|
| 536 |
+
|
| 537 |
+
class Caffe2KeypointRCNNInference:
|
| 538 |
+
def __init__(self, use_heatmap_max_keypoint):
|
| 539 |
+
self.use_heatmap_max_keypoint = use_heatmap_max_keypoint
|
| 540 |
+
|
| 541 |
+
def __call__(self, pred_keypoint_logits, pred_instances):
|
| 542 |
+
# just return the keypoint heatmap for now,
|
| 543 |
+
# there will be option to call HeatmapMaxKeypointOp
|
| 544 |
+
output = alias(pred_keypoint_logits, "kps_score")
|
| 545 |
+
if all(isinstance(x, InstancesList) for x in pred_instances):
|
| 546 |
+
assert len(pred_instances) == 1
|
| 547 |
+
if self.use_heatmap_max_keypoint:
|
| 548 |
+
device = output.device
|
| 549 |
+
output = torch.ops._caffe2.HeatmapMaxKeypoint(
|
| 550 |
+
to_device(output, "cpu"),
|
| 551 |
+
pred_instances[0].pred_boxes.tensor,
|
| 552 |
+
should_output_softmax=True, # worth make it configerable?
|
| 553 |
+
)
|
| 554 |
+
output = to_device(output, device)
|
| 555 |
+
output = alias(output, "keypoints_out")
|
| 556 |
+
pred_instances[0].set("pred_keypoints", output)
|
| 557 |
+
return pred_keypoint_logits
|
RAVE-main/annotator/oneformer/detectron2/export/caffe2_export.py
ADDED
|
@@ -0,0 +1,203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
|
| 3 |
+
import copy
|
| 4 |
+
import io
|
| 5 |
+
import logging
|
| 6 |
+
import numpy as np
|
| 7 |
+
from typing import List
|
| 8 |
+
import onnx
|
| 9 |
+
import onnx.optimizer
|
| 10 |
+
import torch
|
| 11 |
+
from caffe2.proto import caffe2_pb2
|
| 12 |
+
from caffe2.python import core
|
| 13 |
+
from caffe2.python.onnx.backend import Caffe2Backend
|
| 14 |
+
from tabulate import tabulate
|
| 15 |
+
from termcolor import colored
|
| 16 |
+
from torch.onnx import OperatorExportTypes
|
| 17 |
+
|
| 18 |
+
from .shared import (
|
| 19 |
+
ScopedWS,
|
| 20 |
+
construct_init_net_from_params,
|
| 21 |
+
fuse_alias_placeholder,
|
| 22 |
+
fuse_copy_between_cpu_and_gpu,
|
| 23 |
+
get_params_from_init_net,
|
| 24 |
+
group_norm_replace_aten_with_caffe2,
|
| 25 |
+
infer_device_type,
|
| 26 |
+
remove_dead_end_ops,
|
| 27 |
+
remove_reshape_for_fc,
|
| 28 |
+
save_graph,
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
logger = logging.getLogger(__name__)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def export_onnx_model(model, inputs):
|
| 35 |
+
"""
|
| 36 |
+
Trace and export a model to onnx format.
|
| 37 |
+
|
| 38 |
+
Args:
|
| 39 |
+
model (nn.Module):
|
| 40 |
+
inputs (tuple[args]): the model will be called by `model(*inputs)`
|
| 41 |
+
|
| 42 |
+
Returns:
|
| 43 |
+
an onnx model
|
| 44 |
+
"""
|
| 45 |
+
assert isinstance(model, torch.nn.Module)
|
| 46 |
+
|
| 47 |
+
# make sure all modules are in eval mode, onnx may change the training state
|
| 48 |
+
# of the module if the states are not consistent
|
| 49 |
+
def _check_eval(module):
|
| 50 |
+
assert not module.training
|
| 51 |
+
|
| 52 |
+
model.apply(_check_eval)
|
| 53 |
+
|
| 54 |
+
# Export the model to ONNX
|
| 55 |
+
with torch.no_grad():
|
| 56 |
+
with io.BytesIO() as f:
|
| 57 |
+
torch.onnx.export(
|
| 58 |
+
model,
|
| 59 |
+
inputs,
|
| 60 |
+
f,
|
| 61 |
+
operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
|
| 62 |
+
# verbose=True, # NOTE: uncomment this for debugging
|
| 63 |
+
# export_params=True,
|
| 64 |
+
)
|
| 65 |
+
onnx_model = onnx.load_from_string(f.getvalue())
|
| 66 |
+
|
| 67 |
+
return onnx_model
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def _op_stats(net_def):
|
| 71 |
+
type_count = {}
|
| 72 |
+
for t in [op.type for op in net_def.op]:
|
| 73 |
+
type_count[t] = type_count.get(t, 0) + 1
|
| 74 |
+
type_count_list = sorted(type_count.items(), key=lambda kv: kv[0]) # alphabet
|
| 75 |
+
type_count_list = sorted(type_count_list, key=lambda kv: -kv[1]) # count
|
| 76 |
+
return "\n".join("{:>4}x {}".format(count, name) for name, count in type_count_list)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def _assign_device_option(
|
| 80 |
+
predict_net: caffe2_pb2.NetDef, init_net: caffe2_pb2.NetDef, tensor_inputs: List[torch.Tensor]
|
| 81 |
+
):
|
| 82 |
+
"""
|
| 83 |
+
ONNX exported network doesn't have concept of device, assign necessary
|
| 84 |
+
device option for each op in order to make it runable on GPU runtime.
|
| 85 |
+
"""
|
| 86 |
+
|
| 87 |
+
def _get_device_type(torch_tensor):
|
| 88 |
+
assert torch_tensor.device.type in ["cpu", "cuda"]
|
| 89 |
+
assert torch_tensor.device.index == 0
|
| 90 |
+
return torch_tensor.device.type
|
| 91 |
+
|
| 92 |
+
def _assign_op_device_option(net_proto, net_ssa, blob_device_types):
|
| 93 |
+
for op, ssa_i in zip(net_proto.op, net_ssa):
|
| 94 |
+
if op.type in ["CopyCPUToGPU", "CopyGPUToCPU"]:
|
| 95 |
+
op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0))
|
| 96 |
+
else:
|
| 97 |
+
devices = [blob_device_types[b] for b in ssa_i[0] + ssa_i[1]]
|
| 98 |
+
assert all(d == devices[0] for d in devices)
|
| 99 |
+
if devices[0] == "cuda":
|
| 100 |
+
op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0))
|
| 101 |
+
|
| 102 |
+
# update ops in predict_net
|
| 103 |
+
predict_net_input_device_types = {
|
| 104 |
+
(name, 0): _get_device_type(tensor)
|
| 105 |
+
for name, tensor in zip(predict_net.external_input, tensor_inputs)
|
| 106 |
+
}
|
| 107 |
+
predict_net_device_types = infer_device_type(
|
| 108 |
+
predict_net, known_status=predict_net_input_device_types, device_name_style="pytorch"
|
| 109 |
+
)
|
| 110 |
+
predict_net_ssa, _ = core.get_ssa(predict_net)
|
| 111 |
+
_assign_op_device_option(predict_net, predict_net_ssa, predict_net_device_types)
|
| 112 |
+
|
| 113 |
+
# update ops in init_net
|
| 114 |
+
init_net_ssa, versions = core.get_ssa(init_net)
|
| 115 |
+
init_net_output_device_types = {
|
| 116 |
+
(name, versions[name]): predict_net_device_types[(name, 0)]
|
| 117 |
+
for name in init_net.external_output
|
| 118 |
+
}
|
| 119 |
+
init_net_device_types = infer_device_type(
|
| 120 |
+
init_net, known_status=init_net_output_device_types, device_name_style="pytorch"
|
| 121 |
+
)
|
| 122 |
+
_assign_op_device_option(init_net, init_net_ssa, init_net_device_types)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def export_caffe2_detection_model(model: torch.nn.Module, tensor_inputs: List[torch.Tensor]):
|
| 126 |
+
"""
|
| 127 |
+
Export a caffe2-compatible Detectron2 model to caffe2 format via ONNX.
|
| 128 |
+
|
| 129 |
+
Arg:
|
| 130 |
+
model: a caffe2-compatible version of detectron2 model, defined in caffe2_modeling.py
|
| 131 |
+
tensor_inputs: a list of tensors that caffe2 model takes as input.
|
| 132 |
+
"""
|
| 133 |
+
model = copy.deepcopy(model)
|
| 134 |
+
assert isinstance(model, torch.nn.Module)
|
| 135 |
+
assert hasattr(model, "encode_additional_info")
|
| 136 |
+
|
| 137 |
+
# Export via ONNX
|
| 138 |
+
logger.info(
|
| 139 |
+
"Exporting a {} model via ONNX ...".format(type(model).__name__)
|
| 140 |
+
+ " Some warnings from ONNX are expected and are usually not to worry about."
|
| 141 |
+
)
|
| 142 |
+
onnx_model = export_onnx_model(model, (tensor_inputs,))
|
| 143 |
+
# Convert ONNX model to Caffe2 protobuf
|
| 144 |
+
init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
|
| 145 |
+
ops_table = [[op.type, op.input, op.output] for op in predict_net.op]
|
| 146 |
+
table = tabulate(ops_table, headers=["type", "input", "output"], tablefmt="pipe")
|
| 147 |
+
logger.info(
|
| 148 |
+
"ONNX export Done. Exported predict_net (before optimizations):\n" + colored(table, "cyan")
|
| 149 |
+
)
|
| 150 |
+
|
| 151 |
+
# Apply protobuf optimization
|
| 152 |
+
fuse_alias_placeholder(predict_net, init_net)
|
| 153 |
+
if any(t.device.type != "cpu" for t in tensor_inputs):
|
| 154 |
+
fuse_copy_between_cpu_and_gpu(predict_net)
|
| 155 |
+
remove_dead_end_ops(init_net)
|
| 156 |
+
_assign_device_option(predict_net, init_net, tensor_inputs)
|
| 157 |
+
params, device_options = get_params_from_init_net(init_net)
|
| 158 |
+
predict_net, params = remove_reshape_for_fc(predict_net, params)
|
| 159 |
+
init_net = construct_init_net_from_params(params, device_options)
|
| 160 |
+
group_norm_replace_aten_with_caffe2(predict_net)
|
| 161 |
+
|
| 162 |
+
# Record necessary information for running the pb model in Detectron2 system.
|
| 163 |
+
model.encode_additional_info(predict_net, init_net)
|
| 164 |
+
|
| 165 |
+
logger.info("Operators used in predict_net: \n{}".format(_op_stats(predict_net)))
|
| 166 |
+
logger.info("Operators used in init_net: \n{}".format(_op_stats(init_net)))
|
| 167 |
+
|
| 168 |
+
return predict_net, init_net
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def run_and_save_graph(predict_net, init_net, tensor_inputs, graph_save_path):
|
| 172 |
+
"""
|
| 173 |
+
Run the caffe2 model on given inputs, recording the shape and draw the graph.
|
| 174 |
+
|
| 175 |
+
predict_net/init_net: caffe2 model.
|
| 176 |
+
tensor_inputs: a list of tensors that caffe2 model takes as input.
|
| 177 |
+
graph_save_path: path for saving graph of exported model.
|
| 178 |
+
"""
|
| 179 |
+
|
| 180 |
+
logger.info("Saving graph of ONNX exported model to {} ...".format(graph_save_path))
|
| 181 |
+
save_graph(predict_net, graph_save_path, op_only=False)
|
| 182 |
+
|
| 183 |
+
# Run the exported Caffe2 net
|
| 184 |
+
logger.info("Running ONNX exported model ...")
|
| 185 |
+
with ScopedWS("__ws_tmp__", True) as ws:
|
| 186 |
+
ws.RunNetOnce(init_net)
|
| 187 |
+
initialized_blobs = set(ws.Blobs())
|
| 188 |
+
uninitialized = [inp for inp in predict_net.external_input if inp not in initialized_blobs]
|
| 189 |
+
for name, blob in zip(uninitialized, tensor_inputs):
|
| 190 |
+
ws.FeedBlob(name, blob)
|
| 191 |
+
|
| 192 |
+
try:
|
| 193 |
+
ws.RunNetOnce(predict_net)
|
| 194 |
+
except RuntimeError as e:
|
| 195 |
+
logger.warning("Encountered RuntimeError: \n{}".format(str(e)))
|
| 196 |
+
|
| 197 |
+
ws_blobs = {b: ws.FetchBlob(b) for b in ws.Blobs()}
|
| 198 |
+
blob_sizes = {b: ws_blobs[b].shape for b in ws_blobs if isinstance(ws_blobs[b], np.ndarray)}
|
| 199 |
+
|
| 200 |
+
logger.info("Saving graph with blob shapes to {} ...".format(graph_save_path))
|
| 201 |
+
save_graph(predict_net, graph_save_path, op_only=False, blob_sizes=blob_sizes)
|
| 202 |
+
|
| 203 |
+
return ws_blobs
|
RAVE-main/annotator/oneformer/detectron2/export/caffe2_inference.py
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
|
| 3 |
+
import logging
|
| 4 |
+
import numpy as np
|
| 5 |
+
from itertools import count
|
| 6 |
+
import torch
|
| 7 |
+
from caffe2.proto import caffe2_pb2
|
| 8 |
+
from caffe2.python import core
|
| 9 |
+
|
| 10 |
+
from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format
|
| 11 |
+
from .shared import ScopedWS, get_pb_arg_vali, get_pb_arg_vals, infer_device_type
|
| 12 |
+
|
| 13 |
+
logger = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# ===== ref: mobile-vision predictor's 'Caffe2Wrapper' class ======
|
| 17 |
+
class ProtobufModel(torch.nn.Module):
|
| 18 |
+
"""
|
| 19 |
+
Wrapper of a caffe2's protobuf model.
|
| 20 |
+
It works just like nn.Module, but running caffe2 under the hood.
|
| 21 |
+
Input/Output are tuple[tensor] that match the caffe2 net's external_input/output.
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
_ids = count(0)
|
| 25 |
+
|
| 26 |
+
def __init__(self, predict_net, init_net):
|
| 27 |
+
logger.info(f"Initializing ProtobufModel for: {predict_net.name} ...")
|
| 28 |
+
super().__init__()
|
| 29 |
+
assert isinstance(predict_net, caffe2_pb2.NetDef)
|
| 30 |
+
assert isinstance(init_net, caffe2_pb2.NetDef)
|
| 31 |
+
# create unique temporary workspace for each instance
|
| 32 |
+
self.ws_name = "__tmp_ProtobufModel_{}__".format(next(self._ids))
|
| 33 |
+
self.net = core.Net(predict_net)
|
| 34 |
+
|
| 35 |
+
logger.info("Running init_net once to fill the parameters ...")
|
| 36 |
+
with ScopedWS(self.ws_name, is_reset=True, is_cleanup=False) as ws:
|
| 37 |
+
ws.RunNetOnce(init_net)
|
| 38 |
+
uninitialized_external_input = []
|
| 39 |
+
for blob in self.net.Proto().external_input:
|
| 40 |
+
if blob not in ws.Blobs():
|
| 41 |
+
uninitialized_external_input.append(blob)
|
| 42 |
+
ws.CreateBlob(blob)
|
| 43 |
+
ws.CreateNet(self.net)
|
| 44 |
+
|
| 45 |
+
self._error_msgs = set()
|
| 46 |
+
self._input_blobs = uninitialized_external_input
|
| 47 |
+
|
| 48 |
+
def _infer_output_devices(self, inputs):
|
| 49 |
+
"""
|
| 50 |
+
Returns:
|
| 51 |
+
list[str]: list of device for each external output
|
| 52 |
+
"""
|
| 53 |
+
|
| 54 |
+
def _get_device_type(torch_tensor):
|
| 55 |
+
assert torch_tensor.device.type in ["cpu", "cuda"]
|
| 56 |
+
assert torch_tensor.device.index == 0
|
| 57 |
+
return torch_tensor.device.type
|
| 58 |
+
|
| 59 |
+
predict_net = self.net.Proto()
|
| 60 |
+
input_device_types = {
|
| 61 |
+
(name, 0): _get_device_type(tensor) for name, tensor in zip(self._input_blobs, inputs)
|
| 62 |
+
}
|
| 63 |
+
device_type_map = infer_device_type(
|
| 64 |
+
predict_net, known_status=input_device_types, device_name_style="pytorch"
|
| 65 |
+
)
|
| 66 |
+
ssa, versions = core.get_ssa(predict_net)
|
| 67 |
+
versioned_outputs = [(name, versions[name]) for name in predict_net.external_output]
|
| 68 |
+
output_devices = [device_type_map[outp] for outp in versioned_outputs]
|
| 69 |
+
return output_devices
|
| 70 |
+
|
| 71 |
+
def forward(self, inputs):
|
| 72 |
+
"""
|
| 73 |
+
Args:
|
| 74 |
+
inputs (tuple[torch.Tensor])
|
| 75 |
+
|
| 76 |
+
Returns:
|
| 77 |
+
tuple[torch.Tensor]
|
| 78 |
+
"""
|
| 79 |
+
assert len(inputs) == len(self._input_blobs), (
|
| 80 |
+
f"Length of inputs ({len(inputs)}) "
|
| 81 |
+
f"doesn't match the required input blobs: {self._input_blobs}"
|
| 82 |
+
)
|
| 83 |
+
|
| 84 |
+
with ScopedWS(self.ws_name, is_reset=False, is_cleanup=False) as ws:
|
| 85 |
+
for b, tensor in zip(self._input_blobs, inputs):
|
| 86 |
+
ws.FeedBlob(b, tensor)
|
| 87 |
+
|
| 88 |
+
try:
|
| 89 |
+
ws.RunNet(self.net.Proto().name)
|
| 90 |
+
except RuntimeError as e:
|
| 91 |
+
if not str(e) in self._error_msgs:
|
| 92 |
+
self._error_msgs.add(str(e))
|
| 93 |
+
logger.warning("Encountered new RuntimeError: \n{}".format(str(e)))
|
| 94 |
+
logger.warning("Catch the error and use partial results.")
|
| 95 |
+
|
| 96 |
+
c2_outputs = [ws.FetchBlob(b) for b in self.net.Proto().external_output]
|
| 97 |
+
# Remove outputs of current run, this is necessary in order to
|
| 98 |
+
# prevent fetching the result from previous run if the model fails
|
| 99 |
+
# in the middle.
|
| 100 |
+
for b in self.net.Proto().external_output:
|
| 101 |
+
# Needs to create uninitialized blob to make the net runable.
|
| 102 |
+
# This is "equivalent" to: ws.RemoveBlob(b) then ws.CreateBlob(b),
|
| 103 |
+
# but there'no such API.
|
| 104 |
+
ws.FeedBlob(b, f"{b}, a C++ native class of type nullptr (uninitialized).")
|
| 105 |
+
|
| 106 |
+
# Cast output to torch.Tensor on the desired device
|
| 107 |
+
output_devices = (
|
| 108 |
+
self._infer_output_devices(inputs)
|
| 109 |
+
if any(t.device.type != "cpu" for t in inputs)
|
| 110 |
+
else ["cpu" for _ in self.net.Proto().external_output]
|
| 111 |
+
)
|
| 112 |
+
|
| 113 |
+
outputs = []
|
| 114 |
+
for name, c2_output, device in zip(
|
| 115 |
+
self.net.Proto().external_output, c2_outputs, output_devices
|
| 116 |
+
):
|
| 117 |
+
if not isinstance(c2_output, np.ndarray):
|
| 118 |
+
raise RuntimeError(
|
| 119 |
+
"Invalid output for blob {}, received: {}".format(name, c2_output)
|
| 120 |
+
)
|
| 121 |
+
outputs.append(torch.tensor(c2_output).to(device=device))
|
| 122 |
+
return tuple(outputs)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
class ProtobufDetectionModel(torch.nn.Module):
|
| 126 |
+
"""
|
| 127 |
+
A class works just like a pytorch meta arch in terms of inference, but running
|
| 128 |
+
caffe2 model under the hood.
|
| 129 |
+
"""
|
| 130 |
+
|
| 131 |
+
def __init__(self, predict_net, init_net, *, convert_outputs=None):
|
| 132 |
+
"""
|
| 133 |
+
Args:
|
| 134 |
+
predict_net, init_net (core.Net): caffe2 nets
|
| 135 |
+
convert_outptus (callable): a function that converts caffe2
|
| 136 |
+
outputs to the same format of the original pytorch model.
|
| 137 |
+
By default, use the one defined in the caffe2 meta_arch.
|
| 138 |
+
"""
|
| 139 |
+
super().__init__()
|
| 140 |
+
self.protobuf_model = ProtobufModel(predict_net, init_net)
|
| 141 |
+
self.size_divisibility = get_pb_arg_vali(predict_net, "size_divisibility", 0)
|
| 142 |
+
self.device = get_pb_arg_vals(predict_net, "device", b"cpu").decode("ascii")
|
| 143 |
+
|
| 144 |
+
if convert_outputs is None:
|
| 145 |
+
meta_arch = get_pb_arg_vals(predict_net, "meta_architecture", b"GeneralizedRCNN")
|
| 146 |
+
meta_arch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[meta_arch.decode("ascii")]
|
| 147 |
+
self._convert_outputs = meta_arch.get_outputs_converter(predict_net, init_net)
|
| 148 |
+
else:
|
| 149 |
+
self._convert_outputs = convert_outputs
|
| 150 |
+
|
| 151 |
+
def _convert_inputs(self, batched_inputs):
|
| 152 |
+
# currently all models convert inputs in the same way
|
| 153 |
+
return convert_batched_inputs_to_c2_format(
|
| 154 |
+
batched_inputs, self.size_divisibility, self.device
|
| 155 |
+
)
|
| 156 |
+
|
| 157 |
+
def forward(self, batched_inputs):
|
| 158 |
+
c2_inputs = self._convert_inputs(batched_inputs)
|
| 159 |
+
c2_results = self.protobuf_model(c2_inputs)
|
| 160 |
+
c2_results = dict(zip(self.protobuf_model.net.Proto().external_output, c2_results))
|
| 161 |
+
return self._convert_outputs(batched_inputs, c2_inputs, c2_results)
|
RAVE-main/annotator/oneformer/detectron2/export/caffe2_modeling.py
ADDED
|
@@ -0,0 +1,419 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
|
| 3 |
+
import functools
|
| 4 |
+
import io
|
| 5 |
+
import struct
|
| 6 |
+
import types
|
| 7 |
+
import torch
|
| 8 |
+
|
| 9 |
+
from annotator.oneformer.detectron2.modeling import meta_arch
|
| 10 |
+
from annotator.oneformer.detectron2.modeling.box_regression import Box2BoxTransform
|
| 11 |
+
from annotator.oneformer.detectron2.modeling.roi_heads import keypoint_head
|
| 12 |
+
from annotator.oneformer.detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes
|
| 13 |
+
|
| 14 |
+
from .c10 import Caffe2Compatible
|
| 15 |
+
from .caffe2_patch import ROIHeadsPatcher, patch_generalized_rcnn
|
| 16 |
+
from .shared import (
|
| 17 |
+
alias,
|
| 18 |
+
check_set_pb_arg,
|
| 19 |
+
get_pb_arg_floats,
|
| 20 |
+
get_pb_arg_valf,
|
| 21 |
+
get_pb_arg_vali,
|
| 22 |
+
get_pb_arg_vals,
|
| 23 |
+
mock_torch_nn_functional_interpolate,
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def assemble_rcnn_outputs_by_name(image_sizes, tensor_outputs, force_mask_on=False):
    """
    A function to assemble caffe2 model's outputs (i.e. Dict[str, Tensor])
    to detectron2's format (i.e. list of Instances instance).
    This only works when the model follows the Caffe2 detectron's naming convention.

    Args:
        image_sizes (List[List[int, int]]): [H, W] of every image.
        tensor_outputs (Dict[str, Tensor]): external_output to its tensor.

        force_mask_on (Bool): if true, make sure there'll be pred_masks even
            if the mask is not found from tensor_outputs (usually due to model crash)
    """

    results = [Instances(image_size) for image_size in image_sizes]

    # Multi-image batches would need "batch_splits" handling, which is not
    # implemented; only batch size 1 is supported below.
    batch_splits = tensor_outputs.get("batch_splits", None)
    if batch_splits:
        raise NotImplementedError()
    assert len(image_sizes) == 1
    result = results[0]

    bbox_nms = tensor_outputs["bbox_nms"]
    score_nms = tensor_outputs["score_nms"]
    class_nms = tensor_outputs["class_nms"]
    # Detection will always succeed because Conv supports 0-batch
    assert bbox_nms is not None
    assert score_nms is not None
    assert class_nms is not None
    if bbox_nms.shape[1] == 5:
        # 5 columns per box → rotated-box format
        result.pred_boxes = RotatedBoxes(bbox_nms)
    else:
        result.pred_boxes = Boxes(bbox_nms)
    result.scores = score_nms
    result.pred_classes = class_nms.to(torch.int64)

    mask_fcn_probs = tensor_outputs.get("mask_fcn_probs", None)
    if mask_fcn_probs is not None:
        # finish the mask pred: select, per detection, the mask channel of the
        # predicted class and keep a singleton channel dimension.
        mask_probs_pred = mask_fcn_probs
        num_masks = mask_probs_pred.shape[0]
        class_pred = result.pred_classes
        indices = torch.arange(num_masks, device=class_pred.device)
        mask_probs_pred = mask_probs_pred[indices, class_pred][:, None]
        result.pred_masks = mask_probs_pred
    elif force_mask_on:
        # NOTE: there's no way to know the height/width of mask here, it won't be
        # used anyway when batch size is 0, so just set them to 0.
        result.pred_masks = torch.zeros([0, 1, 0, 0], dtype=torch.uint8)

    keypoints_out = tensor_outputs.get("keypoints_out", None)
    kps_score = tensor_outputs.get("kps_score", None)
    if keypoints_out is not None:
        # keypoints_out: [N, 4, #kypoints], where 4 is in order of (x, y, score, prob)
        keypoints_tensor = keypoints_out
        # NOTE: it's possible that prob is not calculated if "should_output_softmax"
        # is set to False in HeatmapMaxKeypoint, so just using raw score, seems
        # it doesn't affect mAP. TODO: check more carefully.
        keypoint_xyp = keypoints_tensor.transpose(1, 2)[:, :, [0, 1, 2]]
        result.pred_keypoints = keypoint_xyp
    elif kps_score is not None:
        # keypoint heatmap to sparse data structure
        pred_keypoint_logits = kps_score
        keypoint_head.keypoint_rcnn_inference(pred_keypoint_logits, [result])

    return results
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def _cast_to_f32(f64):
|
| 96 |
+
return struct.unpack("f", struct.pack("f", f64))[0]
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def set_caffe2_compatible_tensor_mode(model, enable=True):
    """Set ``tensor_mode`` on every Caffe2Compatible submodule of ``model``."""

    def _toggle(module):
        if isinstance(module, Caffe2Compatible):
            module.tensor_mode = enable

    model.apply(_toggle)
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def convert_batched_inputs_to_c2_format(batched_inputs, size_divisibility, device):
    """
    Convert detectron2-style batched inputs to the caffe2-style
    (data, im_info) tensor pair. See get_caffe2_inputs() below.
    """
    assert all(isinstance(entry, dict) for entry in batched_inputs)
    assert all(entry["image"].dim() == 3 for entry in batched_inputs)

    image_list = ImageList.from_tensors(
        [entry["image"] for entry in batched_inputs], size_divisibility
    )

    rows = []
    for entry, image_size in zip(batched_inputs, image_list.image_sizes):
        target_height = entry.get("height", image_size[0])
        target_width = entry.get("width", image_size[1])  # noqa
        # NOTE: The scale inside im_info is kept as convention and for providing
        # post-processing information if further processing is needed. For
        # current Caffe2 model definitions that don't include post-processing inside
        # the model, this number is not used.
        # NOTE: There can be a slight difference between width and height
        # scales, using a single number can results in numerical difference
        # compared with D2's post-processing.
        rows.append([image_size[0], image_size[1], target_height / image_size[0]])
    im_info = torch.Tensor(rows)

    return image_list.tensor.to(device), im_info.to(device)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
class Caffe2MetaArch(Caffe2Compatible, torch.nn.Module):
    """
    Base class for caffe2-compatible implementation of a meta architecture.
    The forward is traceable and its traced graph can be converted to caffe2
    graph through ONNX.
    """

    def __init__(self, cfg, torch_model):
        """
        Args:
            cfg (CfgNode):
            torch_model (nn.Module): the detectron2 model (meta_arch) to be
                converted.
        """
        super().__init__()
        self._wrapped_model = torch_model
        # Export is inference-only: switch to eval mode and enable the
        # caffe2-compatible tensor mode on all submodules.
        self.eval()
        set_caffe2_compatible_tensor_mode(self, True)

    def get_caffe2_inputs(self, batched_inputs):
        """
        Convert pytorch-style structured inputs to caffe2-style inputs that
        are tuples of tensors.

        Args:
            batched_inputs (list[dict]): inputs to a detectron2 model
                in its standard format. Each dict has "image" (CHW tensor), and optionally
                "height" and "width".

        Returns:
            tuple[Tensor]:
                tuple of tensors that will be the inputs to the
                :meth:`forward` method. For existing models, the first
                is an NCHW tensor (padded and batched); the second is
                a im_info Nx3 tensor, where the rows are
                (height, width, unused legacy parameter)
        """
        return convert_batched_inputs_to_c2_format(
            batched_inputs,
            self._wrapped_model.backbone.size_divisibility,
            self._wrapped_model.device,
        )

    def encode_additional_info(self, predict_net, init_net):
        """
        Save extra metadata that will be used by inference in the output protobuf.
        """
        pass

    def forward(self, inputs):
        """
        Run the forward in caffe2-style. It has to use caffe2-compatible ops
        and the method will be used for tracing.

        Args:
            inputs (tuple[Tensor]): inputs defined by :meth:`get_caffe2_inputs`.
                They will be the inputs of the converted caffe2 graph.

        Returns:
            tuple[Tensor]: output tensors. They will be the outputs of the
                converted caffe2 graph.
        """
        raise NotImplementedError

    def _caffe2_preprocess_image(self, inputs):
        """
        Caffe2 implementation of preprocess_image, which is called inside each MetaArch's forward.
        It normalizes the input images, and the final caffe2 graph assumes the
        inputs have been batched already.
        """
        data, im_info = inputs
        # alias() names the tensors so they appear as stable blob names in the
        # traced caffe2 graph.
        data = alias(data, "data")
        im_info = alias(im_info, "im_info")
        mean, std = self._wrapped_model.pixel_mean, self._wrapped_model.pixel_std
        normalized_data = (data - mean) / std
        normalized_data = alias(normalized_data, "normalized_data")

        # Pack (data, im_info) into ImageList which is recognized by self.inference.
        images = ImageList(tensor=normalized_data, image_sizes=im_info)
        return images

    @staticmethod
    def get_outputs_converter(predict_net, init_net):
        """
        Creates a function that converts outputs of the caffe2 model to
        detectron2's standard format.
        The function uses information in `predict_net` and `init_net` that are
        available at inference time. Therefore the function logic can be used in inference.

        The returned function has the following signature:

            def convert(batched_inputs, c2_inputs, c2_results) -> detectron2_outputs

        Where

            * batched_inputs (list[dict]): the original input format of the meta arch
            * c2_inputs (tuple[Tensor]): the caffe2 inputs.
            * c2_results (dict[str, Tensor]): the caffe2 output format,
                corresponding to the outputs of the :meth:`forward` function.
            * detectron2_outputs: the original output format of the meta arch.

        This function can be used to compare the outputs of the original meta arch and
        the converted caffe2 graph.

        Returns:
            callable: a callable of the above signature.
        """
        raise NotImplementedError
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
class Caffe2GeneralizedRCNN(Caffe2MetaArch):
    """Caffe2-traceable wrapper of a detectron2 GeneralizedRCNN model."""

    def __init__(self, cfg, torch_model):
        assert isinstance(torch_model, meta_arch.GeneralizedRCNN)
        # Swap RPN / ROIPooler modules for their caffe2-compatible versions
        # before wrapping.
        torch_model = patch_generalized_rcnn(torch_model)
        super().__init__(cfg, torch_model)

        # Config key may be absent on older/stripped configs; fall back to False.
        try:
            use_heatmap_max_keypoint = cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT
        except AttributeError:
            use_heatmap_max_keypoint = False
        self.roi_heads_patcher = ROIHeadsPatcher(
            self._wrapped_model.roi_heads, use_heatmap_max_keypoint
        )

    def encode_additional_info(self, predict_net, init_net):
        # Store metadata needed at inference time as protobuf args.
        size_divisibility = self._wrapped_model.backbone.size_divisibility
        check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility)
        check_set_pb_arg(
            predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii")
        )
        check_set_pb_arg(predict_net, "meta_architecture", "s", b"GeneralizedRCNN")

    @mock_torch_nn_functional_interpolate()
    def forward(self, inputs):
        # Outside tensor mode, behave like the original pytorch model.
        if not self.tensor_mode:
            return self._wrapped_model.inference(inputs)
        images = self._caffe2_preprocess_image(inputs)
        features = self._wrapped_model.backbone(images.tensor)
        proposals, _ = self._wrapped_model.proposal_generator(images, features)
        # ROI-head inference functions are mocked with caffe2-compatible
        # implementations for the duration of the forward.
        with self.roi_heads_patcher.mock_roi_heads():
            detector_results, _ = self._wrapped_model.roi_heads(images, features, proposals)
        return tuple(detector_results[0].flatten())

    @staticmethod
    def get_outputs_converter(predict_net, init_net):
        def f(batched_inputs, c2_inputs, c2_results):
            _, im_info = c2_inputs
            image_sizes = [[int(im[0]), int(im[1])] for im in im_info]
            results = assemble_rcnn_outputs_by_name(image_sizes, c2_results)
            return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes)

        return f
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
class Caffe2RetinaNet(Caffe2MetaArch):
    """Caffe2-traceable wrapper of a detectron2 RetinaNet model."""

    def __init__(self, cfg, torch_model):
        assert isinstance(torch_model, meta_arch.RetinaNet)
        super().__init__(cfg, torch_model)

    @mock_torch_nn_functional_interpolate()
    def forward(self, inputs):
        assert self.tensor_mode
        images = self._caffe2_preprocess_image(inputs)

        # explicitly return the images sizes to avoid removing "im_info" by ONNX
        # since it's not used in the forward path
        return_tensors = [images.image_sizes]

        features = self._wrapped_model.backbone(images.tensor)
        features = [features[f] for f in self._wrapped_model.head_in_features]
        for i, feature_i in enumerate(features):
            # Name each feature blob so it survives in the exported graph.
            features[i] = alias(feature_i, "feature_{}".format(i), is_backward=True)
            return_tensors.append(features[i])

        pred_logits, pred_anchor_deltas = self._wrapped_model.head(features)
        for i, (box_cls_i, box_delta_i) in enumerate(zip(pred_logits, pred_anchor_deltas)):
            return_tensors.append(alias(box_cls_i, "box_cls_{}".format(i)))
            return_tensors.append(alias(box_delta_i, "box_delta_{}".format(i)))

        return tuple(return_tensors)

    def encode_additional_info(self, predict_net, init_net):
        size_divisibility = self._wrapped_model.backbone.size_divisibility
        check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility)
        check_set_pb_arg(
            predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii")
        )
        check_set_pb_arg(predict_net, "meta_architecture", "s", b"RetinaNet")

        # Inference parameters:
        check_set_pb_arg(
            predict_net, "score_threshold", "f", _cast_to_f32(self._wrapped_model.test_score_thresh)
        )
        check_set_pb_arg(
            predict_net, "topk_candidates", "i", self._wrapped_model.test_topk_candidates
        )
        check_set_pb_arg(
            predict_net, "nms_threshold", "f", _cast_to_f32(self._wrapped_model.test_nms_thresh)
        )
        check_set_pb_arg(
            predict_net,
            "max_detections_per_image",
            "i",
            self._wrapped_model.max_detections_per_image,
        )

        check_set_pb_arg(
            predict_net,
            "bbox_reg_weights",
            "floats",
            [_cast_to_f32(w) for w in self._wrapped_model.box2box_transform.weights],
        )
        self._encode_anchor_generator_cfg(predict_net)

    def _encode_anchor_generator_cfg(self, predict_net):
        # serialize anchor_generator for future use
        serialized_anchor_generator = io.BytesIO()
        torch.save(self._wrapped_model.anchor_generator, serialized_anchor_generator)
        # Ideally we can put anchor generating inside the model, then we don't
        # need to store this information.
        # NOTE(review): local `bytes` shadows the builtin; harmless here but
        # worth renaming if this method is ever touched.
        bytes = serialized_anchor_generator.getvalue()
        check_set_pb_arg(predict_net, "serialized_anchor_generator", "s", bytes)

    @staticmethod
    def get_outputs_converter(predict_net, init_net):
        # Build a namespace that mimics a RetinaNet instance just enough to
        # reuse its inference methods (bound below via functools.partial).
        self = types.SimpleNamespace()
        serialized_anchor_generator = io.BytesIO(
            get_pb_arg_vals(predict_net, "serialized_anchor_generator", None)
        )
        self.anchor_generator = torch.load(serialized_anchor_generator)
        bbox_reg_weights = get_pb_arg_floats(predict_net, "bbox_reg_weights", None)
        self.box2box_transform = Box2BoxTransform(weights=tuple(bbox_reg_weights))
        self.test_score_thresh = get_pb_arg_valf(predict_net, "score_threshold", None)
        self.test_topk_candidates = get_pb_arg_vali(predict_net, "topk_candidates", None)
        self.test_nms_thresh = get_pb_arg_valf(predict_net, "nms_threshold", None)
        self.max_detections_per_image = get_pb_arg_vali(
            predict_net, "max_detections_per_image", None
        )

        # hack to reuse inference code from RetinaNet
        for meth in [
            "forward_inference",
            "inference_single_image",
            "_transpose_dense_predictions",
            "_decode_multi_level_predictions",
            "_decode_per_level_predictions",
        ]:
            setattr(self, meth, functools.partial(getattr(meta_arch.RetinaNet, meth), self))

        def f(batched_inputs, c2_inputs, c2_results):
            _, im_info = c2_inputs
            image_sizes = [[int(im[0]), int(im[1])] for im in im_info]
            # Inference only needs image sizes, not pixel content, so random
            # tensors of the right shape are sufficient here.
            dummy_images = ImageList(
                torch.randn(
                    (
                        len(im_info),
                        3,
                    )
                    + tuple(image_sizes[0])
                ),
                image_sizes,
            )

            num_features = len([x for x in c2_results.keys() if x.startswith("box_cls_")])
            pred_logits = [c2_results["box_cls_{}".format(i)] for i in range(num_features)]
            pred_anchor_deltas = [c2_results["box_delta_{}".format(i)] for i in range(num_features)]

            # For each feature level, feature should have the same batch size and
            # spatial dimension as the box_cls and box_delta.
            dummy_features = [x.clone()[:, 0:0, :, :] for x in pred_logits]
            # self.num_classess can be inferred
            self.num_classes = pred_logits[0].shape[1] // (pred_anchor_deltas[0].shape[1] // 4)

            results = self.forward_inference(
                dummy_images, dummy_features, [pred_logits, pred_anchor_deltas]
            )
            return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes)

        return f
|
| 414 |
+
|
| 415 |
+
|
| 416 |
+
# Maps the "meta_architecture" string stored in an exported predict_net to the
# Caffe2MetaArch subclass that handles that architecture.
META_ARCH_CAFFE2_EXPORT_TYPE_MAP = {
    "GeneralizedRCNN": Caffe2GeneralizedRCNN,
    "RetinaNet": Caffe2RetinaNet,
}
|
RAVE-main/annotator/oneformer/detectron2/export/caffe2_patch.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
|
| 3 |
+
import contextlib
|
| 4 |
+
from unittest import mock
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
from annotator.oneformer.detectron2.modeling import poolers
|
| 8 |
+
from annotator.oneformer.detectron2.modeling.proposal_generator import rpn
|
| 9 |
+
from annotator.oneformer.detectron2.modeling.roi_heads import keypoint_head, mask_head
|
| 10 |
+
from annotator.oneformer.detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
|
| 11 |
+
|
| 12 |
+
from .c10 import (
|
| 13 |
+
Caffe2Compatible,
|
| 14 |
+
Caffe2FastRCNNOutputsInference,
|
| 15 |
+
Caffe2KeypointRCNNInference,
|
| 16 |
+
Caffe2MaskRCNNInference,
|
| 17 |
+
Caffe2ROIPooler,
|
| 18 |
+
Caffe2RPN,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class GenericMixin(object):
    # Marker base class: Caffe2CompatibleConverter treats subclasses of
    # GenericMixin as mixins (combined with the module's original class)
    # rather than as full replacement classes.
    pass
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class Caffe2CompatibleConverter(object):
    """
    A GenericUpdater implementing the `create_from` interface: it mutates a
    module in place by reassigning its class to (or mixing it with)
    ``replaceCls``.
    """

    def __init__(self, replaceCls):
        self.replaceCls = replaceCls

    def create_from(self, module):
        # update module's class to the new class
        assert isinstance(module, torch.nn.Module)
        replacement = self.replaceCls
        if issubclass(replacement, GenericMixin):
            # replaceCls acts as a mixin: synthesize a class on-the-fly that
            # inherits from both the mixin and the module's original class.
            mixed = type(
                "{}MixedWith{}".format(replacement.__name__, module.__class__.__name__),
                (replacement, module.__class__),
                {},  # {"new_method": lambda self: ...},
            )
            module.__class__ = mixed
        else:
            # replaceCls is a complete class, allowing an arbitrary class swap.
            module.__class__ = replacement

        # initialize Caffe2Compatible state on the swapped module
        if isinstance(module, Caffe2Compatible):
            module.tensor_mode = False

        return module
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def patch(model, target, updater, *args, **kwargs):
    """
    Recursively (post-order) update all modules of the target type (and its
    subclasses): children are patched first, then the module itself is run
    through ``updater.create_from`` if it matches.
    """
    for child_name, child in model.named_children():
        model._modules[child_name] = patch(child, target, updater, *args, **kwargs)
    if not isinstance(model, target):
        return model
    return updater.create_from(model, *args, **kwargs)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def patch_generalized_rcnn(model):
    """Swap RPN and ROIPooler submodules for their caffe2-compatible classes."""
    converter = Caffe2CompatibleConverter
    model = patch(model, rpn.RPN, converter(Caffe2RPN))
    model = patch(model, poolers.ROIPooler, converter(Caffe2ROIPooler))
    return model
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
@contextlib.contextmanager
def mock_fastrcnn_outputs_inference(
    tensor_mode, check=True, box_predictor_type=FastRCNNOutputLayers
):
    # Temporarily replace `box_predictor_type.inference` with the
    # caffe2-compatible implementation. With `check=True`, assert after the
    # block that the mocked function was actually called at least once.
    with mock.patch.object(
        box_predictor_type,
        "inference",
        autospec=True,
        side_effect=Caffe2FastRCNNOutputsInference(tensor_mode),
    ) as mocked_func:
        yield
    if check:
        assert mocked_func.call_count > 0
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
@contextlib.contextmanager
def mock_mask_rcnn_inference(tensor_mode, patched_module, check=True):
    # Temporarily replace `mask_rcnn_inference` in `patched_module` with the
    # caffe2-compatible implementation. Note `tensor_mode` is accepted for
    # signature symmetry with the other mock helpers but is not forwarded to
    # Caffe2MaskRCNNInference here.
    with mock.patch(
        "{}.mask_rcnn_inference".format(patched_module), side_effect=Caffe2MaskRCNNInference()
    ) as mocked_func:
        yield
    if check:
        assert mocked_func.call_count > 0
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
@contextlib.contextmanager
def mock_keypoint_rcnn_inference(tensor_mode, patched_module, use_heatmap_max_keypoint, check=True):
    # Temporarily replace `keypoint_rcnn_inference` in `patched_module` with
    # the caffe2-compatible implementation; `check` asserts the mock was hit.
    with mock.patch(
        "{}.keypoint_rcnn_inference".format(patched_module),
        side_effect=Caffe2KeypointRCNNInference(use_heatmap_max_keypoint),
    ) as mocked_func:
        yield
    if check:
        assert mocked_func.call_count > 0
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
class ROIHeadsPatcher:
    """Helper that patches a ROIHeads instance's inference functions with
    caffe2-compatible implementations for the duration of a context."""

    def __init__(self, heads, use_heatmap_max_keypoint):
        self.heads = heads
        self.use_heatmap_max_keypoint = use_heatmap_max_keypoint

    @contextlib.contextmanager
    def mock_roi_heads(self, tensor_mode=True):
        """
        Patching several inference functions inside ROIHeads and its subclasses

        Args:
            tensor_mode (bool): whether the inputs/outputs are caffe2's tensor
                format or not. Default to True.
        """
        # NOTE: this requires the `keypoint_rcnn_inference` and `mask_rcnn_inference`
        # are called inside the same file as BaseXxxHead due to using mock.patch.
        kpt_heads_mod = keypoint_head.BaseKeypointRCNNHead.__module__
        mask_head_mod = mask_head.BaseMaskRCNNHead.__module__

        # The box predictor is always patched; keypoint/mask patches are added
        # only when the corresponding head is enabled on this ROIHeads.
        mock_ctx_managers = [
            mock_fastrcnn_outputs_inference(
                tensor_mode=tensor_mode,
                check=True,
                box_predictor_type=type(self.heads.box_predictor),
            )
        ]
        if getattr(self.heads, "keypoint_on", False):
            mock_ctx_managers += [
                mock_keypoint_rcnn_inference(
                    tensor_mode, kpt_heads_mod, self.use_heatmap_max_keypoint
                )
            ]
        if getattr(self.heads, "mask_on", False):
            mock_ctx_managers += [mock_mask_rcnn_inference(tensor_mode, mask_head_mod)]

        with contextlib.ExitStack() as stack:  # python 3.3+
            for mgr in mock_ctx_managers:
                stack.enter_context(mgr)
            yield
|
RAVE-main/annotator/oneformer/detectron2/export/flatten.py
ADDED
|
@@ -0,0 +1,330 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
import collections
|
| 3 |
+
from dataclasses import dataclass
|
| 4 |
+
from typing import Callable, List, Optional, Tuple
|
| 5 |
+
import torch
|
| 6 |
+
from torch import nn
|
| 7 |
+
|
| 8 |
+
from annotator.oneformer.detectron2.structures import Boxes, Instances, ROIMasks
|
| 9 |
+
from annotator.oneformer.detectron2.utils.registry import _convert_target_to_string, locate
|
| 10 |
+
|
| 11 |
+
from .torchscript_patch import patch_builtin_len
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@dataclass
class Schema:
    """
    A Schema defines how to flatten a possibly hierarchical object into tuple of
    primitive objects, so it can be used as inputs/outputs of PyTorch's tracing.

    PyTorch does not support tracing a function that produces rich output
    structures (e.g. dict, Instances, Boxes). To trace such a function, we
    flatten the rich object into tuple of tensors, and return this tuple of tensors
    instead. Meanwhile, we also need to know how to "rebuild" the original object
    from the flattened results, so we can evaluate the flattened results.
    A Schema defines how to flatten an object, and while flattening it, it records
    necessary schemas so that the object can be rebuilt using the flattened outputs.

    The flattened object and the schema object is returned by ``.flatten`` classmethod.
    Then the original object can be rebuilt with the ``__call__`` method of schema.

    A Schema is a dataclass that can be serialized easily.
    """

    # inspired by FetchMapper in tensorflow/python/client/session.py

    @classmethod
    def flatten(cls, obj):
        raise NotImplementedError

    def __call__(self, values):
        raise NotImplementedError

    @staticmethod
    def _concat(values):
        # Concatenate a sequence of flattened tuples into one tuple, recording
        # each piece's length so the concatenation can be undone by _split.
        flat = ()
        lengths = []
        for piece in values:
            assert isinstance(piece, tuple), "Flattened results must be a tuple"
            flat = flat + piece
            lengths.append(len(piece))
        return flat, lengths

    @staticmethod
    def _split(values, sizes):
        # Inverse of _concat: cut `values` back into consecutive chunks whose
        # lengths are given by `sizes`.
        if len(sizes):
            expected_len = sum(sizes)
            assert (
                len(values) == expected_len
            ), f"Values has length {len(values)} but expect length {expected_len}."
        chunks = []
        offset = 0
        for size in sizes:
            chunks.append(values[offset : offset + size])
            offset += size
        return chunks
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@dataclass
|
| 68 |
+
class ListSchema(Schema):
|
| 69 |
+
schemas: List[Schema] # the schemas that define how to flatten each element in the list
|
| 70 |
+
sizes: List[int] # the flattened length of each element
|
| 71 |
+
|
| 72 |
+
def __call__(self, values):
|
| 73 |
+
values = self._split(values, self.sizes)
|
| 74 |
+
if len(values) != len(self.schemas):
|
| 75 |
+
raise ValueError(
|
| 76 |
+
f"Values has length {len(values)} but schemas " f"has length {len(self.schemas)}!"
|
| 77 |
+
)
|
| 78 |
+
values = [m(v) for m, v in zip(self.schemas, values)]
|
| 79 |
+
return list(values)
|
| 80 |
+
|
| 81 |
+
@classmethod
|
| 82 |
+
def flatten(cls, obj):
|
| 83 |
+
res = [flatten_to_tuple(k) for k in obj]
|
| 84 |
+
values, sizes = cls._concat([k[0] for k in res])
|
| 85 |
+
return values, cls([k[1] for k in res], sizes)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
@dataclass
|
| 89 |
+
class TupleSchema(ListSchema):
|
| 90 |
+
def __call__(self, values):
|
| 91 |
+
return tuple(super().__call__(values))
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
@dataclass
|
| 95 |
+
class IdentitySchema(Schema):
|
| 96 |
+
def __call__(self, values):
|
| 97 |
+
return values[0]
|
| 98 |
+
|
| 99 |
+
@classmethod
|
| 100 |
+
def flatten(cls, obj):
|
| 101 |
+
return (obj,), cls()
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
@dataclass
|
| 105 |
+
class DictSchema(ListSchema):
|
| 106 |
+
keys: List[str]
|
| 107 |
+
|
| 108 |
+
def __call__(self, values):
|
| 109 |
+
values = super().__call__(values)
|
| 110 |
+
return dict(zip(self.keys, values))
|
| 111 |
+
|
| 112 |
+
@classmethod
|
| 113 |
+
def flatten(cls, obj):
|
| 114 |
+
for k in obj.keys():
|
| 115 |
+
if not isinstance(k, str):
|
| 116 |
+
raise KeyError("Only support flattening dictionaries if keys are str.")
|
| 117 |
+
keys = sorted(obj.keys())
|
| 118 |
+
values = [obj[k] for k in keys]
|
| 119 |
+
ret, schema = ListSchema.flatten(values)
|
| 120 |
+
return ret, cls(schema.schemas, schema.sizes, keys)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
@dataclass
|
| 124 |
+
class InstancesSchema(DictSchema):
|
| 125 |
+
def __call__(self, values):
|
| 126 |
+
image_size, fields = values[-1], values[:-1]
|
| 127 |
+
fields = super().__call__(fields)
|
| 128 |
+
return Instances(image_size, **fields)
|
| 129 |
+
|
| 130 |
+
@classmethod
|
| 131 |
+
def flatten(cls, obj):
|
| 132 |
+
ret, schema = super().flatten(obj.get_fields())
|
| 133 |
+
size = obj.image_size
|
| 134 |
+
if not isinstance(size, torch.Tensor):
|
| 135 |
+
size = torch.tensor(size)
|
| 136 |
+
return ret + (size,), schema
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
@dataclass
|
| 140 |
+
class TensorWrapSchema(Schema):
|
| 141 |
+
"""
|
| 142 |
+
For classes that are simple wrapper of tensors, e.g.
|
| 143 |
+
Boxes, RotatedBoxes, BitMasks
|
| 144 |
+
"""
|
| 145 |
+
|
| 146 |
+
class_name: str
|
| 147 |
+
|
| 148 |
+
def __call__(self, values):
|
| 149 |
+
return locate(self.class_name)(values[0])
|
| 150 |
+
|
| 151 |
+
@classmethod
|
| 152 |
+
def flatten(cls, obj):
|
| 153 |
+
return (obj.tensor,), cls(_convert_target_to_string(type(obj)))
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
# if more custom structures needed in the future, can allow
|
| 157 |
+
# passing in extra schemas for custom types
|
| 158 |
+
def flatten_to_tuple(obj):
|
| 159 |
+
"""
|
| 160 |
+
Flatten an object so it can be used for PyTorch tracing.
|
| 161 |
+
Also returns how to rebuild the original object from the flattened outputs.
|
| 162 |
+
|
| 163 |
+
Returns:
|
| 164 |
+
res (tuple): the flattened results that can be used as tracing outputs
|
| 165 |
+
schema: an object with a ``__call__`` method such that ``schema(res) == obj``.
|
| 166 |
+
It is a pure dataclass that can be serialized.
|
| 167 |
+
"""
|
| 168 |
+
schemas = [
|
| 169 |
+
((str, bytes), IdentitySchema),
|
| 170 |
+
(list, ListSchema),
|
| 171 |
+
(tuple, TupleSchema),
|
| 172 |
+
(collections.abc.Mapping, DictSchema),
|
| 173 |
+
(Instances, InstancesSchema),
|
| 174 |
+
((Boxes, ROIMasks), TensorWrapSchema),
|
| 175 |
+
]
|
| 176 |
+
for klass, schema in schemas:
|
| 177 |
+
if isinstance(obj, klass):
|
| 178 |
+
F = schema
|
| 179 |
+
break
|
| 180 |
+
else:
|
| 181 |
+
F = IdentitySchema
|
| 182 |
+
|
| 183 |
+
return F.flatten(obj)
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
class TracingAdapter(nn.Module):
|
| 187 |
+
"""
|
| 188 |
+
A model may take rich input/output format (e.g. dict or custom classes),
|
| 189 |
+
but `torch.jit.trace` requires tuple of tensors as input/output.
|
| 190 |
+
This adapter flattens input/output format of a model so it becomes traceable.
|
| 191 |
+
|
| 192 |
+
It also records the necessary schema to rebuild model's inputs/outputs from flattened
|
| 193 |
+
inputs/outputs.
|
| 194 |
+
|
| 195 |
+
Example:
|
| 196 |
+
::
|
| 197 |
+
outputs = model(inputs) # inputs/outputs may be rich structure
|
| 198 |
+
adapter = TracingAdapter(model, inputs)
|
| 199 |
+
|
| 200 |
+
# can now trace the model, with adapter.flattened_inputs, or another
|
| 201 |
+
# tuple of tensors with the same length and meaning
|
| 202 |
+
traced = torch.jit.trace(adapter, adapter.flattened_inputs)
|
| 203 |
+
|
| 204 |
+
# traced model can only produce flattened outputs (tuple of tensors)
|
| 205 |
+
flattened_outputs = traced(*adapter.flattened_inputs)
|
| 206 |
+
# adapter knows the schema to convert it back (new_outputs == outputs)
|
| 207 |
+
new_outputs = adapter.outputs_schema(flattened_outputs)
|
| 208 |
+
"""
|
| 209 |
+
|
| 210 |
+
flattened_inputs: Tuple[torch.Tensor] = None
|
| 211 |
+
"""
|
| 212 |
+
Flattened version of inputs given to this class's constructor.
|
| 213 |
+
"""
|
| 214 |
+
|
| 215 |
+
inputs_schema: Schema = None
|
| 216 |
+
"""
|
| 217 |
+
Schema of the inputs given to this class's constructor.
|
| 218 |
+
"""
|
| 219 |
+
|
| 220 |
+
outputs_schema: Schema = None
|
| 221 |
+
"""
|
| 222 |
+
Schema of the output produced by calling the given model with inputs.
|
| 223 |
+
"""
|
| 224 |
+
|
| 225 |
+
def __init__(
|
| 226 |
+
self,
|
| 227 |
+
model: nn.Module,
|
| 228 |
+
inputs,
|
| 229 |
+
inference_func: Optional[Callable] = None,
|
| 230 |
+
allow_non_tensor: bool = False,
|
| 231 |
+
):
|
| 232 |
+
"""
|
| 233 |
+
Args:
|
| 234 |
+
model: an nn.Module
|
| 235 |
+
inputs: An input argument or a tuple of input arguments used to call model.
|
| 236 |
+
After flattening, it has to only consist of tensors.
|
| 237 |
+
inference_func: a callable that takes (model, *inputs), calls the
|
| 238 |
+
model with inputs, and return outputs. By default it
|
| 239 |
+
is ``lambda model, *inputs: model(*inputs)``. Can be override
|
| 240 |
+
if you need to call the model differently.
|
| 241 |
+
allow_non_tensor: allow inputs/outputs to contain non-tensor objects.
|
| 242 |
+
This option will filter out non-tensor objects to make the
|
| 243 |
+
model traceable, but ``inputs_schema``/``outputs_schema`` cannot be
|
| 244 |
+
used anymore because inputs/outputs cannot be rebuilt from pure tensors.
|
| 245 |
+
This is useful when you're only interested in the single trace of
|
| 246 |
+
execution (e.g. for flop count), but not interested in
|
| 247 |
+
generalizing the traced graph to new inputs.
|
| 248 |
+
"""
|
| 249 |
+
super().__init__()
|
| 250 |
+
if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)):
|
| 251 |
+
model = model.module
|
| 252 |
+
self.model = model
|
| 253 |
+
if not isinstance(inputs, tuple):
|
| 254 |
+
inputs = (inputs,)
|
| 255 |
+
self.inputs = inputs
|
| 256 |
+
self.allow_non_tensor = allow_non_tensor
|
| 257 |
+
|
| 258 |
+
if inference_func is None:
|
| 259 |
+
inference_func = lambda model, *inputs: model(*inputs) # noqa
|
| 260 |
+
self.inference_func = inference_func
|
| 261 |
+
|
| 262 |
+
self.flattened_inputs, self.inputs_schema = flatten_to_tuple(inputs)
|
| 263 |
+
|
| 264 |
+
if all(isinstance(x, torch.Tensor) for x in self.flattened_inputs):
|
| 265 |
+
return
|
| 266 |
+
if self.allow_non_tensor:
|
| 267 |
+
self.flattened_inputs = tuple(
|
| 268 |
+
[x for x in self.flattened_inputs if isinstance(x, torch.Tensor)]
|
| 269 |
+
)
|
| 270 |
+
self.inputs_schema = None
|
| 271 |
+
else:
|
| 272 |
+
for input in self.flattened_inputs:
|
| 273 |
+
if not isinstance(input, torch.Tensor):
|
| 274 |
+
raise ValueError(
|
| 275 |
+
"Inputs for tracing must only contain tensors. "
|
| 276 |
+
f"Got a {type(input)} instead."
|
| 277 |
+
)
|
| 278 |
+
|
| 279 |
+
def forward(self, *args: torch.Tensor):
|
| 280 |
+
with torch.no_grad(), patch_builtin_len():
|
| 281 |
+
if self.inputs_schema is not None:
|
| 282 |
+
inputs_orig_format = self.inputs_schema(args)
|
| 283 |
+
else:
|
| 284 |
+
if len(args) != len(self.flattened_inputs) or any(
|
| 285 |
+
x is not y for x, y in zip(args, self.flattened_inputs)
|
| 286 |
+
):
|
| 287 |
+
raise ValueError(
|
| 288 |
+
"TracingAdapter does not contain valid inputs_schema."
|
| 289 |
+
" So it cannot generalize to other inputs and must be"
|
| 290 |
+
" traced with `.flattened_inputs`."
|
| 291 |
+
)
|
| 292 |
+
inputs_orig_format = self.inputs
|
| 293 |
+
|
| 294 |
+
outputs = self.inference_func(self.model, *inputs_orig_format)
|
| 295 |
+
flattened_outputs, schema = flatten_to_tuple(outputs)
|
| 296 |
+
|
| 297 |
+
flattened_output_tensors = tuple(
|
| 298 |
+
[x for x in flattened_outputs if isinstance(x, torch.Tensor)]
|
| 299 |
+
)
|
| 300 |
+
if len(flattened_output_tensors) < len(flattened_outputs):
|
| 301 |
+
if self.allow_non_tensor:
|
| 302 |
+
flattened_outputs = flattened_output_tensors
|
| 303 |
+
self.outputs_schema = None
|
| 304 |
+
else:
|
| 305 |
+
raise ValueError(
|
| 306 |
+
"Model cannot be traced because some model outputs "
|
| 307 |
+
"cannot flatten to tensors."
|
| 308 |
+
)
|
| 309 |
+
else: # schema is valid
|
| 310 |
+
if self.outputs_schema is None:
|
| 311 |
+
self.outputs_schema = schema
|
| 312 |
+
else:
|
| 313 |
+
assert self.outputs_schema == schema, (
|
| 314 |
+
"Model should always return outputs with the same "
|
| 315 |
+
"structure so it can be traced!"
|
| 316 |
+
)
|
| 317 |
+
return flattened_outputs
|
| 318 |
+
|
| 319 |
+
def _create_wrapper(self, traced_model):
|
| 320 |
+
"""
|
| 321 |
+
Return a function that has an input/output interface the same as the
|
| 322 |
+
original model, but it calls the given traced model under the hood.
|
| 323 |
+
"""
|
| 324 |
+
|
| 325 |
+
def forward(*args):
|
| 326 |
+
flattened_inputs, _ = flatten_to_tuple(args)
|
| 327 |
+
flattened_outputs = traced_model(*flattened_inputs)
|
| 328 |
+
return self.outputs_schema(flattened_outputs)
|
| 329 |
+
|
| 330 |
+
return forward
|
RAVE-main/annotator/oneformer/detectron2/export/shared.py
ADDED
|
@@ -0,0 +1,1039 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
|
| 3 |
+
import collections
|
| 4 |
+
import copy
|
| 5 |
+
import functools
|
| 6 |
+
import logging
|
| 7 |
+
import numpy as np
|
| 8 |
+
import os
|
| 9 |
+
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
|
| 10 |
+
from unittest import mock
|
| 11 |
+
import caffe2.python.utils as putils
|
| 12 |
+
import torch
|
| 13 |
+
import torch.nn.functional as F
|
| 14 |
+
from caffe2.proto import caffe2_pb2
|
| 15 |
+
from caffe2.python import core, net_drawer, workspace
|
| 16 |
+
from torch.nn.functional import interpolate as interp
|
| 17 |
+
|
| 18 |
+
logger = logging.getLogger(__name__)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
# ==== torch/utils_toffee/cast.py =======================================
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def to_device(t, device_str):
|
| 25 |
+
"""
|
| 26 |
+
This function is a replacement of .to(another_device) such that it allows the
|
| 27 |
+
casting to be traced properly by explicitly calling the underlying copy ops.
|
| 28 |
+
It also avoids introducing unncessary op when casting to the same device.
|
| 29 |
+
"""
|
| 30 |
+
src = t.device
|
| 31 |
+
dst = torch.device(device_str)
|
| 32 |
+
|
| 33 |
+
if src == dst:
|
| 34 |
+
return t
|
| 35 |
+
elif src.type == "cuda" and dst.type == "cpu":
|
| 36 |
+
return torch.ops._caffe2.CopyGPUToCPU(t)
|
| 37 |
+
elif src.type == "cpu" and dst.type == "cuda":
|
| 38 |
+
return torch.ops._caffe2.CopyCPUToGPU(t)
|
| 39 |
+
else:
|
| 40 |
+
raise RuntimeError("Can't cast tensor from device {} to device {}".format(src, dst))
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
# ==== torch/utils_toffee/interpolate.py =======================================
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
# Note: borrowed from vision/detection/fair/detectron/detectron/modeling/detector.py
|
| 47 |
+
def BilinearInterpolation(tensor_in, up_scale):
|
| 48 |
+
assert up_scale % 2 == 0, "Scale should be even"
|
| 49 |
+
|
| 50 |
+
def upsample_filt(size):
|
| 51 |
+
factor = (size + 1) // 2
|
| 52 |
+
if size % 2 == 1:
|
| 53 |
+
center = factor - 1
|
| 54 |
+
else:
|
| 55 |
+
center = factor - 0.5
|
| 56 |
+
|
| 57 |
+
og = np.ogrid[:size, :size]
|
| 58 |
+
return (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
|
| 59 |
+
|
| 60 |
+
kernel_size = int(up_scale) * 2
|
| 61 |
+
bil_filt = upsample_filt(kernel_size)
|
| 62 |
+
|
| 63 |
+
dim = int(tensor_in.shape[1])
|
| 64 |
+
kernel = np.zeros((dim, dim, kernel_size, kernel_size), dtype=np.float32)
|
| 65 |
+
kernel[range(dim), range(dim), :, :] = bil_filt
|
| 66 |
+
|
| 67 |
+
tensor_out = F.conv_transpose2d(
|
| 68 |
+
tensor_in,
|
| 69 |
+
weight=to_device(torch.Tensor(kernel), tensor_in.device),
|
| 70 |
+
bias=None,
|
| 71 |
+
stride=int(up_scale),
|
| 72 |
+
padding=int(up_scale / 2),
|
| 73 |
+
)
|
| 74 |
+
|
| 75 |
+
return tensor_out
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
# NOTE: ONNX is incompatible with traced torch.nn.functional.interpolate if
|
| 79 |
+
# using dynamic `scale_factor` rather than static `size`. (T43166860)
|
| 80 |
+
# NOTE: Caffe2 Int8 conversion might not be able to quantize `size` properly.
|
| 81 |
+
def onnx_compatibale_interpolate(
|
| 82 |
+
input, size=None, scale_factor=None, mode="nearest", align_corners=None
|
| 83 |
+
):
|
| 84 |
+
# NOTE: The input dimensions are interpreted in the form:
|
| 85 |
+
# `mini-batch x channels x [optional depth] x [optional height] x width`.
|
| 86 |
+
if size is None and scale_factor is not None:
|
| 87 |
+
if input.dim() == 4:
|
| 88 |
+
if isinstance(scale_factor, (int, float)):
|
| 89 |
+
height_scale, width_scale = (scale_factor, scale_factor)
|
| 90 |
+
else:
|
| 91 |
+
assert isinstance(scale_factor, (tuple, list))
|
| 92 |
+
assert len(scale_factor) == 2
|
| 93 |
+
height_scale, width_scale = scale_factor
|
| 94 |
+
|
| 95 |
+
assert not align_corners, "No matching C2 op for align_corners == True"
|
| 96 |
+
if mode == "nearest":
|
| 97 |
+
return torch.ops._caffe2.ResizeNearest(
|
| 98 |
+
input, order="NCHW", width_scale=width_scale, height_scale=height_scale
|
| 99 |
+
)
|
| 100 |
+
elif mode == "bilinear":
|
| 101 |
+
logger.warning(
|
| 102 |
+
"Use F.conv_transpose2d for bilinear interpolate"
|
| 103 |
+
" because there's no such C2 op, this may cause significant"
|
| 104 |
+
" slowdown and the boundary pixels won't be as same as"
|
| 105 |
+
" using F.interpolate due to padding."
|
| 106 |
+
)
|
| 107 |
+
assert height_scale == width_scale
|
| 108 |
+
return BilinearInterpolation(input, up_scale=height_scale)
|
| 109 |
+
logger.warning("Output size is not static, it might cause ONNX conversion issue")
|
| 110 |
+
|
| 111 |
+
return interp(input, size, scale_factor, mode, align_corners)
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def mock_torch_nn_functional_interpolate():
|
| 115 |
+
def decorator(func):
|
| 116 |
+
@functools.wraps(func)
|
| 117 |
+
def _mock_torch_nn_functional_interpolate(*args, **kwargs):
|
| 118 |
+
if torch.onnx.is_in_onnx_export():
|
| 119 |
+
with mock.patch(
|
| 120 |
+
"torch.nn.functional.interpolate", side_effect=onnx_compatibale_interpolate
|
| 121 |
+
):
|
| 122 |
+
return func(*args, **kwargs)
|
| 123 |
+
else:
|
| 124 |
+
return func(*args, **kwargs)
|
| 125 |
+
|
| 126 |
+
return _mock_torch_nn_functional_interpolate
|
| 127 |
+
|
| 128 |
+
return decorator
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
# ==== torch/utils_caffe2/ws_utils.py ==========================================
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
class ScopedWS(object):
|
| 135 |
+
def __init__(self, ws_name, is_reset, is_cleanup=False):
|
| 136 |
+
self.ws_name = ws_name
|
| 137 |
+
self.is_reset = is_reset
|
| 138 |
+
self.is_cleanup = is_cleanup
|
| 139 |
+
self.org_ws = ""
|
| 140 |
+
|
| 141 |
+
def __enter__(self):
|
| 142 |
+
self.org_ws = workspace.CurrentWorkspace()
|
| 143 |
+
if self.ws_name is not None:
|
| 144 |
+
workspace.SwitchWorkspace(self.ws_name, True)
|
| 145 |
+
if self.is_reset:
|
| 146 |
+
workspace.ResetWorkspace()
|
| 147 |
+
|
| 148 |
+
return workspace
|
| 149 |
+
|
| 150 |
+
def __exit__(self, *args):
|
| 151 |
+
if self.is_cleanup:
|
| 152 |
+
workspace.ResetWorkspace()
|
| 153 |
+
if self.ws_name is not None:
|
| 154 |
+
workspace.SwitchWorkspace(self.org_ws)
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
def fetch_any_blob(name):
|
| 158 |
+
bb = None
|
| 159 |
+
try:
|
| 160 |
+
bb = workspace.FetchBlob(name)
|
| 161 |
+
except TypeError:
|
| 162 |
+
bb = workspace.FetchInt8Blob(name)
|
| 163 |
+
except Exception as e:
|
| 164 |
+
logger.error("Get blob {} error: {}".format(name, e))
|
| 165 |
+
|
| 166 |
+
return bb
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
# ==== torch/utils_caffe2/protobuf.py ==========================================
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def get_pb_arg(pb, arg_name):
|
| 173 |
+
for x in pb.arg:
|
| 174 |
+
if x.name == arg_name:
|
| 175 |
+
return x
|
| 176 |
+
return None
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def get_pb_arg_valf(pb, arg_name, default_val):
|
| 180 |
+
arg = get_pb_arg(pb, arg_name)
|
| 181 |
+
return arg.f if arg is not None else default_val
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def get_pb_arg_floats(pb, arg_name, default_val):
|
| 185 |
+
arg = get_pb_arg(pb, arg_name)
|
| 186 |
+
return list(map(float, arg.floats)) if arg is not None else default_val
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def get_pb_arg_ints(pb, arg_name, default_val):
|
| 190 |
+
arg = get_pb_arg(pb, arg_name)
|
| 191 |
+
return list(map(int, arg.ints)) if arg is not None else default_val
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def get_pb_arg_vali(pb, arg_name, default_val):
|
| 195 |
+
arg = get_pb_arg(pb, arg_name)
|
| 196 |
+
return arg.i if arg is not None else default_val
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def get_pb_arg_vals(pb, arg_name, default_val):
|
| 200 |
+
arg = get_pb_arg(pb, arg_name)
|
| 201 |
+
return arg.s if arg is not None else default_val
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def get_pb_arg_valstrings(pb, arg_name, default_val):
|
| 205 |
+
arg = get_pb_arg(pb, arg_name)
|
| 206 |
+
return list(arg.strings) if arg is not None else default_val
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
def check_set_pb_arg(pb, arg_name, arg_attr, arg_value, allow_override=False):
|
| 210 |
+
arg = get_pb_arg(pb, arg_name)
|
| 211 |
+
if arg is None:
|
| 212 |
+
arg = putils.MakeArgument(arg_name, arg_value)
|
| 213 |
+
assert hasattr(arg, arg_attr)
|
| 214 |
+
pb.arg.extend([arg])
|
| 215 |
+
if allow_override and getattr(arg, arg_attr) != arg_value:
|
| 216 |
+
logger.warning(
|
| 217 |
+
"Override argument {}: {} -> {}".format(arg_name, getattr(arg, arg_attr), arg_value)
|
| 218 |
+
)
|
| 219 |
+
setattr(arg, arg_attr, arg_value)
|
| 220 |
+
else:
|
| 221 |
+
assert arg is not None
|
| 222 |
+
assert getattr(arg, arg_attr) == arg_value, "Existing value {}, new value {}".format(
|
| 223 |
+
getattr(arg, arg_attr), arg_value
|
| 224 |
+
)
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def _create_const_fill_op_from_numpy(name, tensor, device_option=None):
|
| 228 |
+
assert type(tensor) == np.ndarray
|
| 229 |
+
kTypeNameMapper = {
|
| 230 |
+
np.dtype("float32"): "GivenTensorFill",
|
| 231 |
+
np.dtype("int32"): "GivenTensorIntFill",
|
| 232 |
+
np.dtype("int64"): "GivenTensorInt64Fill",
|
| 233 |
+
np.dtype("uint8"): "GivenTensorStringFill",
|
| 234 |
+
}
|
| 235 |
+
|
| 236 |
+
args_dict = {}
|
| 237 |
+
if tensor.dtype == np.dtype("uint8"):
|
| 238 |
+
args_dict.update({"values": [str(tensor.data)], "shape": [1]})
|
| 239 |
+
else:
|
| 240 |
+
args_dict.update({"values": tensor, "shape": tensor.shape})
|
| 241 |
+
|
| 242 |
+
if device_option is not None:
|
| 243 |
+
args_dict["device_option"] = device_option
|
| 244 |
+
|
| 245 |
+
return core.CreateOperator(kTypeNameMapper[tensor.dtype], [], [name], **args_dict)
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
def _create_const_fill_op_from_c2_int8_tensor(name, int8_tensor):
|
| 249 |
+
assert type(int8_tensor) == workspace.Int8Tensor
|
| 250 |
+
kTypeNameMapper = {
|
| 251 |
+
np.dtype("int32"): "Int8GivenIntTensorFill",
|
| 252 |
+
np.dtype("uint8"): "Int8GivenTensorFill",
|
| 253 |
+
}
|
| 254 |
+
|
| 255 |
+
tensor = int8_tensor.data
|
| 256 |
+
assert tensor.dtype in [np.dtype("uint8"), np.dtype("int32")]
|
| 257 |
+
values = tensor.tobytes() if tensor.dtype == np.dtype("uint8") else tensor
|
| 258 |
+
|
| 259 |
+
return core.CreateOperator(
|
| 260 |
+
kTypeNameMapper[tensor.dtype],
|
| 261 |
+
[],
|
| 262 |
+
[name],
|
| 263 |
+
values=values,
|
| 264 |
+
shape=tensor.shape,
|
| 265 |
+
Y_scale=int8_tensor.scale,
|
| 266 |
+
Y_zero_point=int8_tensor.zero_point,
|
| 267 |
+
)
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
def create_const_fill_op(
|
| 271 |
+
name: str,
|
| 272 |
+
blob: Union[np.ndarray, workspace.Int8Tensor],
|
| 273 |
+
device_option: Optional[caffe2_pb2.DeviceOption] = None,
|
| 274 |
+
) -> caffe2_pb2.OperatorDef:
|
| 275 |
+
"""
|
| 276 |
+
Given a blob object, return the Caffe2 operator that creates this blob
|
| 277 |
+
as constant. Currently support NumPy tensor and Caffe2 Int8Tensor.
|
| 278 |
+
"""
|
| 279 |
+
|
| 280 |
+
tensor_type = type(blob)
|
| 281 |
+
assert tensor_type in [
|
| 282 |
+
np.ndarray,
|
| 283 |
+
workspace.Int8Tensor,
|
| 284 |
+
], 'Error when creating const fill op for "{}", unsupported blob type: {}'.format(
|
| 285 |
+
name, type(blob)
|
| 286 |
+
)
|
| 287 |
+
|
| 288 |
+
if tensor_type == np.ndarray:
|
| 289 |
+
return _create_const_fill_op_from_numpy(name, blob, device_option)
|
| 290 |
+
elif tensor_type == workspace.Int8Tensor:
|
| 291 |
+
assert device_option is None
|
| 292 |
+
return _create_const_fill_op_from_c2_int8_tensor(name, blob)
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
def construct_init_net_from_params(
|
| 296 |
+
params: Dict[str, Any], device_options: Optional[Dict[str, caffe2_pb2.DeviceOption]] = None
|
| 297 |
+
) -> caffe2_pb2.NetDef:
|
| 298 |
+
"""
|
| 299 |
+
Construct the init_net from params dictionary
|
| 300 |
+
"""
|
| 301 |
+
init_net = caffe2_pb2.NetDef()
|
| 302 |
+
device_options = device_options or {}
|
| 303 |
+
for name, blob in params.items():
|
| 304 |
+
if isinstance(blob, str):
|
| 305 |
+
logger.warning(
|
| 306 |
+
(
|
| 307 |
+
"Blob {} with type {} is not supported in generating init net,"
|
| 308 |
+
" skipped.".format(name, type(blob))
|
| 309 |
+
)
|
| 310 |
+
)
|
| 311 |
+
continue
|
| 312 |
+
init_net.op.extend(
|
| 313 |
+
[create_const_fill_op(name, blob, device_option=device_options.get(name, None))]
|
| 314 |
+
)
|
| 315 |
+
init_net.external_output.append(name)
|
| 316 |
+
return init_net
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
def get_producer_map(ssa):
|
| 320 |
+
"""
|
| 321 |
+
Return dict from versioned blob to (i, j),
|
| 322 |
+
where i is index of producer op, j is the index of output of that op.
|
| 323 |
+
"""
|
| 324 |
+
producer_map = {}
|
| 325 |
+
for i in range(len(ssa)):
|
| 326 |
+
outputs = ssa[i][1]
|
| 327 |
+
for j, outp in enumerate(outputs):
|
| 328 |
+
producer_map[outp] = (i, j)
|
| 329 |
+
return producer_map
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
def get_consumer_map(ssa):
|
| 333 |
+
"""
|
| 334 |
+
Return dict from versioned blob to list of (i, j),
|
| 335 |
+
where i is index of consumer op, j is the index of input of that op.
|
| 336 |
+
"""
|
| 337 |
+
consumer_map = collections.defaultdict(list)
|
| 338 |
+
for i in range(len(ssa)):
|
| 339 |
+
inputs = ssa[i][0]
|
| 340 |
+
for j, inp in enumerate(inputs):
|
| 341 |
+
consumer_map[inp].append((i, j))
|
| 342 |
+
return consumer_map
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
def get_params_from_init_net(
    init_net: caffe2_pb2.NetDef,
) -> Tuple[Dict[str, Any], Dict[str, caffe2_pb2.DeviceOption]]:
    """
    Take the output blobs from init_net by running it.

    Note: the original return annotation was a list literal ``[Dict, Dict]``,
    which is not a valid type annotation for a 2-tuple; fixed to ``Tuple``.

    Outputs:
        params: dict from blob name to numpy array
        device_options: dict from blob name to the device option of its creating op
    """
    # NOTE: this assumes that the params is determined by producer op with the
    # only exception be CopyGPUToCPU which is CUDA op but returns CPU tensor.
    def _get_device_option(producer_op):
        # CopyGPUToCPU runs on GPU but its output lives on CPU, so report the
        # default (CPU) device option rather than the op's own device option.
        if producer_op.type == "CopyGPUToCPU":
            return caffe2_pb2.DeviceOption()
        else:
            return producer_op.device_option

    # Run in an isolated, auto-reset/cleaned workspace so the caller's
    # workspace is not polluted by the fetched blobs.
    with ScopedWS("__get_params_from_init_net__", is_reset=True, is_cleanup=True) as ws:
        ws.RunNetOnce(init_net)
        params = {b: fetch_any_blob(b) for b in init_net.external_output}
    ssa, versions = core.get_ssa(init_net)
    producer_map = get_producer_map(ssa)
    device_options = {
        b: _get_device_option(init_net.op[producer_map[(b, versions[b])][0]])
        for b in init_net.external_output
    }
    return params, device_options
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def _updater_raise(op, input_types, output_types):
|
| 375 |
+
raise RuntimeError(
|
| 376 |
+
"Failed to apply updater for op {} given input_types {} and"
|
| 377 |
+
" output_types {}".format(op, input_types, output_types)
|
| 378 |
+
)
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
def _generic_status_identifier(
    predict_net: caffe2_pb2.NetDef,
    status_updater: Callable,
    known_status: Dict[Tuple[str, int], Any],
) -> Dict[Tuple[str, int], Any]:
    """
    Statically infer the status of each blob, the status can be such as device type
    (CPU/GPU), layout (NCHW/NHWC), data type (float32/int8), etc. "Blob" here
    is versioned blob (Tuple[str, int]) in the format compatible with ssa.
    Inputs:
        predict_net: the caffe2 network
        status_updater: a callable, given an op and the status of its input/output,
            it returns the updated status of input/output. `None` is used for
            representing unknown status.
        known_status: a dict containing known status, used as initialization.
    Outputs:
        A dict mapping from versioned blob to its status
    """
    ssa, versions = core.get_ssa(predict_net)
    versioned_ext_input = [(b, 0) for b in predict_net.external_input]
    versioned_ext_output = [(b, versions[b]) for b in predict_net.external_output]
    all_versioned_blobs = set().union(*[set(x[0] + x[1]) for x in ssa])

    # Every seeded status must refer to a blob this net actually touches.
    allowed_vbs = all_versioned_blobs.union(versioned_ext_input).union(versioned_ext_output)
    assert all(k in allowed_vbs for k in known_status)
    assert all(v is not None for v in known_status.values())
    _known_status = copy.deepcopy(known_status)

    def _check_and_update(key, value):
        # Record a newly inferred status; refuse to overwrite with a different one.
        assert value is not None
        if key in _known_status:
            if not _known_status[key] == value:
                # (Fixed typo in the message: "Confilict" -> "Conflict".)
                raise RuntimeError(
                    "Conflict status for {}, existing status {}, new status {}".format(
                        key, _known_status[key], value
                    )
                )
        _known_status[key] = value

    def _update_i(op, ssa_i):
        versioned_inputs = ssa_i[0]
        versioned_outputs = ssa_i[1]

        inputs_status = [_known_status.get(b, None) for b in versioned_inputs]
        outputs_status = [_known_status.get(b, None) for b in versioned_outputs]

        new_inputs_status, new_outputs_status = status_updater(op, inputs_status, outputs_status)

        for versioned_blob, status in zip(
            versioned_inputs + versioned_outputs, new_inputs_status + new_outputs_status
        ):
            if status is not None:
                _check_and_update(versioned_blob, status)

    # One forward pass propagates status from inputs toward outputs, then one
    # backward pass propagates from outputs back toward inputs.
    for op, ssa_i in zip(predict_net.op, ssa):
        _update_i(op, ssa_i)
    for op, ssa_i in zip(reversed(predict_net.op), reversed(ssa)):
        _update_i(op, ssa_i)

    # NOTE: This strictly checks that every blob from predict_net is assigned
    # a known status. However sometimes it's impossible (eg. having deadend op);
    # we may relax this constraint if such cases need to be supported.
    for k in all_versioned_blobs:
        if k not in _known_status:
            raise NotImplementedError(
                "Can not infer the status for {}. Currently only support the case where"
                " a single forward and backward pass can identify status for all blobs.".format(k)
            )

    return _known_status
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
def infer_device_type(
    predict_net: caffe2_pb2.NetDef,
    known_status: Dict[Tuple[str, int], Any],
    device_name_style: str = "caffe2",
) -> Dict[Tuple[str, int], str]:
    """Return the device type ("cpu" or "gpu"/"cuda") of each (versioned) blob"""

    assert device_name_style in ["caffe2", "pytorch"]
    _CPU_STR = "cpu"
    _GPU_STR = "gpu" if device_name_style == "caffe2" else "cuda"

    def _copy_cpu_to_gpu_updater(op, input_types, output_types):
        # A CopyCPUToGPU must read a CPU blob and write a GPU blob.
        if input_types[0] == _GPU_STR or output_types[0] == _CPU_STR:
            _updater_raise(op, input_types, output_types)
        return ([_CPU_STR], [_GPU_STR])

    def _copy_gpu_to_cpu_updater(op, input_types, output_types):
        # A CopyGPUToCPU must read a GPU blob and write a CPU blob.
        if input_types[0] == _CPU_STR or output_types[0] == _GPU_STR:
            _updater_raise(op, input_types, output_types)
        return ([_GPU_STR], [_CPU_STR])

    def _other_ops_updater(op, input_types, output_types):
        # Any other op keeps all of its inputs/outputs on one device: take the
        # first known type and require every other known type to agree.
        non_none_types = [x for x in input_types + output_types if x is not None]
        if not non_none_types:
            the_type = None
        else:
            the_type = non_none_types[0]
            if not all(x == the_type for x in non_none_types):
                _updater_raise(op, input_types, output_types)
        return ([the_type for _ in op.input], [the_type for _ in op.output])

    _special_updaters = {
        "CopyCPUToGPU": _copy_cpu_to_gpu_updater,
        "CopyGPUToCPU": _copy_gpu_to_cpu_updater,
    }

    def _device_updater(op, *args, **kwargs):
        updater = _special_updaters.get(op.type, _other_ops_updater)
        return updater(op, *args, **kwargs)

    return _generic_status_identifier(predict_net, _device_updater, known_status)
|
| 491 |
+
|
| 492 |
+
|
| 493 |
+
# ==== torch/utils_caffe2/vis.py ===============================================
|
| 494 |
+
|
| 495 |
+
|
| 496 |
+
def _modify_blob_names(ops, blob_rename_f):
|
| 497 |
+
ret = []
|
| 498 |
+
|
| 499 |
+
def _replace_list(blob_list, replaced_list):
|
| 500 |
+
del blob_list[:]
|
| 501 |
+
blob_list.extend(replaced_list)
|
| 502 |
+
|
| 503 |
+
for x in ops:
|
| 504 |
+
cur = copy.deepcopy(x)
|
| 505 |
+
_replace_list(cur.input, list(map(blob_rename_f, cur.input)))
|
| 506 |
+
_replace_list(cur.output, list(map(blob_rename_f, cur.output)))
|
| 507 |
+
ret.append(cur)
|
| 508 |
+
|
| 509 |
+
return ret
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
def _rename_blob(name, blob_sizes, blob_ranges):
|
| 513 |
+
def _list_to_str(bsize):
|
| 514 |
+
ret = ", ".join([str(x) for x in bsize])
|
| 515 |
+
ret = "[" + ret + "]"
|
| 516 |
+
return ret
|
| 517 |
+
|
| 518 |
+
ret = name
|
| 519 |
+
if blob_sizes is not None and name in blob_sizes:
|
| 520 |
+
ret += "\n" + _list_to_str(blob_sizes[name])
|
| 521 |
+
if blob_ranges is not None and name in blob_ranges:
|
| 522 |
+
ret += "\n" + _list_to_str(blob_ranges[name])
|
| 523 |
+
|
| 524 |
+
return ret
|
| 525 |
+
|
| 526 |
+
|
| 527 |
+
# graph_name could not contain word 'graph'
def save_graph(net, file_name, graph_name="net", op_only=True, blob_sizes=None, blob_ranges=None):
    """
    Render ``net`` to an image file, labeling each blob with its size and value
    range when provided. Thin wrapper over save_graph_base.
    """
    rename = functools.partial(_rename_blob, blob_sizes=blob_sizes, blob_ranges=blob_ranges)
    return save_graph_base(net, file_name, graph_name, op_only, rename)
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
def save_graph_base(net, file_name, graph_name="net", op_only=True, blob_rename_func=None):
    """
    Draw a caffe2 net with pydot and write it to file_name (.png/.pdf/.svg).

    Args:
        net: caffe2 NetDef to draw.
        file_name: output path; its extension selects the image format.
        graph_name: pydot graph name (must not contain the word 'graph').
        op_only: if True, draw a minimal operator-only graph.
        blob_rename_func: optional callable mapping blob names to display labels.

    Returns:
        The pydot graph object (also returned when writing the file failed).
    """
    graph = None
    ops = net.op
    if blob_rename_func is not None:
        ops = _modify_blob_names(ops, blob_rename_func)
    if not op_only:
        graph = net_drawer.GetPydotGraph(ops, graph_name, rankdir="TB")
    else:
        graph = net_drawer.GetPydotGraphMinimal(
            ops, graph_name, rankdir="TB", minimal_dependency=True
        )

    try:
        par_dir = os.path.dirname(file_name)
        # BUGFIX: os.makedirs("") raises when file_name has no directory
        # component, which silently skipped writing the image (the broad except
        # below swallowed it). Only create the directory when there is one;
        # exist_ok also removes the exists/makedirs race.
        if par_dir:
            os.makedirs(par_dir, exist_ok=True)

        # 'ext' instead of 'format' to avoid shadowing the builtin.
        ext = os.path.splitext(os.path.basename(file_name))[-1]
        if ext == ".png":
            graph.write_png(file_name)
        elif ext == ".pdf":
            graph.write_pdf(file_name)
        elif ext == ".svg":
            graph.write_svg(file_name)
        else:
            print("Incorrect format {}".format(ext))
    except Exception as e:
        print("Error when writing graph to image {}".format(e))

    return graph
|
| 563 |
+
|
| 564 |
+
|
| 565 |
+
# ==== torch/utils_toffee/aten_to_caffe2.py ====================================
|
| 566 |
+
|
| 567 |
+
|
| 568 |
+
def group_norm_replace_aten_with_caffe2(predict_net: caffe2_pb2.NetDef):
    """
    For ONNX exported model, GroupNorm will be represented as ATen op,
    this can be a drop in replacement from ATen to GroupNorm.

    Modifies predict_net in-place: each ATen op whose "operator" arg is
    "group_norm" becomes a caffe2 GroupNorm op; its "num_groups" arg is
    translated to GroupNorm's "group" arg, and caffe2-irrelevant args
    ("operator", "cudnn_enabled") are dropped.
    """
    count = 0
    for op in predict_net.op:
        if op.type == "ATen":
            op_name = get_pb_arg_vals(op, "operator", None)  # return byte in py3
            if op_name and op_name.decode() == "group_norm":
                op.arg.remove(get_pb_arg(op, "operator"))

                if get_pb_arg_vali(op, "cudnn_enabled", None):
                    op.arg.remove(get_pb_arg(op, "cudnn_enabled"))

                num_groups = get_pb_arg_vali(op, "num_groups", None)
                if num_groups is not None:
                    op.arg.remove(get_pb_arg(op, "num_groups"))
                    check_set_pb_arg(op, "group", "i", num_groups)

                op.type = "GroupNorm"
                count += 1
    # BUGFIX: was `count > 1`, which skipped logging when exactly one op was
    # replaced; log whenever any replacement happened.
    if count > 0:
        logger.info("Replaced {} ATen operator to GroupNormOp".format(count))
|
| 592 |
+
|
| 593 |
+
|
| 594 |
+
# ==== torch/utils_toffee/alias.py =============================================
|
| 595 |
+
|
| 596 |
+
|
| 597 |
+
def alias(x, name, is_backward=False):
    """
    Attach a blob name to tensor ``x`` for the caffe2/ONNX export via
    AliasWithName; outside of ONNX export this is a no-op returning ``x``.
    """
    if torch.onnx.is_in_onnx_export():
        assert isinstance(x, torch.Tensor)
        return torch.ops._caffe2.AliasWithName(x, name, is_backward=is_backward)
    return x
|
| 602 |
+
|
| 603 |
+
|
| 604 |
+
def fuse_alias_placeholder(predict_net, init_net):
    """
    Remove AliasWithName placeholder and rename the input/output of it.

    Each AliasWithName op carries the desired blob name in its "name" argument;
    this renames the op's single input and output to that name (propagating the
    rename through producers/consumers as needed), then deletes the now-identity
    alias ops. Modifies predict_net (and possibly init_net) in-place.
    """
    # First we finish all the re-naming
    for i, op in enumerate(predict_net.op):
        if op.type == "AliasWithName":
            assert len(op.input) == 1
            assert len(op.output) == 1
            name = get_pb_arg_vals(op, "name", None).decode()
            is_backward = bool(get_pb_arg_vali(op, "is_backward", 0))
            # For backward aliases the rename is applied on the producer side,
            # which also re-routes any other consumers of the same blob.
            rename_op_input(predict_net, init_net, i, 0, name, from_producer=is_backward)
            rename_op_output(predict_net, i, 0, name)

    # Remove AliasWithName, should be very safe since it's a non-op
    new_ops = []
    for op in predict_net.op:
        if op.type != "AliasWithName":
            new_ops.append(op)
        else:
            # safety check: after renaming, the alias must be a pure identity —
            # its input, output and "name" argument all refer to the same blob.
            assert op.input == op.output
            assert op.input[0] == op.arg[0].s.decode()
    del predict_net.op[:]
    predict_net.op.extend(new_ops)
|
| 627 |
+
|
| 628 |
+
|
| 629 |
+
# ==== torch/utils_caffe2/graph_transform.py ===================================
|
| 630 |
+
|
| 631 |
+
|
| 632 |
+
class IllegalGraphTransformError(ValueError):
    """
    When a graph transform function call can't be executed, e.g. renaming an
    op input whose blob is also consumed by other ops (see rename_op_input).
    """
|
| 634 |
+
|
| 635 |
+
|
| 636 |
+
def _rename_versioned_blob_in_proto(
    proto: caffe2_pb2.NetDef,
    old_name: str,
    new_name: str,
    version: int,
    ssa: List[Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]],
    start_versions: Dict[str, int],
    end_versions: Dict[str, int],
):
    """
    In given proto, rename all blobs with matched version (in-place).

    Args:
        proto: the NetDef to modify.
        old_name / new_name: the rename pair.
        version: only occurrences whose ssa version equals this are renamed.
        ssa: ssa form of proto — one (inputs, outputs) pair per op, aligned
            with proto.op.
        start_versions: version of each blob at the start of proto; gates
            whether external_input is renamed.
        end_versions: version of each blob at the end of proto; gates
            whether external_output is renamed.
    """
    # Operator list: rename only the input/output slots whose versioned blob
    # matches (old_name, version) exactly.
    for op, i_th_ssa in zip(proto.op, ssa):
        versioned_inputs, versioned_outputs = i_th_ssa
        for i in range(len(op.input)):
            if versioned_inputs[i] == (old_name, version):
                op.input[i] = new_name
        for i in range(len(op.output)):
            if versioned_outputs[i] == (old_name, version):
                op.output[i] = new_name
    # external_input: renamed only if the blob enters the net at this version.
    if start_versions.get(old_name, 0) == version:
        for i in range(len(proto.external_input)):
            if proto.external_input[i] == old_name:
                proto.external_input[i] = new_name
    # external_output: renamed only if the blob leaves the net at this version.
    if end_versions.get(old_name, 0) == version:
        for i in range(len(proto.external_output)):
            if proto.external_output[i] == old_name:
                proto.external_output[i] = new_name
|
| 665 |
+
|
| 666 |
+
|
| 667 |
+
def rename_op_input(
    predict_net: caffe2_pb2.NetDef,
    init_net: caffe2_pb2.NetDef,
    op_id: int,
    input_id: int,
    new_name: str,
    from_producer: bool = False,
):
    """
    Rename the op_id-th operator in predict_net, changing its input_id-th
    input's name to new_name. It also does automatic re-routing and changes
    external_input and init_net if necessary.
    - It requires the input is only consumed by this op.
    - This function modifies predict_net and init_net in-place.
    - When from_producer is enabled, this also updates other operators that
      consume the same input. Be cautious because it may trigger unintended
      behavior.
    """
    assert isinstance(predict_net, caffe2_pb2.NetDef)
    assert isinstance(init_net, caffe2_pb2.NetDef)

    # Seed predict_net's ssa with init_net's final versions so blob versions
    # are consistent across both nets.
    init_net_ssa, init_net_versions = core.get_ssa(init_net)
    predict_net_ssa, predict_net_versions = core.get_ssa(
        predict_net, copy.deepcopy(init_net_versions)
    )

    versioned_inputs, versioned_outputs = predict_net_ssa[op_id]
    old_name, version = versioned_inputs[input_id]

    if from_producer:
        # Delegate to rename_op_output on the blob's producer, which re-routes
        # every consumer of the blob (not just this op).
        producer_map = get_producer_map(predict_net_ssa)
        if not (old_name, version) in producer_map:
            raise NotImplementedError(
                "Can't find producer, the input {} is probably from"
                " init_net, this is not supported yet.".format(old_name)
            )
        producer = producer_map[(old_name, version)]
        rename_op_output(predict_net, producer[0], producer[1], new_name)
        return

    def contain_targets(op_ssa):
        # True if this op consumes the (old_name, version) blob.
        return (old_name, version) in op_ssa[0]

    # Renaming only this op's input is unsafe when other ops read the same
    # versioned blob — that requires renaming at the producer instead.
    is_consumer = [contain_targets(op_ssa) for op_ssa in predict_net_ssa]
    if sum(is_consumer) > 1:
        raise IllegalGraphTransformError(
            (
                "Input '{}' of operator(#{}) are consumed by other ops, please use"
                + " rename_op_output on the producer instead. Offending op: \n{}"
            ).format(old_name, op_id, predict_net.op[op_id])
        )

    # update init_net
    _rename_versioned_blob_in_proto(
        init_net, old_name, new_name, version, init_net_ssa, {}, init_net_versions
    )
    # update predict_net
    _rename_versioned_blob_in_proto(
        predict_net,
        old_name,
        new_name,
        version,
        predict_net_ssa,
        init_net_versions,
        predict_net_versions,
    )
|
| 732 |
+
|
| 733 |
+
|
| 734 |
+
def rename_op_output(predict_net: caffe2_pb2.NetDef, op_id: int, output_id: int, new_name: str):
    """
    Rename the op_id-th operator in predict_net, changing its output_id-th
    output's name to new_name. It also does automatic re-routing and changes
    external_output if necessary.
    - It allows multiple consumers of its output.
    - This function modifies predict_net in-place, doesn't need init_net.
    """
    assert isinstance(predict_net, caffe2_pb2.NetDef)

    ssa, blob_versions = core.get_ssa(predict_net)

    versioned_inputs, versioned_outputs = ssa[op_id]
    old_name, version = versioned_outputs[output_id]

    # update predict_net
    # Empty start_versions means external_input is never touched here; all
    # consumers of (old_name, version) inside predict_net get re-routed, and
    # external_output is renamed when this version is the net's final one.
    _rename_versioned_blob_in_proto(
        predict_net, old_name, new_name, version, ssa, {}, blob_versions
    )
|
| 753 |
+
|
| 754 |
+
|
| 755 |
+
def get_sub_graph_external_input_output(
    predict_net: caffe2_pb2.NetDef, sub_graph_op_indices: List[int]
) -> Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]:
    """
    Return the list of external input/output of sub-graph,
    each element is tuple of the name and corresponding version in predict_net.

    external input/output is defined the same way as caffe2 NetDef.
    """
    ssa, versions = core.get_ssa(predict_net)

    # Collect the sub-graph's inputs (deduplicated, order preserved) and
    # outputs (ssa outputs never repeat).
    all_inputs = []
    all_outputs = []
    for op_id in sub_graph_op_indices:
        for inp in ssa[op_id][0]:
            if inp not in all_inputs:
                all_inputs.append(inp)
        all_outputs.extend(ssa[op_id][1])

    # for versioned blobs, external inputs are just those blob in all_inputs
    # but not in all_outputs
    ext_inputs = [inp for inp in all_inputs if inp not in all_outputs]

    # external outputs are essentially outputs of this subgraph that are used
    # outside of this sub-graph (including predict_net.external_output)
    outside_consumed = set()
    for i in range(len(ssa)):
        if i not in sub_graph_op_indices:
            outside_consumed.update(ssa[i][0])
    outside_consumed.update((outp, versions[outp]) for outp in predict_net.external_output)
    ext_outputs = [outp for outp in all_outputs if outp in outside_consumed]

    return ext_inputs, ext_outputs
|
| 785 |
+
|
| 786 |
+
|
| 787 |
+
class DiGraph:
    """A DAG representation of caffe2 graph, each vertice is a versioned blob."""

    def __init__(self):
        self.vertices = set()
        self.graph = collections.defaultdict(list)

    def add_edge(self, u, v):
        """Add directed edge u -> v and track both endpoints as vertices."""
        self.graph[u].append(v)
        self.vertices.update((u, v))

    # grab from https://www.geeksforgeeks.org/find-paths-given-source-destination/
    def get_all_paths(self, s, d):
        """Return every simple path from s to d (both endpoints included)."""
        visited = dict.fromkeys(self.vertices, False)
        current_path = []
        found_paths = []

        def _dfs(graph, node, dest, visited, current_path):
            visited[node] = True
            current_path.append(node)
            if node == dest:
                found_paths.append(copy.deepcopy(current_path))
            else:
                for nxt in graph[node]:
                    if not visited[nxt]:
                        _dfs(graph, nxt, dest, visited, current_path)
            # Backtrack so the vertex can appear on other paths.
            current_path.pop()
            visited[node] = False

        _dfs(self.graph, s, d, visited, current_path)
        return found_paths

    @staticmethod
    def from_ssa(ssa):
        """Build a DiGraph connecting each op's inputs to each of its outputs."""
        graph = DiGraph()
        for inputs, outputs in ssa:
            for inp in inputs:
                for outp in outputs:
                    graph.add_edge(inp, outp)
        return graph
|
| 828 |
+
|
| 829 |
+
|
| 830 |
+
def _get_dependency_chain(ssa, versioned_target, versioned_source):
    """
    Return the index list of relevant operator to produce target blob from source blob,
    if there's no dependency, return empty list.
    """

    # finding all paths between nodes can be O(N!), thus we can only search
    # in the subgraph using the op starting from the first consumer of source blob
    # to the producer of the target blob.
    consumer_map = get_consumer_map(ssa)
    producer_map = get_producer_map(ssa)
    # The window is widened by 15 ops on each side. BUGFIX: clamp start_op at 0,
    # otherwise a negative index makes the slice below wrap around and select
    # the wrong (possibly empty) window near the beginning of the net.
    start_op = max(0, min(x[0] for x in consumer_map[versioned_source]) - 15)
    end_op = (
        producer_map[versioned_target][0] + 15 if versioned_target in producer_map else start_op
    )
    sub_graph_ssa = ssa[start_op : end_op + 1]
    if len(sub_graph_ssa) > 30:
        # (Fixed message typos: "bebetween" -> "between", "non-trival" -> "non-trivial".)
        logger.warning(
            "Subgraph between {} and {} is large (from op#{} to op#{}), it"
            " might take non-trivial time to find all paths between them.".format(
                versioned_source, versioned_target, start_op, end_op
            )
        )

    dag = DiGraph.from_ssa(sub_graph_ssa)
    paths = dag.get_all_paths(versioned_source, versioned_target)  # include two ends
    # Map each blob on a path (skipping the source) back to its producer op id.
    ops_in_paths = [[producer_map[blob][0] for blob in path[1:]] for path in paths]
    return sorted(set().union(*[set(ops) for ops in ops_in_paths]))
|
| 858 |
+
|
| 859 |
+
|
| 860 |
+
def identify_reshape_sub_graph(predict_net: caffe2_pb2.NetDef) -> List[List[int]]:
    """
    Identify the reshape sub-graphs in a protobuf.
    The reshape sub-graph is defined as matching the following pattern:

    (input_blob) -> Op_1 -> ... -> Op_N -> (new_shape) -─┐
        └-------------------------------------------> Reshape -> (output_blob)

    Return:
        List of sub-graphs, each sub-graph is represented as a list of indices
        of the relavent ops, [Op_1, Op_2, ..., Op_N, Reshape]
    """
    ssa, _ = core.get_ssa(predict_net)

    sub_graphs = []
    for op_id, op in enumerate(predict_net.op):
        if op.type != "Reshape":
            continue
        # Reshape takes (data, new_shape); trace how new_shape is derived
        # from the data blob.
        assert len(op.input) == 2
        data_source, shape_source = ssa[op_id][0]
        chain = _get_dependency_chain(ssa, shape_source, data_source)
        sub_graphs.append(chain + [op_id])
    return sub_graphs
|
| 885 |
+
|
| 886 |
+
|
| 887 |
+
def remove_reshape_for_fc(predict_net, params):
    """
    In PyTorch nn.Linear has to take 2D tensor, this often leads to reshape
    a 4D tensor to 2D by calling .view(). However this (dynamic) reshaping
    doesn't work well with ONNX and Int8 tools, and cause using extra
    ops (eg. ExpandDims) that might not be available on mobile.
    Luckily Caffe2 supports 4D tensor for FC, so we can remove those reshape
    after exporting ONNX model.

    Returns a (new predict_net, params) pair; note that `params` is the same
    dict mutated in-place (removed entries), while the returned predict_net
    is a modified deep copy.
    """
    from caffe2.python import core

    # find all reshape sub-graph that can be removed, which is now all Reshape
    # sub-graph whose output is only consumed by FC.
    # TODO: to make it safer, we may need the actual value to better determine
    # if a Reshape before FC is removable.
    reshape_sub_graphs = identify_reshape_sub_graph(predict_net)
    sub_graphs_to_remove = []
    for reshape_sub_graph in reshape_sub_graphs:
        reshape_op_id = reshape_sub_graph[-1]
        assert predict_net.op[reshape_op_id].type == "Reshape"
        ssa, _ = core.get_ssa(predict_net)
        reshape_output = ssa[reshape_op_id][1][0]
        consumers = [i for i in range(len(ssa)) if reshape_output in ssa[i][0]]
        if all(predict_net.op[consumer].type == "FC" for consumer in consumers):
            # safety check if the sub-graph is isolated, for this reshape sub-graph,
            # it means it has one non-param external input and one external output.
            # (version 0 inputs come from init_net, i.e. they are params)
            ext_inputs, ext_outputs = get_sub_graph_external_input_output(
                predict_net, reshape_sub_graph
            )
            non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0]
            if len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1:
                sub_graphs_to_remove.append(reshape_sub_graph)

    # perform removing subgraph by:
    # 1: rename the Reshape's output to its input, then the graph can be
    # seen as in-place identity, meaning whose external input/output are the same.
    # 2: simply remove those ops.
    remove_op_ids = []
    params_to_remove = []
    for sub_graph in sub_graphs_to_remove:
        logger.info(
            "Remove Reshape sub-graph:\n{}".format(
                "".join(["(#{:>4})\n{}".format(i, predict_net.op[i]) for i in sub_graph])
            )
        )
        reshape_op_id = sub_graph[-1]
        new_reshap_output = predict_net.op[reshape_op_id].input[0]
        rename_op_output(predict_net, reshape_op_id, 0, new_reshap_output)
        ext_inputs, ext_outputs = get_sub_graph_external_input_output(predict_net, sub_graph)
        non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0]
        params_ext_inputs = [inp for inp in ext_inputs if inp[1] == 0]
        # After the rename the sub-graph must look like an in-place identity:
        # same blob name in and out, version bumped exactly once.
        assert len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1
        assert ext_outputs[0][0] == non_params_ext_inputs[0][0]
        assert ext_outputs[0][1] == non_params_ext_inputs[0][1] + 1
        remove_op_ids.extend(sub_graph)
        params_to_remove.extend(params_ext_inputs)

    # Work on a copy so the caller's original predict_net object is untouched;
    # drop the sub-graph ops and the params that only fed those sub-graphs.
    predict_net = copy.deepcopy(predict_net)
    new_ops = [op for i, op in enumerate(predict_net.op) if i not in remove_op_ids]
    del predict_net.op[:]
    predict_net.op.extend(new_ops)
    for versioned_params in params_to_remove:
        name = versioned_params[0]
        logger.info("Remove params: {} from init_net and predict_net.external_input".format(name))
        del params[name]
        predict_net.external_input.remove(name)

    return predict_net, params
|
| 955 |
+
|
| 956 |
+
|
| 957 |
+
def fuse_copy_between_cpu_and_gpu(predict_net: caffe2_pb2.NetDef):
    """
    In-place fuse extra copy ops between cpu/gpu for the following case:
        a -CopyAToB-> b -CopyBToA> c1 -NextOp1-> d1
                      -CopyBToA> c2 -NextOp2-> d2
    The fused network will look like:
        a -NextOp1-> d1
          -NextOp2-> d2
    """

    _COPY_OPS = ["CopyCPUToGPU", "CopyGPUToCPU"]

    def _fuse_once(predict_net):
        # Find one fusable copy/reverse-copy pair, rewrite the net, and return
        # True; return False when no pair is left.
        ssa, blob_versions = core.get_ssa(predict_net)
        consumer_map = get_consumer_map(ssa)
        versioned_external_output = [
            (name, blob_versions[name]) for name in predict_net.external_output
        ]

        for op_id, op in enumerate(predict_net.op):
            if op.type in _COPY_OPS:
                fw_copy_versioned_output = ssa[op_id][1][0]
                consumer_ids = [x[0] for x in consumer_map[fw_copy_versioned_output]]
                # The opposite-direction copy op type (index trick on _COPY_OPS).
                reverse_op_type = _COPY_OPS[1 - _COPY_OPS.index(op.type)]

                # Fusable when the forward copy's output has consumers, is not
                # itself an external output, and every consumer is the reverse
                # copy whose output is also not an external output.
                is_fusable = (
                    len(consumer_ids) > 0
                    and fw_copy_versioned_output not in versioned_external_output
                    and all(
                        predict_net.op[_op_id].type == reverse_op_type
                        and ssa[_op_id][1][0] not in versioned_external_output
                        for _op_id in consumer_ids
                    )
                )

                if is_fusable:
                    for rv_copy_op_id in consumer_ids:
                        # making each NextOp uses "a" directly and removing Copy ops
                        rs_copy_versioned_output = ssa[rv_copy_op_id][1][0]
                        next_op_id, inp_id = consumer_map[rs_copy_versioned_output][0]
                        predict_net.op[next_op_id].input[inp_id] = op.input[0]
                    # remove CopyOps
                    new_ops = [
                        op
                        for i, op in enumerate(predict_net.op)
                        if i != op_id and i not in consumer_ids
                    ]
                    del predict_net.op[:]
                    predict_net.op.extend(new_ops)
                    return True

        return False

    # _fuse_once returns False if nothing can be fused; iterate to a fixpoint.
    while _fuse_once(predict_net):
        pass
|
| 1013 |
+
|
| 1014 |
+
|
| 1015 |
+
def remove_dead_end_ops(net_def: caffe2_pb2.NetDef):
    """
    Remove ops (in-place) whose outputs are all dead ends: not consumed by any
    surviving op and not listed in external_output.
    """
    ssa, versions = core.get_ssa(net_def)
    versioned_external_output = [(name, versions[name]) for name in net_def.external_output]
    consumer_map = get_consumer_map(ssa)
    removed_op_ids = set()

    def _blob_is_live(versioned_blob):
        # Live when it's an external output, or when it has consumers and
        # none of them has already been marked for removal.
        if versioned_blob in versioned_external_output:
            return True
        consumers = consumer_map[versioned_blob]
        return len(consumers) > 0 and all(c[0] not in removed_op_ids for c in consumers)

    # Walk backwards so removing a downstream op can cascade to its producers.
    for op_id in range(len(ssa) - 1, -1, -1):
        if not any(_blob_is_live(outp) for outp in ssa[op_id][1]):
            removed_op_ids.add(op_id)

    # simply removing those deadend ops should have no effect to external_output
    surviving_ops = [op for i, op in enumerate(net_def.op) if i not in removed_op_ids]
    del net_def.op[:]
    net_def.op.extend(surviving_ops)
|
RAVE-main/annotator/oneformer/detectron2/export/torchscript.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
from annotator.oneformer.detectron2.utils.file_io import PathManager
|
| 7 |
+
|
| 8 |
+
from .torchscript_patch import freeze_training_mode, patch_instances
|
| 9 |
+
|
| 10 |
+
__all__ = ["scripting_with_instances", "dump_torchscript_IR"]
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def scripting_with_instances(model, fields):
|
| 14 |
+
"""
|
| 15 |
+
Run :func:`torch.jit.script` on a model that uses the :class:`Instances` class. Since
|
| 16 |
+
attributes of :class:`Instances` are "dynamically" added in eager mode,it is difficult
|
| 17 |
+
for scripting to support it out of the box. This function is made to support scripting
|
| 18 |
+
a model that uses :class:`Instances`. It does the following:
|
| 19 |
+
|
| 20 |
+
1. Create a scriptable ``new_Instances`` class which behaves similarly to ``Instances``,
|
| 21 |
+
but with all attributes been "static".
|
| 22 |
+
The attributes need to be statically declared in the ``fields`` argument.
|
| 23 |
+
2. Register ``new_Instances``, and force scripting compiler to
|
| 24 |
+
use it when trying to compile ``Instances``.
|
| 25 |
+
|
| 26 |
+
After this function, the process will be reverted. User should be able to script another model
|
| 27 |
+
using different fields.
|
| 28 |
+
|
| 29 |
+
Example:
|
| 30 |
+
Assume that ``Instances`` in the model consist of two attributes named
|
| 31 |
+
``proposal_boxes`` and ``objectness_logits`` with type :class:`Boxes` and
|
| 32 |
+
:class:`Tensor` respectively during inference. You can call this function like:
|
| 33 |
+
::
|
| 34 |
+
fields = {"proposal_boxes": Boxes, "objectness_logits": torch.Tensor}
|
| 35 |
+
torchscipt_model = scripting_with_instances(model, fields)
|
| 36 |
+
|
| 37 |
+
Note:
|
| 38 |
+
It only support models in evaluation mode.
|
| 39 |
+
|
| 40 |
+
Args:
|
| 41 |
+
model (nn.Module): The input model to be exported by scripting.
|
| 42 |
+
fields (Dict[str, type]): Attribute names and corresponding type that
|
| 43 |
+
``Instances`` will use in the model. Note that all attributes used in ``Instances``
|
| 44 |
+
need to be added, regardless of whether they are inputs/outputs of the model.
|
| 45 |
+
Data type not defined in detectron2 is not supported for now.
|
| 46 |
+
|
| 47 |
+
Returns:
|
| 48 |
+
torch.jit.ScriptModule: the model in torchscript format
|
| 49 |
+
"""
|
| 50 |
+
assert (
|
| 51 |
+
not model.training
|
| 52 |
+
), "Currently we only support exporting models in evaluation mode to torchscript"
|
| 53 |
+
|
| 54 |
+
with freeze_training_mode(model), patch_instances(fields):
|
| 55 |
+
scripted_model = torch.jit.script(model)
|
| 56 |
+
return scripted_model
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# alias for old name
|
| 60 |
+
export_torchscript_with_instances = scripting_with_instances
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def dump_torchscript_IR(model, dir):
|
| 64 |
+
"""
|
| 65 |
+
Dump IR of a TracedModule/ScriptModule/Function in various format (code, graph,
|
| 66 |
+
inlined graph). Useful for debugging.
|
| 67 |
+
|
| 68 |
+
Args:
|
| 69 |
+
model (TracedModule/ScriptModule/ScriptFUnction): traced or scripted module
|
| 70 |
+
dir (str): output directory to dump files.
|
| 71 |
+
"""
|
| 72 |
+
dir = os.path.expanduser(dir)
|
| 73 |
+
PathManager.mkdirs(dir)
|
| 74 |
+
|
| 75 |
+
def _get_script_mod(mod):
|
| 76 |
+
if isinstance(mod, torch.jit.TracedModule):
|
| 77 |
+
return mod._actual_script_module
|
| 78 |
+
return mod
|
| 79 |
+
|
| 80 |
+
# Dump pretty-printed code: https://pytorch.org/docs/stable/jit.html#inspecting-code
|
| 81 |
+
with PathManager.open(os.path.join(dir, "model_ts_code.txt"), "w") as f:
|
| 82 |
+
|
| 83 |
+
def get_code(mod):
|
| 84 |
+
# Try a few ways to get code using private attributes.
|
| 85 |
+
try:
|
| 86 |
+
# This contains more information than just `mod.code`
|
| 87 |
+
return _get_script_mod(mod)._c.code
|
| 88 |
+
except AttributeError:
|
| 89 |
+
pass
|
| 90 |
+
try:
|
| 91 |
+
return mod.code
|
| 92 |
+
except AttributeError:
|
| 93 |
+
return None
|
| 94 |
+
|
| 95 |
+
def dump_code(prefix, mod):
|
| 96 |
+
code = get_code(mod)
|
| 97 |
+
name = prefix or "root model"
|
| 98 |
+
if code is None:
|
| 99 |
+
f.write(f"Could not found code for {name} (type={mod.original_name})\n")
|
| 100 |
+
f.write("\n")
|
| 101 |
+
else:
|
| 102 |
+
f.write(f"\nCode for {name}, type={mod.original_name}:\n")
|
| 103 |
+
f.write(code)
|
| 104 |
+
f.write("\n")
|
| 105 |
+
f.write("-" * 80)
|
| 106 |
+
|
| 107 |
+
for name, m in mod.named_children():
|
| 108 |
+
dump_code(prefix + "." + name, m)
|
| 109 |
+
|
| 110 |
+
if isinstance(model, torch.jit.ScriptFunction):
|
| 111 |
+
f.write(get_code(model))
|
| 112 |
+
else:
|
| 113 |
+
dump_code("", model)
|
| 114 |
+
|
| 115 |
+
def _get_graph(model):
|
| 116 |
+
try:
|
| 117 |
+
# Recursively dump IR of all modules
|
| 118 |
+
return _get_script_mod(model)._c.dump_to_str(True, False, False)
|
| 119 |
+
except AttributeError:
|
| 120 |
+
return model.graph.str()
|
| 121 |
+
|
| 122 |
+
with PathManager.open(os.path.join(dir, "model_ts_IR.txt"), "w") as f:
|
| 123 |
+
f.write(_get_graph(model))
|
| 124 |
+
|
| 125 |
+
# Dump IR of the entire graph (all submodules inlined)
|
| 126 |
+
with PathManager.open(os.path.join(dir, "model_ts_IR_inlined.txt"), "w") as f:
|
| 127 |
+
f.write(str(model.inlined_graph))
|
| 128 |
+
|
| 129 |
+
if not isinstance(model, torch.jit.ScriptFunction):
|
| 130 |
+
# Dump the model structure in pytorch style
|
| 131 |
+
with PathManager.open(os.path.join(dir, "model.txt"), "w") as f:
|
| 132 |
+
f.write(str(model))
|
RAVE-main/annotator/oneformer/detectron2/export/torchscript_patch.py
ADDED
|
@@ -0,0 +1,406 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import sys
|
| 5 |
+
import tempfile
|
| 6 |
+
from contextlib import ExitStack, contextmanager
|
| 7 |
+
from copy import deepcopy
|
| 8 |
+
from unittest import mock
|
| 9 |
+
import torch
|
| 10 |
+
from torch import nn
|
| 11 |
+
|
| 12 |
+
# need some explicit imports due to https://github.com/pytorch/pytorch/issues/38964
|
| 13 |
+
import annotator.oneformer.detectron2 # noqa F401
|
| 14 |
+
from annotator.oneformer.detectron2.structures import Boxes, Instances
|
| 15 |
+
from annotator.oneformer.detectron2.utils.env import _import_file
|
| 16 |
+
|
| 17 |
+
_counter = 0
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def _clear_jit_cache():
|
| 21 |
+
from torch.jit._recursive import concrete_type_store
|
| 22 |
+
from torch.jit._state import _jit_caching_layer
|
| 23 |
+
|
| 24 |
+
concrete_type_store.type_store.clear() # for modules
|
| 25 |
+
_jit_caching_layer.clear() # for free functions
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def _add_instances_conversion_methods(newInstances):
|
| 29 |
+
"""
|
| 30 |
+
Add from_instances methods to the scripted Instances class.
|
| 31 |
+
"""
|
| 32 |
+
cls_name = newInstances.__name__
|
| 33 |
+
|
| 34 |
+
@torch.jit.unused
|
| 35 |
+
def from_instances(instances: Instances):
|
| 36 |
+
"""
|
| 37 |
+
Create scripted Instances from original Instances
|
| 38 |
+
"""
|
| 39 |
+
fields = instances.get_fields()
|
| 40 |
+
image_size = instances.image_size
|
| 41 |
+
ret = newInstances(image_size)
|
| 42 |
+
for name, val in fields.items():
|
| 43 |
+
assert hasattr(ret, f"_{name}"), f"No attribute named {name} in {cls_name}"
|
| 44 |
+
setattr(ret, name, deepcopy(val))
|
| 45 |
+
return ret
|
| 46 |
+
|
| 47 |
+
newInstances.from_instances = from_instances
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
@contextmanager
|
| 51 |
+
def patch_instances(fields):
|
| 52 |
+
"""
|
| 53 |
+
A contextmanager, under which the Instances class in detectron2 is replaced
|
| 54 |
+
by a statically-typed scriptable class, defined by `fields`.
|
| 55 |
+
See more in `scripting_with_instances`.
|
| 56 |
+
"""
|
| 57 |
+
|
| 58 |
+
with tempfile.TemporaryDirectory(prefix="detectron2") as dir, tempfile.NamedTemporaryFile(
|
| 59 |
+
mode="w", encoding="utf-8", suffix=".py", dir=dir, delete=False
|
| 60 |
+
) as f:
|
| 61 |
+
try:
|
| 62 |
+
# Objects that use Instances should not reuse previously-compiled
|
| 63 |
+
# results in cache, because `Instances` could be a new class each time.
|
| 64 |
+
_clear_jit_cache()
|
| 65 |
+
|
| 66 |
+
cls_name, s = _gen_instance_module(fields)
|
| 67 |
+
f.write(s)
|
| 68 |
+
f.flush()
|
| 69 |
+
f.close()
|
| 70 |
+
|
| 71 |
+
module = _import(f.name)
|
| 72 |
+
new_instances = getattr(module, cls_name)
|
| 73 |
+
_ = torch.jit.script(new_instances)
|
| 74 |
+
# let torchscript think Instances was scripted already
|
| 75 |
+
Instances.__torch_script_class__ = True
|
| 76 |
+
# let torchscript find new_instances when looking for the jit type of Instances
|
| 77 |
+
Instances._jit_override_qualname = torch._jit_internal._qualified_name(new_instances)
|
| 78 |
+
|
| 79 |
+
_add_instances_conversion_methods(new_instances)
|
| 80 |
+
yield new_instances
|
| 81 |
+
finally:
|
| 82 |
+
try:
|
| 83 |
+
del Instances.__torch_script_class__
|
| 84 |
+
del Instances._jit_override_qualname
|
| 85 |
+
except AttributeError:
|
| 86 |
+
pass
|
| 87 |
+
sys.modules.pop(module.__name__)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def _gen_instance_class(fields):
|
| 91 |
+
"""
|
| 92 |
+
Args:
|
| 93 |
+
fields (dict[name: type])
|
| 94 |
+
"""
|
| 95 |
+
|
| 96 |
+
class _FieldType:
|
| 97 |
+
def __init__(self, name, type_):
|
| 98 |
+
assert isinstance(name, str), f"Field name must be str, got {name}"
|
| 99 |
+
self.name = name
|
| 100 |
+
self.type_ = type_
|
| 101 |
+
self.annotation = f"{type_.__module__}.{type_.__name__}"
|
| 102 |
+
|
| 103 |
+
fields = [_FieldType(k, v) for k, v in fields.items()]
|
| 104 |
+
|
| 105 |
+
def indent(level, s):
|
| 106 |
+
return " " * 4 * level + s
|
| 107 |
+
|
| 108 |
+
lines = []
|
| 109 |
+
|
| 110 |
+
global _counter
|
| 111 |
+
_counter += 1
|
| 112 |
+
|
| 113 |
+
cls_name = "ScriptedInstances{}".format(_counter)
|
| 114 |
+
|
| 115 |
+
field_names = tuple(x.name for x in fields)
|
| 116 |
+
extra_args = ", ".join([f"{f.name}: Optional[{f.annotation}] = None" for f in fields])
|
| 117 |
+
lines.append(
|
| 118 |
+
f"""
|
| 119 |
+
class {cls_name}:
|
| 120 |
+
def __init__(self, image_size: Tuple[int, int], {extra_args}):
|
| 121 |
+
self.image_size = image_size
|
| 122 |
+
self._field_names = {field_names}
|
| 123 |
+
"""
|
| 124 |
+
)
|
| 125 |
+
|
| 126 |
+
for f in fields:
|
| 127 |
+
lines.append(
|
| 128 |
+
indent(2, f"self._{f.name} = torch.jit.annotate(Optional[{f.annotation}], {f.name})")
|
| 129 |
+
)
|
| 130 |
+
|
| 131 |
+
for f in fields:
|
| 132 |
+
lines.append(
|
| 133 |
+
f"""
|
| 134 |
+
@property
|
| 135 |
+
def {f.name}(self) -> {f.annotation}:
|
| 136 |
+
# has to use a local for type refinement
|
| 137 |
+
# https://pytorch.org/docs/stable/jit_language_reference.html#optional-type-refinement
|
| 138 |
+
t = self._{f.name}
|
| 139 |
+
assert t is not None, "{f.name} is None and cannot be accessed!"
|
| 140 |
+
return t
|
| 141 |
+
|
| 142 |
+
@{f.name}.setter
|
| 143 |
+
def {f.name}(self, value: {f.annotation}) -> None:
|
| 144 |
+
self._{f.name} = value
|
| 145 |
+
"""
|
| 146 |
+
)
|
| 147 |
+
|
| 148 |
+
# support method `__len__`
|
| 149 |
+
lines.append(
|
| 150 |
+
"""
|
| 151 |
+
def __len__(self) -> int:
|
| 152 |
+
"""
|
| 153 |
+
)
|
| 154 |
+
for f in fields:
|
| 155 |
+
lines.append(
|
| 156 |
+
f"""
|
| 157 |
+
t = self._{f.name}
|
| 158 |
+
if t is not None:
|
| 159 |
+
return len(t)
|
| 160 |
+
"""
|
| 161 |
+
)
|
| 162 |
+
lines.append(
|
| 163 |
+
"""
|
| 164 |
+
raise NotImplementedError("Empty Instances does not support __len__!")
|
| 165 |
+
"""
|
| 166 |
+
)
|
| 167 |
+
|
| 168 |
+
# support method `has`
|
| 169 |
+
lines.append(
|
| 170 |
+
"""
|
| 171 |
+
def has(self, name: str) -> bool:
|
| 172 |
+
"""
|
| 173 |
+
)
|
| 174 |
+
for f in fields:
|
| 175 |
+
lines.append(
|
| 176 |
+
f"""
|
| 177 |
+
if name == "{f.name}":
|
| 178 |
+
return self._{f.name} is not None
|
| 179 |
+
"""
|
| 180 |
+
)
|
| 181 |
+
lines.append(
|
| 182 |
+
"""
|
| 183 |
+
return False
|
| 184 |
+
"""
|
| 185 |
+
)
|
| 186 |
+
|
| 187 |
+
# support method `to`
|
| 188 |
+
none_args = ", None" * len(fields)
|
| 189 |
+
lines.append(
|
| 190 |
+
f"""
|
| 191 |
+
def to(self, device: torch.device) -> "{cls_name}":
|
| 192 |
+
ret = {cls_name}(self.image_size{none_args})
|
| 193 |
+
"""
|
| 194 |
+
)
|
| 195 |
+
for f in fields:
|
| 196 |
+
if hasattr(f.type_, "to"):
|
| 197 |
+
lines.append(
|
| 198 |
+
f"""
|
| 199 |
+
t = self._{f.name}
|
| 200 |
+
if t is not None:
|
| 201 |
+
ret._{f.name} = t.to(device)
|
| 202 |
+
"""
|
| 203 |
+
)
|
| 204 |
+
else:
|
| 205 |
+
# For now, ignore fields that cannot be moved to devices.
|
| 206 |
+
# Maybe can support other tensor-like classes (e.g. __torch_function__)
|
| 207 |
+
pass
|
| 208 |
+
lines.append(
|
| 209 |
+
"""
|
| 210 |
+
return ret
|
| 211 |
+
"""
|
| 212 |
+
)
|
| 213 |
+
|
| 214 |
+
# support method `getitem`
|
| 215 |
+
none_args = ", None" * len(fields)
|
| 216 |
+
lines.append(
|
| 217 |
+
f"""
|
| 218 |
+
def __getitem__(self, item) -> "{cls_name}":
|
| 219 |
+
ret = {cls_name}(self.image_size{none_args})
|
| 220 |
+
"""
|
| 221 |
+
)
|
| 222 |
+
for f in fields:
|
| 223 |
+
lines.append(
|
| 224 |
+
f"""
|
| 225 |
+
t = self._{f.name}
|
| 226 |
+
if t is not None:
|
| 227 |
+
ret._{f.name} = t[item]
|
| 228 |
+
"""
|
| 229 |
+
)
|
| 230 |
+
lines.append(
|
| 231 |
+
"""
|
| 232 |
+
return ret
|
| 233 |
+
"""
|
| 234 |
+
)
|
| 235 |
+
|
| 236 |
+
# support method `cat`
|
| 237 |
+
# this version does not contain checks that all instances have same size and fields
|
| 238 |
+
none_args = ", None" * len(fields)
|
| 239 |
+
lines.append(
|
| 240 |
+
f"""
|
| 241 |
+
def cat(self, instances: List["{cls_name}"]) -> "{cls_name}":
|
| 242 |
+
ret = {cls_name}(self.image_size{none_args})
|
| 243 |
+
"""
|
| 244 |
+
)
|
| 245 |
+
for f in fields:
|
| 246 |
+
lines.append(
|
| 247 |
+
f"""
|
| 248 |
+
t = self._{f.name}
|
| 249 |
+
if t is not None:
|
| 250 |
+
values: List[{f.annotation}] = [x.{f.name} for x in instances]
|
| 251 |
+
if torch.jit.isinstance(t, torch.Tensor):
|
| 252 |
+
ret._{f.name} = torch.cat(values, dim=0)
|
| 253 |
+
else:
|
| 254 |
+
ret._{f.name} = t.cat(values)
|
| 255 |
+
"""
|
| 256 |
+
)
|
| 257 |
+
lines.append(
|
| 258 |
+
"""
|
| 259 |
+
return ret"""
|
| 260 |
+
)
|
| 261 |
+
|
| 262 |
+
# support method `get_fields()`
|
| 263 |
+
lines.append(
|
| 264 |
+
"""
|
| 265 |
+
def get_fields(self) -> Dict[str, Tensor]:
|
| 266 |
+
ret = {}
|
| 267 |
+
"""
|
| 268 |
+
)
|
| 269 |
+
for f in fields:
|
| 270 |
+
if f.type_ == Boxes:
|
| 271 |
+
stmt = "t.tensor"
|
| 272 |
+
elif f.type_ == torch.Tensor:
|
| 273 |
+
stmt = "t"
|
| 274 |
+
else:
|
| 275 |
+
stmt = f'assert False, "unsupported type {str(f.type_)}"'
|
| 276 |
+
lines.append(
|
| 277 |
+
f"""
|
| 278 |
+
t = self._{f.name}
|
| 279 |
+
if t is not None:
|
| 280 |
+
ret["{f.name}"] = {stmt}
|
| 281 |
+
"""
|
| 282 |
+
)
|
| 283 |
+
lines.append(
|
| 284 |
+
"""
|
| 285 |
+
return ret"""
|
| 286 |
+
)
|
| 287 |
+
return cls_name, os.linesep.join(lines)
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
def _gen_instance_module(fields):
|
| 291 |
+
# TODO: find a more automatic way to enable import of other classes
|
| 292 |
+
s = """
|
| 293 |
+
from copy import deepcopy
|
| 294 |
+
import torch
|
| 295 |
+
from torch import Tensor
|
| 296 |
+
import typing
|
| 297 |
+
from typing import *
|
| 298 |
+
|
| 299 |
+
import annotator.oneformer.detectron2
|
| 300 |
+
from annotator.oneformer.detectron2.structures import Boxes, Instances
|
| 301 |
+
|
| 302 |
+
"""
|
| 303 |
+
|
| 304 |
+
cls_name, cls_def = _gen_instance_class(fields)
|
| 305 |
+
s += cls_def
|
| 306 |
+
return cls_name, s
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
def _import(path):
|
| 310 |
+
return _import_file(
|
| 311 |
+
"{}{}".format(sys.modules[__name__].__name__, _counter), path, make_importable=True
|
| 312 |
+
)
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
@contextmanager
|
| 316 |
+
def patch_builtin_len(modules=()):
|
| 317 |
+
"""
|
| 318 |
+
Patch the builtin len() function of a few detectron2 modules
|
| 319 |
+
to use __len__ instead, because __len__ does not convert values to
|
| 320 |
+
integers and therefore is friendly to tracing.
|
| 321 |
+
|
| 322 |
+
Args:
|
| 323 |
+
modules (list[stsr]): names of extra modules to patch len(), in
|
| 324 |
+
addition to those in detectron2.
|
| 325 |
+
"""
|
| 326 |
+
|
| 327 |
+
def _new_len(obj):
|
| 328 |
+
return obj.__len__()
|
| 329 |
+
|
| 330 |
+
with ExitStack() as stack:
|
| 331 |
+
MODULES = [
|
| 332 |
+
"detectron2.modeling.roi_heads.fast_rcnn",
|
| 333 |
+
"detectron2.modeling.roi_heads.mask_head",
|
| 334 |
+
"detectron2.modeling.roi_heads.keypoint_head",
|
| 335 |
+
] + list(modules)
|
| 336 |
+
ctxs = [stack.enter_context(mock.patch(mod + ".len")) for mod in MODULES]
|
| 337 |
+
for m in ctxs:
|
| 338 |
+
m.side_effect = _new_len
|
| 339 |
+
yield
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
def patch_nonscriptable_classes():
|
| 343 |
+
"""
|
| 344 |
+
Apply patches on a few nonscriptable detectron2 classes.
|
| 345 |
+
Should not have side-effects on eager usage.
|
| 346 |
+
"""
|
| 347 |
+
# __prepare_scriptable__ can also be added to models for easier maintenance.
|
| 348 |
+
# But it complicates the clean model code.
|
| 349 |
+
|
| 350 |
+
from annotator.oneformer.detectron2.modeling.backbone import ResNet, FPN
|
| 351 |
+
|
| 352 |
+
# Due to https://github.com/pytorch/pytorch/issues/36061,
|
| 353 |
+
# we change backbone to use ModuleList for scripting.
|
| 354 |
+
# (note: this changes param names in state_dict)
|
| 355 |
+
|
| 356 |
+
def prepare_resnet(self):
|
| 357 |
+
ret = deepcopy(self)
|
| 358 |
+
ret.stages = nn.ModuleList(ret.stages)
|
| 359 |
+
for k in self.stage_names:
|
| 360 |
+
delattr(ret, k)
|
| 361 |
+
return ret
|
| 362 |
+
|
| 363 |
+
ResNet.__prepare_scriptable__ = prepare_resnet
|
| 364 |
+
|
| 365 |
+
def prepare_fpn(self):
|
| 366 |
+
ret = deepcopy(self)
|
| 367 |
+
ret.lateral_convs = nn.ModuleList(ret.lateral_convs)
|
| 368 |
+
ret.output_convs = nn.ModuleList(ret.output_convs)
|
| 369 |
+
for name, _ in self.named_children():
|
| 370 |
+
if name.startswith("fpn_"):
|
| 371 |
+
delattr(ret, name)
|
| 372 |
+
return ret
|
| 373 |
+
|
| 374 |
+
FPN.__prepare_scriptable__ = prepare_fpn
|
| 375 |
+
|
| 376 |
+
# Annotate some attributes to be constants for the purpose of scripting,
|
| 377 |
+
# even though they are not constants in eager mode.
|
| 378 |
+
from annotator.oneformer.detectron2.modeling.roi_heads import StandardROIHeads
|
| 379 |
+
|
| 380 |
+
if hasattr(StandardROIHeads, "__annotations__"):
|
| 381 |
+
# copy first to avoid editing annotations of base class
|
| 382 |
+
StandardROIHeads.__annotations__ = deepcopy(StandardROIHeads.__annotations__)
|
| 383 |
+
StandardROIHeads.__annotations__["mask_on"] = torch.jit.Final[bool]
|
| 384 |
+
StandardROIHeads.__annotations__["keypoint_on"] = torch.jit.Final[bool]
|
| 385 |
+
|
| 386 |
+
|
| 387 |
+
# These patches are not supposed to have side-effects.
|
| 388 |
+
patch_nonscriptable_classes()
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
@contextmanager
|
| 392 |
+
def freeze_training_mode(model):
|
| 393 |
+
"""
|
| 394 |
+
A context manager that annotates the "training" attribute of every submodule
|
| 395 |
+
to constant, so that the training codepath in these modules can be
|
| 396 |
+
meta-compiled away. Upon exiting, the annotations are reverted.
|
| 397 |
+
"""
|
| 398 |
+
classes = {type(x) for x in model.modules()}
|
| 399 |
+
# __constants__ is the old way to annotate constants and not compatible
|
| 400 |
+
# with __annotations__ .
|
| 401 |
+
classes = {x for x in classes if not hasattr(x, "__constants__")}
|
| 402 |
+
for cls in classes:
|
| 403 |
+
cls.__annotations__["training"] = torch.jit.Final[bool]
|
| 404 |
+
yield
|
| 405 |
+
for cls in classes:
|
| 406 |
+
cls.__annotations__["training"] = bool
|
RAVE-main/annotator/oneformer/detectron2/layers/aspp.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
|
| 3 |
+
from copy import deepcopy
|
| 4 |
+
import fvcore.nn.weight_init as weight_init
|
| 5 |
+
import torch
|
| 6 |
+
from torch import nn
|
| 7 |
+
from torch.nn import functional as F
|
| 8 |
+
|
| 9 |
+
from .batch_norm import get_norm
|
| 10 |
+
from .blocks import DepthwiseSeparableConv2d
|
| 11 |
+
from .wrappers import Conv2d
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class ASPP(nn.Module):
|
| 15 |
+
"""
|
| 16 |
+
Atrous Spatial Pyramid Pooling (ASPP).
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
def __init__(
|
| 20 |
+
self,
|
| 21 |
+
in_channels,
|
| 22 |
+
out_channels,
|
| 23 |
+
dilations,
|
| 24 |
+
*,
|
| 25 |
+
norm,
|
| 26 |
+
activation,
|
| 27 |
+
pool_kernel_size=None,
|
| 28 |
+
dropout: float = 0.0,
|
| 29 |
+
use_depthwise_separable_conv=False,
|
| 30 |
+
):
|
| 31 |
+
"""
|
| 32 |
+
Args:
|
| 33 |
+
in_channels (int): number of input channels for ASPP.
|
| 34 |
+
out_channels (int): number of output channels.
|
| 35 |
+
dilations (list): a list of 3 dilations in ASPP.
|
| 36 |
+
norm (str or callable): normalization for all conv layers.
|
| 37 |
+
See :func:`layers.get_norm` for supported format. norm is
|
| 38 |
+
applied to all conv layers except the conv following
|
| 39 |
+
global average pooling.
|
| 40 |
+
activation (callable): activation function.
|
| 41 |
+
pool_kernel_size (tuple, list): the average pooling size (kh, kw)
|
| 42 |
+
for image pooling layer in ASPP. If set to None, it always
|
| 43 |
+
performs global average pooling. If not None, it must be
|
| 44 |
+
divisible by the shape of inputs in forward(). It is recommended
|
| 45 |
+
to use a fixed input feature size in training, and set this
|
| 46 |
+
option to match this size, so that it performs global average
|
| 47 |
+
pooling in training, and the size of the pooling window stays
|
| 48 |
+
consistent in inference.
|
| 49 |
+
dropout (float): apply dropout on the output of ASPP. It is used in
|
| 50 |
+
the official DeepLab implementation with a rate of 0.1:
|
| 51 |
+
https://github.com/tensorflow/models/blob/21b73d22f3ed05b650e85ac50849408dd36de32e/research/deeplab/model.py#L532 # noqa
|
| 52 |
+
use_depthwise_separable_conv (bool): use DepthwiseSeparableConv2d
|
| 53 |
+
for 3x3 convs in ASPP, proposed in :paper:`DeepLabV3+`.
|
| 54 |
+
"""
|
| 55 |
+
super(ASPP, self).__init__()
|
| 56 |
+
assert len(dilations) == 3, "ASPP expects 3 dilations, got {}".format(len(dilations))
|
| 57 |
+
self.pool_kernel_size = pool_kernel_size
|
| 58 |
+
self.dropout = dropout
|
| 59 |
+
use_bias = norm == ""
|
| 60 |
+
self.convs = nn.ModuleList()
|
| 61 |
+
# conv 1x1
|
| 62 |
+
self.convs.append(
|
| 63 |
+
Conv2d(
|
| 64 |
+
in_channels,
|
| 65 |
+
out_channels,
|
| 66 |
+
kernel_size=1,
|
| 67 |
+
bias=use_bias,
|
| 68 |
+
norm=get_norm(norm, out_channels),
|
| 69 |
+
activation=deepcopy(activation),
|
| 70 |
+
)
|
| 71 |
+
)
|
| 72 |
+
weight_init.c2_xavier_fill(self.convs[-1])
|
| 73 |
+
# atrous convs
|
| 74 |
+
for dilation in dilations:
|
| 75 |
+
if use_depthwise_separable_conv:
|
| 76 |
+
self.convs.append(
|
| 77 |
+
DepthwiseSeparableConv2d(
|
| 78 |
+
in_channels,
|
| 79 |
+
out_channels,
|
| 80 |
+
kernel_size=3,
|
| 81 |
+
padding=dilation,
|
| 82 |
+
dilation=dilation,
|
| 83 |
+
norm1=norm,
|
| 84 |
+
activation1=deepcopy(activation),
|
| 85 |
+
norm2=norm,
|
| 86 |
+
activation2=deepcopy(activation),
|
| 87 |
+
)
|
| 88 |
+
)
|
| 89 |
+
else:
|
| 90 |
+
self.convs.append(
|
| 91 |
+
Conv2d(
|
| 92 |
+
in_channels,
|
| 93 |
+
out_channels,
|
| 94 |
+
kernel_size=3,
|
| 95 |
+
padding=dilation,
|
| 96 |
+
dilation=dilation,
|
| 97 |
+
bias=use_bias,
|
| 98 |
+
norm=get_norm(norm, out_channels),
|
| 99 |
+
activation=deepcopy(activation),
|
| 100 |
+
)
|
| 101 |
+
)
|
| 102 |
+
weight_init.c2_xavier_fill(self.convs[-1])
|
| 103 |
+
# image pooling
|
| 104 |
+
# We do not add BatchNorm because the spatial resolution is 1x1,
|
| 105 |
+
# the original TF implementation has BatchNorm.
|
| 106 |
+
if pool_kernel_size is None:
|
| 107 |
+
image_pooling = nn.Sequential(
|
| 108 |
+
nn.AdaptiveAvgPool2d(1),
|
| 109 |
+
Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)),
|
| 110 |
+
)
|
| 111 |
+
else:
|
| 112 |
+
image_pooling = nn.Sequential(
|
| 113 |
+
nn.AvgPool2d(kernel_size=pool_kernel_size, stride=1),
|
| 114 |
+
Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)),
|
| 115 |
+
)
|
| 116 |
+
weight_init.c2_xavier_fill(image_pooling[1])
|
| 117 |
+
self.convs.append(image_pooling)
|
| 118 |
+
|
| 119 |
+
self.project = Conv2d(
|
| 120 |
+
5 * out_channels,
|
| 121 |
+
out_channels,
|
| 122 |
+
kernel_size=1,
|
| 123 |
+
bias=use_bias,
|
| 124 |
+
norm=get_norm(norm, out_channels),
|
| 125 |
+
activation=deepcopy(activation),
|
| 126 |
+
)
|
| 127 |
+
weight_init.c2_xavier_fill(self.project)
|
| 128 |
+
|
| 129 |
+
def forward(self, x):
|
| 130 |
+
size = x.shape[-2:]
|
| 131 |
+
if self.pool_kernel_size is not None:
|
| 132 |
+
if size[0] % self.pool_kernel_size[0] or size[1] % self.pool_kernel_size[1]:
|
| 133 |
+
raise ValueError(
|
| 134 |
+
"`pool_kernel_size` must be divisible by the shape of inputs. "
|
| 135 |
+
"Input size: {} `pool_kernel_size`: {}".format(size, self.pool_kernel_size)
|
| 136 |
+
)
|
| 137 |
+
res = []
|
| 138 |
+
for conv in self.convs:
|
| 139 |
+
res.append(conv(x))
|
| 140 |
+
res[-1] = F.interpolate(res[-1], size=size, mode="bilinear", align_corners=False)
|
| 141 |
+
res = torch.cat(res, dim=1)
|
| 142 |
+
res = self.project(res)
|
| 143 |
+
res = F.dropout(res, self.dropout, training=self.training) if self.dropout > 0 else res
|
| 144 |
+
return res
|
RAVE-main/annotator/oneformer/detectron2/layers/batch_norm.py
ADDED
|
@@ -0,0 +1,300 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
import torch
|
| 3 |
+
import torch.distributed as dist
|
| 4 |
+
from fvcore.nn.distributed import differentiable_all_reduce
|
| 5 |
+
from torch import nn
|
| 6 |
+
from torch.nn import functional as F
|
| 7 |
+
|
| 8 |
+
from annotator.oneformer.detectron2.utils import comm, env
|
| 9 |
+
|
| 10 |
+
from .wrappers import BatchNorm2d
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class FrozenBatchNorm2d(nn.Module):
|
| 14 |
+
"""
|
| 15 |
+
BatchNorm2d where the batch statistics and the affine parameters are fixed.
|
| 16 |
+
|
| 17 |
+
It contains non-trainable buffers called
|
| 18 |
+
"weight" and "bias", "running_mean", "running_var",
|
| 19 |
+
initialized to perform identity transformation.
|
| 20 |
+
|
| 21 |
+
The pre-trained backbone models from Caffe2 only contain "weight" and "bias",
|
| 22 |
+
which are computed from the original four parameters of BN.
|
| 23 |
+
The affine transform `x * weight + bias` will perform the equivalent
|
| 24 |
+
computation of `(x - running_mean) / sqrt(running_var) * weight + bias`.
|
| 25 |
+
When loading a backbone model from Caffe2, "running_mean" and "running_var"
|
| 26 |
+
will be left unchanged as identity transformation.
|
| 27 |
+
|
| 28 |
+
Other pre-trained backbone models may contain all 4 parameters.
|
| 29 |
+
|
| 30 |
+
The forward is implemented by `F.batch_norm(..., training=False)`.
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
_version = 3
|
| 34 |
+
|
| 35 |
+
def __init__(self, num_features, eps=1e-5):
|
| 36 |
+
super().__init__()
|
| 37 |
+
self.num_features = num_features
|
| 38 |
+
self.eps = eps
|
| 39 |
+
self.register_buffer("weight", torch.ones(num_features))
|
| 40 |
+
self.register_buffer("bias", torch.zeros(num_features))
|
| 41 |
+
self.register_buffer("running_mean", torch.zeros(num_features))
|
| 42 |
+
self.register_buffer("running_var", torch.ones(num_features) - eps)
|
| 43 |
+
|
| 44 |
+
def forward(self, x):
|
| 45 |
+
if x.requires_grad:
|
| 46 |
+
# When gradients are needed, F.batch_norm will use extra memory
|
| 47 |
+
# because its backward op computes gradients for weight/bias as well.
|
| 48 |
+
scale = self.weight * (self.running_var + self.eps).rsqrt()
|
| 49 |
+
bias = self.bias - self.running_mean * scale
|
| 50 |
+
scale = scale.reshape(1, -1, 1, 1)
|
| 51 |
+
bias = bias.reshape(1, -1, 1, 1)
|
| 52 |
+
out_dtype = x.dtype # may be half
|
| 53 |
+
return x * scale.to(out_dtype) + bias.to(out_dtype)
|
| 54 |
+
else:
|
| 55 |
+
# When gradients are not needed, F.batch_norm is a single fused op
|
| 56 |
+
# and provide more optimization opportunities.
|
| 57 |
+
return F.batch_norm(
|
| 58 |
+
x,
|
| 59 |
+
self.running_mean,
|
| 60 |
+
self.running_var,
|
| 61 |
+
self.weight,
|
| 62 |
+
self.bias,
|
| 63 |
+
training=False,
|
| 64 |
+
eps=self.eps,
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
def _load_from_state_dict(
|
| 68 |
+
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
|
| 69 |
+
):
|
| 70 |
+
version = local_metadata.get("version", None)
|
| 71 |
+
|
| 72 |
+
if version is None or version < 2:
|
| 73 |
+
# No running_mean/var in early versions
|
| 74 |
+
# This will silent the warnings
|
| 75 |
+
if prefix + "running_mean" not in state_dict:
|
| 76 |
+
state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean)
|
| 77 |
+
if prefix + "running_var" not in state_dict:
|
| 78 |
+
state_dict[prefix + "running_var"] = torch.ones_like(self.running_var)
|
| 79 |
+
|
| 80 |
+
super()._load_from_state_dict(
|
| 81 |
+
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
|
| 82 |
+
)
|
| 83 |
+
|
| 84 |
+
def __repr__(self):
|
| 85 |
+
return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps)
|
| 86 |
+
|
| 87 |
+
@classmethod
|
| 88 |
+
def convert_frozen_batchnorm(cls, module):
|
| 89 |
+
"""
|
| 90 |
+
Convert all BatchNorm/SyncBatchNorm in module into FrozenBatchNorm.
|
| 91 |
+
|
| 92 |
+
Args:
|
| 93 |
+
module (torch.nn.Module):
|
| 94 |
+
|
| 95 |
+
Returns:
|
| 96 |
+
If module is BatchNorm/SyncBatchNorm, returns a new module.
|
| 97 |
+
Otherwise, in-place convert module and return it.
|
| 98 |
+
|
| 99 |
+
Similar to convert_sync_batchnorm in
|
| 100 |
+
https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
|
| 101 |
+
"""
|
| 102 |
+
bn_module = nn.modules.batchnorm
|
| 103 |
+
bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)
|
| 104 |
+
res = module
|
| 105 |
+
if isinstance(module, bn_module):
|
| 106 |
+
res = cls(module.num_features)
|
| 107 |
+
if module.affine:
|
| 108 |
+
res.weight.data = module.weight.data.clone().detach()
|
| 109 |
+
res.bias.data = module.bias.data.clone().detach()
|
| 110 |
+
res.running_mean.data = module.running_mean.data
|
| 111 |
+
res.running_var.data = module.running_var.data
|
| 112 |
+
res.eps = module.eps
|
| 113 |
+
else:
|
| 114 |
+
for name, child in module.named_children():
|
| 115 |
+
new_child = cls.convert_frozen_batchnorm(child)
|
| 116 |
+
if new_child is not child:
|
| 117 |
+
res.add_module(name, new_child)
|
| 118 |
+
return res
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
def get_norm(norm, out_channels):
|
| 122 |
+
"""
|
| 123 |
+
Args:
|
| 124 |
+
norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;
|
| 125 |
+
or a callable that takes a channel number and returns
|
| 126 |
+
the normalization layer as a nn.Module.
|
| 127 |
+
|
| 128 |
+
Returns:
|
| 129 |
+
nn.Module or None: the normalization layer
|
| 130 |
+
"""
|
| 131 |
+
if norm is None:
|
| 132 |
+
return None
|
| 133 |
+
if isinstance(norm, str):
|
| 134 |
+
if len(norm) == 0:
|
| 135 |
+
return None
|
| 136 |
+
norm = {
|
| 137 |
+
"BN": BatchNorm2d,
|
| 138 |
+
# Fixed in https://github.com/pytorch/pytorch/pull/36382
|
| 139 |
+
"SyncBN": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm,
|
| 140 |
+
"FrozenBN": FrozenBatchNorm2d,
|
| 141 |
+
"GN": lambda channels: nn.GroupNorm(32, channels),
|
| 142 |
+
# for debugging:
|
| 143 |
+
"nnSyncBN": nn.SyncBatchNorm,
|
| 144 |
+
"naiveSyncBN": NaiveSyncBatchNorm,
|
| 145 |
+
# expose stats_mode N as an option to caller, required for zero-len inputs
|
| 146 |
+
"naiveSyncBN_N": lambda channels: NaiveSyncBatchNorm(channels, stats_mode="N"),
|
| 147 |
+
"LN": lambda channels: LayerNorm(channels),
|
| 148 |
+
}[norm]
|
| 149 |
+
return norm(out_channels)
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
class NaiveSyncBatchNorm(BatchNorm2d):
|
| 153 |
+
"""
|
| 154 |
+
In PyTorch<=1.5, ``nn.SyncBatchNorm`` has incorrect gradient
|
| 155 |
+
when the batch size on each worker is different.
|
| 156 |
+
(e.g., when scale augmentation is used, or when it is applied to mask head).
|
| 157 |
+
|
| 158 |
+
This is a slower but correct alternative to `nn.SyncBatchNorm`.
|
| 159 |
+
|
| 160 |
+
Note:
|
| 161 |
+
There isn't a single definition of Sync BatchNorm.
|
| 162 |
+
|
| 163 |
+
When ``stats_mode==""``, this module computes overall statistics by using
|
| 164 |
+
statistics of each worker with equal weight. The result is true statistics
|
| 165 |
+
of all samples (as if they are all on one worker) only when all workers
|
| 166 |
+
have the same (N, H, W). This mode does not support inputs with zero batch size.
|
| 167 |
+
|
| 168 |
+
When ``stats_mode=="N"``, this module computes overall statistics by weighting
|
| 169 |
+
the statistics of each worker by their ``N``. The result is true statistics
|
| 170 |
+
of all samples (as if they are all on one worker) only when all workers
|
| 171 |
+
have the same (H, W). It is slower than ``stats_mode==""``.
|
| 172 |
+
|
| 173 |
+
Even though the result of this module may not be the true statistics of all samples,
|
| 174 |
+
it may still be reasonable because it might be preferrable to assign equal weights
|
| 175 |
+
to all workers, regardless of their (H, W) dimension, instead of putting larger weight
|
| 176 |
+
on larger images. From preliminary experiments, little difference is found between such
|
| 177 |
+
a simplified implementation and an accurate computation of overall mean & variance.
|
| 178 |
+
"""
|
| 179 |
+
|
| 180 |
+
def __init__(self, *args, stats_mode="", **kwargs):
|
| 181 |
+
super().__init__(*args, **kwargs)
|
| 182 |
+
assert stats_mode in ["", "N"]
|
| 183 |
+
self._stats_mode = stats_mode
|
| 184 |
+
|
| 185 |
+
def forward(self, input):
|
| 186 |
+
if comm.get_world_size() == 1 or not self.training:
|
| 187 |
+
return super().forward(input)
|
| 188 |
+
|
| 189 |
+
B, C = input.shape[0], input.shape[1]
|
| 190 |
+
|
| 191 |
+
half_input = input.dtype == torch.float16
|
| 192 |
+
if half_input:
|
| 193 |
+
# fp16 does not have good enough numerics for the reduction here
|
| 194 |
+
input = input.float()
|
| 195 |
+
mean = torch.mean(input, dim=[0, 2, 3])
|
| 196 |
+
meansqr = torch.mean(input * input, dim=[0, 2, 3])
|
| 197 |
+
|
| 198 |
+
if self._stats_mode == "":
|
| 199 |
+
assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.'
|
| 200 |
+
vec = torch.cat([mean, meansqr], dim=0)
|
| 201 |
+
vec = differentiable_all_reduce(vec) * (1.0 / dist.get_world_size())
|
| 202 |
+
mean, meansqr = torch.split(vec, C)
|
| 203 |
+
momentum = self.momentum
|
| 204 |
+
else:
|
| 205 |
+
if B == 0:
|
| 206 |
+
vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype)
|
| 207 |
+
vec = vec + input.sum() # make sure there is gradient w.r.t input
|
| 208 |
+
else:
|
| 209 |
+
vec = torch.cat(
|
| 210 |
+
[mean, meansqr, torch.ones([1], device=mean.device, dtype=mean.dtype)], dim=0
|
| 211 |
+
)
|
| 212 |
+
vec = differentiable_all_reduce(vec * B)
|
| 213 |
+
|
| 214 |
+
total_batch = vec[-1].detach()
|
| 215 |
+
momentum = total_batch.clamp(max=1) * self.momentum # no update if total_batch is 0
|
| 216 |
+
mean, meansqr, _ = torch.split(vec / total_batch.clamp(min=1), C) # avoid div-by-zero
|
| 217 |
+
|
| 218 |
+
var = meansqr - mean * mean
|
| 219 |
+
invstd = torch.rsqrt(var + self.eps)
|
| 220 |
+
scale = self.weight * invstd
|
| 221 |
+
bias = self.bias - mean * scale
|
| 222 |
+
scale = scale.reshape(1, -1, 1, 1)
|
| 223 |
+
bias = bias.reshape(1, -1, 1, 1)
|
| 224 |
+
|
| 225 |
+
self.running_mean += momentum * (mean.detach() - self.running_mean)
|
| 226 |
+
self.running_var += momentum * (var.detach() - self.running_var)
|
| 227 |
+
ret = input * scale + bias
|
| 228 |
+
if half_input:
|
| 229 |
+
ret = ret.half()
|
| 230 |
+
return ret
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
class CycleBatchNormList(nn.ModuleList):
|
| 234 |
+
"""
|
| 235 |
+
Implement domain-specific BatchNorm by cycling.
|
| 236 |
+
|
| 237 |
+
When a BatchNorm layer is used for multiple input domains or input
|
| 238 |
+
features, it might need to maintain a separate test-time statistics
|
| 239 |
+
for each domain. See Sec 5.2 in :paper:`rethinking-batchnorm`.
|
| 240 |
+
|
| 241 |
+
This module implements it by using N separate BN layers
|
| 242 |
+
and it cycles through them every time a forward() is called.
|
| 243 |
+
|
| 244 |
+
NOTE: The caller of this module MUST guarantee to always call
|
| 245 |
+
this module by multiple of N times. Otherwise its test-time statistics
|
| 246 |
+
will be incorrect.
|
| 247 |
+
"""
|
| 248 |
+
|
| 249 |
+
def __init__(self, length: int, bn_class=nn.BatchNorm2d, **kwargs):
|
| 250 |
+
"""
|
| 251 |
+
Args:
|
| 252 |
+
length: number of BatchNorm layers to cycle.
|
| 253 |
+
bn_class: the BatchNorm class to use
|
| 254 |
+
kwargs: arguments of the BatchNorm class, such as num_features.
|
| 255 |
+
"""
|
| 256 |
+
self._affine = kwargs.pop("affine", True)
|
| 257 |
+
super().__init__([bn_class(**kwargs, affine=False) for k in range(length)])
|
| 258 |
+
if self._affine:
|
| 259 |
+
# shared affine, domain-specific BN
|
| 260 |
+
channels = self[0].num_features
|
| 261 |
+
self.weight = nn.Parameter(torch.ones(channels))
|
| 262 |
+
self.bias = nn.Parameter(torch.zeros(channels))
|
| 263 |
+
self._pos = 0
|
| 264 |
+
|
| 265 |
+
def forward(self, x):
|
| 266 |
+
ret = self[self._pos](x)
|
| 267 |
+
self._pos = (self._pos + 1) % len(self)
|
| 268 |
+
|
| 269 |
+
if self._affine:
|
| 270 |
+
w = self.weight.reshape(1, -1, 1, 1)
|
| 271 |
+
b = self.bias.reshape(1, -1, 1, 1)
|
| 272 |
+
return ret * w + b
|
| 273 |
+
else:
|
| 274 |
+
return ret
|
| 275 |
+
|
| 276 |
+
def extra_repr(self):
|
| 277 |
+
return f"affine={self._affine}"
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
class LayerNorm(nn.Module):
|
| 281 |
+
"""
|
| 282 |
+
A LayerNorm variant, popularized by Transformers, that performs point-wise mean and
|
| 283 |
+
variance normalization over the channel dimension for inputs that have shape
|
| 284 |
+
(batch_size, channels, height, width).
|
| 285 |
+
https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa B950
|
| 286 |
+
"""
|
| 287 |
+
|
| 288 |
+
def __init__(self, normalized_shape, eps=1e-6):
|
| 289 |
+
super().__init__()
|
| 290 |
+
self.weight = nn.Parameter(torch.ones(normalized_shape))
|
| 291 |
+
self.bias = nn.Parameter(torch.zeros(normalized_shape))
|
| 292 |
+
self.eps = eps
|
| 293 |
+
self.normalized_shape = (normalized_shape,)
|
| 294 |
+
|
| 295 |
+
def forward(self, x):
|
| 296 |
+
u = x.mean(1, keepdim=True)
|
| 297 |
+
s = (x - u).pow(2).mean(1, keepdim=True)
|
| 298 |
+
x = (x - u) / torch.sqrt(s + self.eps)
|
| 299 |
+
x = self.weight[:, None, None] * x + self.bias[:, None, None]
|
| 300 |
+
return x
|
RAVE-main/annotator/oneformer/detectron2/layers/blocks.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 3 |
+
|
| 4 |
+
import fvcore.nn.weight_init as weight_init
|
| 5 |
+
from torch import nn
|
| 6 |
+
|
| 7 |
+
from .batch_norm import FrozenBatchNorm2d, get_norm
|
| 8 |
+
from .wrappers import Conv2d
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
"""
|
| 12 |
+
CNN building blocks.
|
| 13 |
+
"""
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class CNNBlockBase(nn.Module):
|
| 17 |
+
"""
|
| 18 |
+
A CNN block is assumed to have input channels, output channels and a stride.
|
| 19 |
+
The input and output of `forward()` method must be NCHW tensors.
|
| 20 |
+
The method can perform arbitrary computation but must match the given
|
| 21 |
+
channels and stride specification.
|
| 22 |
+
|
| 23 |
+
Attribute:
|
| 24 |
+
in_channels (int):
|
| 25 |
+
out_channels (int):
|
| 26 |
+
stride (int):
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
def __init__(self, in_channels, out_channels, stride):
|
| 30 |
+
"""
|
| 31 |
+
The `__init__` method of any subclass should also contain these arguments.
|
| 32 |
+
|
| 33 |
+
Args:
|
| 34 |
+
in_channels (int):
|
| 35 |
+
out_channels (int):
|
| 36 |
+
stride (int):
|
| 37 |
+
"""
|
| 38 |
+
super().__init__()
|
| 39 |
+
self.in_channels = in_channels
|
| 40 |
+
self.out_channels = out_channels
|
| 41 |
+
self.stride = stride
|
| 42 |
+
|
| 43 |
+
def freeze(self):
|
| 44 |
+
"""
|
| 45 |
+
Make this block not trainable.
|
| 46 |
+
This method sets all parameters to `requires_grad=False`,
|
| 47 |
+
and convert all BatchNorm layers to FrozenBatchNorm
|
| 48 |
+
|
| 49 |
+
Returns:
|
| 50 |
+
the block itself
|
| 51 |
+
"""
|
| 52 |
+
for p in self.parameters():
|
| 53 |
+
p.requires_grad = False
|
| 54 |
+
FrozenBatchNorm2d.convert_frozen_batchnorm(self)
|
| 55 |
+
return self
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class DepthwiseSeparableConv2d(nn.Module):
|
| 59 |
+
"""
|
| 60 |
+
A kxk depthwise convolution + a 1x1 convolution.
|
| 61 |
+
|
| 62 |
+
In :paper:`xception`, norm & activation are applied on the second conv.
|
| 63 |
+
:paper:`mobilenet` uses norm & activation on both convs.
|
| 64 |
+
"""
|
| 65 |
+
|
| 66 |
+
def __init__(
|
| 67 |
+
self,
|
| 68 |
+
in_channels,
|
| 69 |
+
out_channels,
|
| 70 |
+
kernel_size=3,
|
| 71 |
+
padding=1,
|
| 72 |
+
dilation=1,
|
| 73 |
+
*,
|
| 74 |
+
norm1=None,
|
| 75 |
+
activation1=None,
|
| 76 |
+
norm2=None,
|
| 77 |
+
activation2=None,
|
| 78 |
+
):
|
| 79 |
+
"""
|
| 80 |
+
Args:
|
| 81 |
+
norm1, norm2 (str or callable): normalization for the two conv layers.
|
| 82 |
+
activation1, activation2 (callable(Tensor) -> Tensor): activation
|
| 83 |
+
function for the two conv layers.
|
| 84 |
+
"""
|
| 85 |
+
super().__init__()
|
| 86 |
+
self.depthwise = Conv2d(
|
| 87 |
+
in_channels,
|
| 88 |
+
in_channels,
|
| 89 |
+
kernel_size=kernel_size,
|
| 90 |
+
padding=padding,
|
| 91 |
+
dilation=dilation,
|
| 92 |
+
groups=in_channels,
|
| 93 |
+
bias=not norm1,
|
| 94 |
+
norm=get_norm(norm1, in_channels),
|
| 95 |
+
activation=activation1,
|
| 96 |
+
)
|
| 97 |
+
self.pointwise = Conv2d(
|
| 98 |
+
in_channels,
|
| 99 |
+
out_channels,
|
| 100 |
+
kernel_size=1,
|
| 101 |
+
bias=not norm2,
|
| 102 |
+
norm=get_norm(norm2, out_channels),
|
| 103 |
+
activation=activation2,
|
| 104 |
+
)
|
| 105 |
+
|
| 106 |
+
# default initialization
|
| 107 |
+
weight_init.c2_msra_fill(self.depthwise)
|
| 108 |
+
weight_init.c2_msra_fill(self.pointwise)
|
| 109 |
+
|
| 110 |
+
def forward(self, x):
|
| 111 |
+
return self.pointwise(self.depthwise(x))
|
RAVE-main/annotator/oneformer/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
#pragma once
|
| 3 |
+
#include <torch/types.h>
|
| 4 |
+
|
| 5 |
+
namespace detectron2 {
|
| 6 |
+
|
| 7 |
+
at::Tensor box_iou_rotated_cpu(
|
| 8 |
+
const at::Tensor& boxes1,
|
| 9 |
+
const at::Tensor& boxes2);
|
| 10 |
+
|
| 11 |
+
#if defined(WITH_CUDA) || defined(WITH_HIP)
|
| 12 |
+
at::Tensor box_iou_rotated_cuda(
|
| 13 |
+
const at::Tensor& boxes1,
|
| 14 |
+
const at::Tensor& boxes2);
|
| 15 |
+
#endif
|
| 16 |
+
|
| 17 |
+
// Interface for Python
|
| 18 |
+
// inline is needed to prevent multiple function definitions when this header is
|
| 19 |
+
// included by different cpps
|
| 20 |
+
inline at::Tensor box_iou_rotated(
|
| 21 |
+
const at::Tensor& boxes1,
|
| 22 |
+
const at::Tensor& boxes2) {
|
| 23 |
+
assert(boxes1.device().is_cuda() == boxes2.device().is_cuda());
|
| 24 |
+
if (boxes1.device().is_cuda()) {
|
| 25 |
+
#if defined(WITH_CUDA) || defined(WITH_HIP)
|
| 26 |
+
return box_iou_rotated_cuda(boxes1.contiguous(), boxes2.contiguous());
|
| 27 |
+
#else
|
| 28 |
+
AT_ERROR("Detectron2 is not compiled with GPU support!");
|
| 29 |
+
#endif
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
return box_iou_rotated_cpu(boxes1.contiguous(), boxes2.contiguous());
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
} // namespace detectron2
|
RAVE-main/annotator/oneformer/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
#include "box_iou_rotated.h"
|
| 3 |
+
#include "box_iou_rotated_utils.h"
|
| 4 |
+
|
| 5 |
+
namespace detectron2 {
|
| 6 |
+
|
| 7 |
+
template <typename T>
|
| 8 |
+
void box_iou_rotated_cpu_kernel(
|
| 9 |
+
const at::Tensor& boxes1,
|
| 10 |
+
const at::Tensor& boxes2,
|
| 11 |
+
at::Tensor& ious) {
|
| 12 |
+
auto num_boxes1 = boxes1.size(0);
|
| 13 |
+
auto num_boxes2 = boxes2.size(0);
|
| 14 |
+
|
| 15 |
+
for (int i = 0; i < num_boxes1; i++) {
|
| 16 |
+
for (int j = 0; j < num_boxes2; j++) {
|
| 17 |
+
ious[i * num_boxes2 + j] = single_box_iou_rotated<T>(
|
| 18 |
+
boxes1[i].data_ptr<T>(), boxes2[j].data_ptr<T>());
|
| 19 |
+
}
|
| 20 |
+
}
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
at::Tensor box_iou_rotated_cpu(
|
| 24 |
+
// input must be contiguous:
|
| 25 |
+
const at::Tensor& boxes1,
|
| 26 |
+
const at::Tensor& boxes2) {
|
| 27 |
+
auto num_boxes1 = boxes1.size(0);
|
| 28 |
+
auto num_boxes2 = boxes2.size(0);
|
| 29 |
+
at::Tensor ious =
|
| 30 |
+
at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat));
|
| 31 |
+
|
| 32 |
+
box_iou_rotated_cpu_kernel<float>(boxes1, boxes2, ious);
|
| 33 |
+
|
| 34 |
+
// reshape from 1d array to 2d array
|
| 35 |
+
auto shape = std::vector<int64_t>{num_boxes1, num_boxes2};
|
| 36 |
+
return ious.reshape(shape);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
} // namespace detectron2
|
RAVE-main/annotator/oneformer/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
#include <ATen/ATen.h>
|
| 3 |
+
#include <ATen/cuda/CUDAContext.h>
|
| 4 |
+
#include <c10/cuda/CUDAGuard.h>
|
| 5 |
+
#include <ATen/cuda/CUDAApplyUtils.cuh>
|
| 6 |
+
#include "box_iou_rotated_utils.h"
|
| 7 |
+
|
| 8 |
+
namespace detectron2 {
|
| 9 |
+
|
| 10 |
+
// 2D block with 32 * 16 = 512 threads per block
|
| 11 |
+
const int BLOCK_DIM_X = 32;
|
| 12 |
+
const int BLOCK_DIM_Y = 16;
|
| 13 |
+
|
| 14 |
+
template <typename T>
|
| 15 |
+
__global__ void box_iou_rotated_cuda_kernel(
|
| 16 |
+
const int n_boxes1,
|
| 17 |
+
const int n_boxes2,
|
| 18 |
+
const T* dev_boxes1,
|
| 19 |
+
const T* dev_boxes2,
|
| 20 |
+
T* dev_ious) {
|
| 21 |
+
const int row_start = blockIdx.x * blockDim.x;
|
| 22 |
+
const int col_start = blockIdx.y * blockDim.y;
|
| 23 |
+
|
| 24 |
+
const int row_size = min(n_boxes1 - row_start, blockDim.x);
|
| 25 |
+
const int col_size = min(n_boxes2 - col_start, blockDim.y);
|
| 26 |
+
|
| 27 |
+
__shared__ float block_boxes1[BLOCK_DIM_X * 5];
|
| 28 |
+
__shared__ float block_boxes2[BLOCK_DIM_Y * 5];
|
| 29 |
+
|
| 30 |
+
// It's safe to copy using threadIdx.x since BLOCK_DIM_X >= BLOCK_DIM_Y
|
| 31 |
+
if (threadIdx.x < row_size && threadIdx.y == 0) {
|
| 32 |
+
block_boxes1[threadIdx.x * 5 + 0] =
|
| 33 |
+
dev_boxes1[(row_start + threadIdx.x) * 5 + 0];
|
| 34 |
+
block_boxes1[threadIdx.x * 5 + 1] =
|
| 35 |
+
dev_boxes1[(row_start + threadIdx.x) * 5 + 1];
|
| 36 |
+
block_boxes1[threadIdx.x * 5 + 2] =
|
| 37 |
+
dev_boxes1[(row_start + threadIdx.x) * 5 + 2];
|
| 38 |
+
block_boxes1[threadIdx.x * 5 + 3] =
|
| 39 |
+
dev_boxes1[(row_start + threadIdx.x) * 5 + 3];
|
| 40 |
+
block_boxes1[threadIdx.x * 5 + 4] =
|
| 41 |
+
dev_boxes1[(row_start + threadIdx.x) * 5 + 4];
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
if (threadIdx.x < col_size && threadIdx.y == 0) {
|
| 45 |
+
block_boxes2[threadIdx.x * 5 + 0] =
|
| 46 |
+
dev_boxes2[(col_start + threadIdx.x) * 5 + 0];
|
| 47 |
+
block_boxes2[threadIdx.x * 5 + 1] =
|
| 48 |
+
dev_boxes2[(col_start + threadIdx.x) * 5 + 1];
|
| 49 |
+
block_boxes2[threadIdx.x * 5 + 2] =
|
| 50 |
+
dev_boxes2[(col_start + threadIdx.x) * 5 + 2];
|
| 51 |
+
block_boxes2[threadIdx.x * 5 + 3] =
|
| 52 |
+
dev_boxes2[(col_start + threadIdx.x) * 5 + 3];
|
| 53 |
+
block_boxes2[threadIdx.x * 5 + 4] =
|
| 54 |
+
dev_boxes2[(col_start + threadIdx.x) * 5 + 4];
|
| 55 |
+
}
|
| 56 |
+
__syncthreads();
|
| 57 |
+
|
| 58 |
+
if (threadIdx.x < row_size && threadIdx.y < col_size) {
|
| 59 |
+
int offset = (row_start + threadIdx.x) * n_boxes2 + col_start + threadIdx.y;
|
| 60 |
+
dev_ious[offset] = single_box_iou_rotated<T>(
|
| 61 |
+
block_boxes1 + threadIdx.x * 5, block_boxes2 + threadIdx.y * 5);
|
| 62 |
+
}
|
| 63 |
+
}
|
| 64 |
+
|
| 65 |
+
at::Tensor box_iou_rotated_cuda(
|
| 66 |
+
// input must be contiguous
|
| 67 |
+
const at::Tensor& boxes1,
|
| 68 |
+
const at::Tensor& boxes2) {
|
| 69 |
+
using scalar_t = float;
|
| 70 |
+
AT_ASSERTM(
|
| 71 |
+
boxes1.scalar_type() == at::kFloat, "boxes1 must be a float tensor");
|
| 72 |
+
AT_ASSERTM(
|
| 73 |
+
boxes2.scalar_type() == at::kFloat, "boxes2 must be a float tensor");
|
| 74 |
+
AT_ASSERTM(boxes1.is_cuda(), "boxes1 must be a CUDA tensor");
|
| 75 |
+
AT_ASSERTM(boxes2.is_cuda(), "boxes2 must be a CUDA tensor");
|
| 76 |
+
at::cuda::CUDAGuard device_guard(boxes1.device());
|
| 77 |
+
|
| 78 |
+
auto num_boxes1 = boxes1.size(0);
|
| 79 |
+
auto num_boxes2 = boxes2.size(0);
|
| 80 |
+
|
| 81 |
+
at::Tensor ious =
|
| 82 |
+
at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat));
|
| 83 |
+
|
| 84 |
+
bool transpose = false;
|
| 85 |
+
if (num_boxes1 > 0 && num_boxes2 > 0) {
|
| 86 |
+
scalar_t *data1 = boxes1.data_ptr<scalar_t>(),
|
| 87 |
+
*data2 = boxes2.data_ptr<scalar_t>();
|
| 88 |
+
|
| 89 |
+
if (num_boxes2 > 65535 * BLOCK_DIM_Y) {
|
| 90 |
+
AT_ASSERTM(
|
| 91 |
+
num_boxes1 <= 65535 * BLOCK_DIM_Y,
|
| 92 |
+
"Too many boxes for box_iou_rotated_cuda!");
|
| 93 |
+
// x dim is allowed to be large, but y dim cannot,
|
| 94 |
+
// so we transpose the two to avoid "invalid configuration argument"
|
| 95 |
+
// error. We assume one of them is small. Otherwise the result is hard to
|
| 96 |
+
// fit in memory anyway.
|
| 97 |
+
std::swap(num_boxes1, num_boxes2);
|
| 98 |
+
std::swap(data1, data2);
|
| 99 |
+
transpose = true;
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
const int blocks_x =
|
| 103 |
+
at::cuda::ATenCeilDiv(static_cast<int>(num_boxes1), BLOCK_DIM_X);
|
| 104 |
+
const int blocks_y =
|
| 105 |
+
at::cuda::ATenCeilDiv(static_cast<int>(num_boxes2), BLOCK_DIM_Y);
|
| 106 |
+
|
| 107 |
+
dim3 blocks(blocks_x, blocks_y);
|
| 108 |
+
dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y);
|
| 109 |
+
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
|
| 110 |
+
|
| 111 |
+
box_iou_rotated_cuda_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
|
| 112 |
+
num_boxes1,
|
| 113 |
+
num_boxes2,
|
| 114 |
+
data1,
|
| 115 |
+
data2,
|
| 116 |
+
(scalar_t*)ious.data_ptr<scalar_t>());
|
| 117 |
+
|
| 118 |
+
AT_CUDA_CHECK(cudaGetLastError());
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
// reshape from 1d array to 2d array
|
| 122 |
+
auto shape = std::vector<int64_t>{num_boxes1, num_boxes2};
|
| 123 |
+
if (transpose) {
|
| 124 |
+
return ious.view(shape).t();
|
| 125 |
+
} else {
|
| 126 |
+
return ious.view(shape);
|
| 127 |
+
}
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
} // namespace detectron2
|
RAVE-main/annotator/oneformer/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h
ADDED
|
@@ -0,0 +1,370 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
#pragma once
|
| 3 |
+
|
| 4 |
+
#include <cassert>
|
| 5 |
+
#include <cmath>
|
| 6 |
+
|
| 7 |
+
#if defined(__CUDACC__) || __HCC__ == 1 || __HIP__ == 1
|
| 8 |
+
// Designates functions callable from the host (CPU) and the device (GPU)
|
| 9 |
+
#define HOST_DEVICE __host__ __device__
|
| 10 |
+
#define HOST_DEVICE_INLINE HOST_DEVICE __forceinline__
|
| 11 |
+
#else
|
| 12 |
+
#include <algorithm>
|
| 13 |
+
#define HOST_DEVICE
|
| 14 |
+
#define HOST_DEVICE_INLINE HOST_DEVICE inline
|
| 15 |
+
#endif
|
| 16 |
+
|
| 17 |
+
namespace detectron2 {
|
| 18 |
+
|
| 19 |
+
namespace {
|
| 20 |
+
|
| 21 |
+
template <typename T>
|
| 22 |
+
struct RotatedBox {
|
| 23 |
+
T x_ctr, y_ctr, w, h, a;
|
| 24 |
+
};
|
| 25 |
+
|
| 26 |
+
template <typename T>
|
| 27 |
+
struct Point {
|
| 28 |
+
T x, y;
|
| 29 |
+
HOST_DEVICE_INLINE Point(const T& px = 0, const T& py = 0) : x(px), y(py) {}
|
| 30 |
+
HOST_DEVICE_INLINE Point operator+(const Point& p) const {
|
| 31 |
+
return Point(x + p.x, y + p.y);
|
| 32 |
+
}
|
| 33 |
+
HOST_DEVICE_INLINE Point& operator+=(const Point& p) {
|
| 34 |
+
x += p.x;
|
| 35 |
+
y += p.y;
|
| 36 |
+
return *this;
|
| 37 |
+
}
|
| 38 |
+
HOST_DEVICE_INLINE Point operator-(const Point& p) const {
|
| 39 |
+
return Point(x - p.x, y - p.y);
|
| 40 |
+
}
|
| 41 |
+
HOST_DEVICE_INLINE Point operator*(const T coeff) const {
|
| 42 |
+
return Point(x * coeff, y * coeff);
|
| 43 |
+
}
|
| 44 |
+
};
|
| 45 |
+
|
| 46 |
+
template <typename T>
|
| 47 |
+
HOST_DEVICE_INLINE T dot_2d(const Point<T>& A, const Point<T>& B) {
|
| 48 |
+
return A.x * B.x + A.y * B.y;
|
| 49 |
+
}
|
| 50 |
+
|
| 51 |
+
// R: result type. can be different from input type
|
| 52 |
+
template <typename T, typename R = T>
|
| 53 |
+
HOST_DEVICE_INLINE R cross_2d(const Point<T>& A, const Point<T>& B) {
|
| 54 |
+
return static_cast<R>(A.x) * static_cast<R>(B.y) -
|
| 55 |
+
static_cast<R>(B.x) * static_cast<R>(A.y);
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
template <typename T>
|
| 59 |
+
HOST_DEVICE_INLINE void get_rotated_vertices(
|
| 60 |
+
const RotatedBox<T>& box,
|
| 61 |
+
Point<T> (&pts)[4]) {
|
| 62 |
+
// M_PI / 180. == 0.01745329251
|
| 63 |
+
double theta = box.a * 0.01745329251;
|
| 64 |
+
T cosTheta2 = (T)cos(theta) * 0.5f;
|
| 65 |
+
T sinTheta2 = (T)sin(theta) * 0.5f;
|
| 66 |
+
|
| 67 |
+
// y: top --> down; x: left --> right
|
| 68 |
+
pts[0].x = box.x_ctr + sinTheta2 * box.h + cosTheta2 * box.w;
|
| 69 |
+
pts[0].y = box.y_ctr + cosTheta2 * box.h - sinTheta2 * box.w;
|
| 70 |
+
pts[1].x = box.x_ctr - sinTheta2 * box.h + cosTheta2 * box.w;
|
| 71 |
+
pts[1].y = box.y_ctr - cosTheta2 * box.h - sinTheta2 * box.w;
|
| 72 |
+
pts[2].x = 2 * box.x_ctr - pts[0].x;
|
| 73 |
+
pts[2].y = 2 * box.y_ctr - pts[0].y;
|
| 74 |
+
pts[3].x = 2 * box.x_ctr - pts[1].x;
|
| 75 |
+
pts[3].y = 2 * box.y_ctr - pts[1].y;
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
// Collect candidate vertices of the intersection polygon of two rectangles
// given by their corner points pts1 and pts2:
//   - all pairwise edge-edge intersection points (up to 16), and
//   - every corner of one rectangle contained in the other (up to 4 + 4).
// Writes the points into `intersections` (capacity 24) and returns the count.
// Duplicates are acceptable; missing a true vertex is not (see EPS note).
template <typename T>
HOST_DEVICE_INLINE int get_intersection_points(
    const Point<T> (&pts1)[4],
    const Point<T> (&pts2)[4],
    Point<T> (&intersections)[24]) {
  // Line vector
  // A line from p1 to p2 is: p1 + (p2-p1)*t, t=[0,1]
  Point<T> vec1[4], vec2[4];
  for (int i = 0; i < 4; i++) {
    vec1[i] = pts1[(i + 1) % 4] - pts1[i];
    vec2[i] = pts2[(i + 1) % 4] - pts2[i];
  }

  // When computing the intersection area, it doesn't hurt if we have
  // more (duplicated/approximate) intersections/vertices than needed,
  // while it can cause drastic difference if we miss an intersection/vertex.
  // Therefore, we add an epsilon to relax the comparisons between
  // the float point numbers that decide the intersection points.
  double EPS = 1e-5;

  // Line test - test all line combos for intersection
  int num = 0; // number of intersections
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      // Solve for 2x2 Ax=b
      T det = cross_2d<T>(vec2[j], vec1[i]);

      // This takes care of parallel lines
      if (fabs(det) <= 1e-14) {
        continue;
      }

      auto vec12 = pts2[j] - pts1[i];

      // Parameters of the intersection along each segment; both must lie
      // in (approximately) [0, 1] for the segments to actually cross.
      T t1 = cross_2d<T>(vec2[j], vec12) / det;
      T t2 = cross_2d<T>(vec1[i], vec12) / det;

      if (t1 > -EPS && t1 < 1.0f + EPS && t2 > -EPS && t2 < 1.0f + EPS) {
        intersections[num++] = pts1[i] + vec1[i] * t1;
      }
    }
  }

  // Check for vertices of rect1 inside rect2
  {
    const auto& AB = vec2[0];
    const auto& DA = vec2[3];
    auto ABdotAB = dot_2d<T>(AB, AB);
    auto ADdotAD = dot_2d<T>(DA, DA);
    for (int i = 0; i < 4; i++) {
      // assume ABCD is the rectangle, and P is the point to be judged
      // P is inside ABCD iff. P's projection on AB lies within AB
      // and P's projection on AD lies within AD

      auto AP = pts1[i] - pts2[0];

      auto APdotAB = dot_2d<T>(AP, AB);
      auto APdotAD = -dot_2d<T>(AP, DA);

      if ((APdotAB > -EPS) && (APdotAD > -EPS) && (APdotAB < ABdotAB + EPS) &&
          (APdotAD < ADdotAD + EPS)) {
        intersections[num++] = pts1[i];
      }
    }
  }

  // Reverse the check - check for vertices of rect2 inside rect1
  {
    const auto& AB = vec1[0];
    const auto& DA = vec1[3];
    auto ABdotAB = dot_2d<T>(AB, AB);
    auto ADdotAD = dot_2d<T>(DA, DA);
    for (int i = 0; i < 4; i++) {
      auto AP = pts2[i] - pts1[0];

      auto APdotAB = dot_2d<T>(AP, AB);
      auto APdotAD = -dot_2d<T>(AP, DA);

      if ((APdotAB > -EPS) && (APdotAD > -EPS) && (APdotAB < ABdotAB + EPS) &&
          (APdotAD < ADdotAD + EPS)) {
        intersections[num++] = pts2[i];
      }
    }
  }

  return num;
}
|
| 165 |
+
|
| 166 |
+
// Graham-scan convex hull of the first `num_in` points of `p` (num_in <= 24).
// Hull vertices are written to `q`; the hull size is returned.
// If shift_to_zero is true, output points stay translated so the pivot point
// is at the origin (sufficient for area computation); otherwise the original
// coordinates are restored before returning.
template <typename T>
HOST_DEVICE_INLINE int convex_hull_graham(
    const Point<T> (&p)[24],
    const int& num_in,
    Point<T> (&q)[24],
    bool shift_to_zero = false) {
  assert(num_in >= 2);

  // Step 1:
  // Find point with minimum y
  // if more than 1 points have the same minimum y,
  // pick the one with the minimum x.
  int t = 0;
  for (int i = 1; i < num_in; i++) {
    if (p[i].y < p[t].y || (p[i].y == p[t].y && p[i].x < p[t].x)) {
      t = i;
    }
  }
  auto& start = p[t]; // starting point

  // Step 2:
  // Subtract starting point from every points (for sorting in the next step)
  for (int i = 0; i < num_in; i++) {
    q[i] = p[i] - start;
  }

  // Swap the starting point to position 0
  auto tmp = q[0];
  q[0] = q[t];
  q[t] = tmp;

  // Step 3:
  // Sort point 1 ~ num_in according to their relative cross-product values
  // (essentially sorting according to angles)
  // If the angles are the same, sort according to their distance to origin
  T dist[24];
#if defined(__CUDACC__) || __HCC__ == 1 || __HIP__ == 1
  // compute distance to origin before sort, and sort them together with the
  // points
  for (int i = 0; i < num_in; i++) {
    dist[i] = dot_2d<T>(q[i], q[i]);
  }

  // CUDA version
  // In the future, we can potentially use thrust
  // for sorting here to improve speed (though not guaranteed)
  // (simple O(n^2) selection-style sort; n <= 24 so this is acceptable)
  for (int i = 1; i < num_in - 1; i++) {
    for (int j = i + 1; j < num_in; j++) {
      T crossProduct = cross_2d<T>(q[i], q[j]);
      if ((crossProduct < -1e-6) ||
          (fabs(crossProduct) < 1e-6 && dist[i] > dist[j])) {
        auto q_tmp = q[i];
        q[i] = q[j];
        q[j] = q_tmp;
        auto dist_tmp = dist[i];
        dist[i] = dist[j];
        dist[j] = dist_tmp;
      }
    }
  }
#else
  // CPU version
  std::sort(
      q + 1, q + num_in, [](const Point<T>& A, const Point<T>& B) -> bool {
        T temp = cross_2d<T>(A, B);
        if (fabs(temp) < 1e-6) {
          return dot_2d<T>(A, A) < dot_2d<T>(B, B);
        } else {
          return temp > 0;
        }
      });
  // compute distance to origin after sort, since the points are now different.
  for (int i = 0; i < num_in; i++) {
    dist[i] = dot_2d<T>(q[i], q[i]);
  }
#endif

  // Step 4:
  // Make sure there are at least 2 points (that don't overlap with each other)
  // in the stack
  int k; // index of the non-overlapped second point
  for (k = 1; k < num_in; k++) {
    if (dist[k] > 1e-8) {
      break;
    }
  }
  if (k == num_in) {
    // We reach the end, which means the convex hull is just one point
    q[0] = p[t];
    return 1;
  }
  q[1] = q[k];
  int m = 2; // 2 points in the stack
  // Step 5:
  // Finally we can start the scanning process.
  // When a non-convex relationship between the 3 points is found
  // (either concave shape or duplicated points),
  // we pop the previous point from the stack
  // until the 3-point relationship is convex again, or
  // until the stack only contains two points
  for (int i = k + 1; i < num_in; i++) {
    while (m > 1) {
      auto q1 = q[i] - q[m - 2], q2 = q[m - 1] - q[m - 2];
      // cross_2d() uses FMA and therefore computes round(round(q1.x*q2.y) -
      // q2.x*q1.y) So it may not return 0 even when q1==q2. Therefore we
      // compare round(q1.x*q2.y) and round(q2.x*q1.y) directly. (round means
      // round to nearest floating point).
      if (q1.x * q2.y >= q2.x * q1.y)
        m--;
      else
        break;
    }
    // Using double also helps, but float can solve the issue for now.
    // while (m > 1 && cross_2d<T, double>(q[i] - q[m - 2], q[m - 1] - q[m - 2])
    // >= 0) {
    //   m--;
    // }
    q[m++] = q[i];
  }

  // Step 6 (Optional):
  // In general sense we need the original coordinates, so we
  // need to shift the points back (reverting Step 2)
  // But if we're only interested in getting the area/perimeter of the shape
  // We can simply return.
  if (!shift_to_zero) {
    for (int i = 0; i < m; i++) {
      q[i] += start;
    }
  }

  return m;
}
|
| 299 |
+
|
| 300 |
+
// Area of the polygon with vertices q[0..m-1], via fan triangulation from
// q[0]. The per-triangle fabs() makes this correct for convex polygons with
// vertices in order (as produced by convex_hull_graham). Returns 0 for
// degenerate inputs with fewer than 3 vertices.
template <typename T>
HOST_DEVICE_INLINE T polygon_area(const Point<T> (&q)[24], const int& m) {
  if (m <= 2) {
    return 0;
  }

  T area = 0;
  for (int i = 1; i < m - 1; i++) {
    // Each triangle (q[0], q[i], q[i+1]) contributes |cross| / 2.
    area += fabs(cross_2d<T>(q[i] - q[0], q[i + 1] - q[0]));
  }

  return area / 2.0;
}
|
| 313 |
+
|
| 314 |
+
// Intersection area of two rotated boxes.
// Gathers candidate vertices of the intersection region (edge-edge
// intersections plus corners of one box contained in the other), orders
// them with a convex hull, and returns the resulting polygon's area.
template <typename T>
HOST_DEVICE_INLINE T rotated_boxes_intersection(
    const RotatedBox<T>& box1,
    const RotatedBox<T>& box2) {
  // There are up to 4 x 4 + 4 + 4 = 24 intersections (including dups) returned
  // from rotated_rect_intersection_pts
  Point<T> intersectPts[24], orderedPts[24];

  Point<T> pts1[4];
  Point<T> pts2[4];
  get_rotated_vertices<T>(box1, pts1);
  get_rotated_vertices<T>(box2, pts2);

  int num = get_intersection_points<T>(pts1, pts2, intersectPts);

  // Fewer than 3 candidate points cannot enclose any area.
  if (num <= 2) {
    return 0.0;
  }

  // Convex Hull to order the intersection points in clockwise order and find
  // the contour area.
  // shift_to_zero=true: the area is translation-invariant, so the hull
  // points need not be shifted back to the original coordinates.
  int num_convex = convex_hull_graham<T>(intersectPts, num, orderedPts, true);
  return polygon_area<T>(orderedPts, num_convex);
}
|
| 338 |
+
|
| 339 |
+
} // namespace
|
| 340 |
+
|
| 341 |
+
// IoU of two rotated boxes, each given as a raw 5-element array
// (x_ctr, y_ctr, w, h, a) where `a` is the angle field consumed by
// get_rotated_vertices (converted there from degrees).
// Returns 0 when either box has (near-)zero area.
template <typename T>
HOST_DEVICE_INLINE T
single_box_iou_rotated(T const* const box1_raw, T const* const box2_raw) {
  // shift center to the middle point to achieve higher precision in result
  RotatedBox<T> box1, box2;
  auto center_shift_x = (box1_raw[0] + box2_raw[0]) / 2.0;
  auto center_shift_y = (box1_raw[1] + box2_raw[1]) / 2.0;
  box1.x_ctr = box1_raw[0] - center_shift_x;
  box1.y_ctr = box1_raw[1] - center_shift_y;
  box1.w = box1_raw[2];
  box1.h = box1_raw[3];
  box1.a = box1_raw[4];
  box2.x_ctr = box2_raw[0] - center_shift_x;
  box2.y_ctr = box2_raw[1] - center_shift_y;
  box2.w = box2_raw[2];
  box2.h = box2_raw[3];
  box2.a = box2_raw[4];

  T area1 = box1.w * box1.h;
  T area2 = box2.w * box2.h;
  // Degenerate boxes: define IoU as 0 (also avoids dividing by ~0 below).
  if (area1 < 1e-14 || area2 < 1e-14) {
    return 0.f;
  }

  T intersection = rotated_boxes_intersection<T>(box1, box2);
  // IoU = |A ∩ B| / |A ∪ B|, with |A ∪ B| = |A| + |B| - |A ∩ B|.
  T iou = intersection / (area1 + area2 - intersection);
  return iou;
}
|
| 369 |
+
|
| 370 |
+
} // namespace detectron2
|
RAVE-main/annotator/oneformer/detectron2/layers/csrc/cocoeval/cocoeval.cpp
ADDED
|
@@ -0,0 +1,507 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
#include "cocoeval.h"
|
| 3 |
+
#include <time.h>
|
| 4 |
+
#include <algorithm>
|
| 5 |
+
#include <cstdint>
|
| 6 |
+
#include <numeric>
|
| 7 |
+
|
| 8 |
+
using namespace pybind11::literals;
|
| 9 |
+
|
| 10 |
+
namespace detectron2 {
|
| 11 |
+
|
| 12 |
+
namespace COCOeval {
|
| 13 |
+
|
| 14 |
+
// Sort detections from highest score to lowest, such that
|
| 15 |
+
// detection_instances[detection_sorted_indices[t]] >=
|
| 16 |
+
// detection_instances[detection_sorted_indices[t+1]]. Use stable_sort to match
|
| 17 |
+
// original COCO API
|
| 18 |
+
void SortInstancesByDetectionScore(
|
| 19 |
+
const std::vector<InstanceAnnotation>& detection_instances,
|
| 20 |
+
std::vector<uint64_t>* detection_sorted_indices) {
|
| 21 |
+
detection_sorted_indices->resize(detection_instances.size());
|
| 22 |
+
std::iota(
|
| 23 |
+
detection_sorted_indices->begin(), detection_sorted_indices->end(), 0);
|
| 24 |
+
std::stable_sort(
|
| 25 |
+
detection_sorted_indices->begin(),
|
| 26 |
+
detection_sorted_indices->end(),
|
| 27 |
+
[&detection_instances](size_t j1, size_t j2) {
|
| 28 |
+
return detection_instances[j1].score > detection_instances[j2].score;
|
| 29 |
+
});
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
// Partition the ground truth objects based on whether or not to ignore them
|
| 33 |
+
// based on area
|
| 34 |
+
void SortInstancesByIgnore(
|
| 35 |
+
const std::array<double, 2>& area_range,
|
| 36 |
+
const std::vector<InstanceAnnotation>& ground_truth_instances,
|
| 37 |
+
std::vector<uint64_t>* ground_truth_sorted_indices,
|
| 38 |
+
std::vector<bool>* ignores) {
|
| 39 |
+
ignores->clear();
|
| 40 |
+
ignores->reserve(ground_truth_instances.size());
|
| 41 |
+
for (auto o : ground_truth_instances) {
|
| 42 |
+
ignores->push_back(
|
| 43 |
+
o.ignore || o.area < area_range[0] || o.area > area_range[1]);
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
ground_truth_sorted_indices->resize(ground_truth_instances.size());
|
| 47 |
+
std::iota(
|
| 48 |
+
ground_truth_sorted_indices->begin(),
|
| 49 |
+
ground_truth_sorted_indices->end(),
|
| 50 |
+
0);
|
| 51 |
+
std::stable_sort(
|
| 52 |
+
ground_truth_sorted_indices->begin(),
|
| 53 |
+
ground_truth_sorted_indices->end(),
|
| 54 |
+
[&ignores](size_t j1, size_t j2) {
|
| 55 |
+
return (int)(*ignores)[j1] < (int)(*ignores)[j2];
|
| 56 |
+
});
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
// For each IOU threshold, greedily match each detected instance to a ground
// truth instance (if possible) and store the results.
// Detections are visited in `detection_sorted_indices` order (i.e. by
// decreasing score) and ground truths in `ground_truth_sorted_indices` order
// (non-ignored first). Match/ignore results for all thresholds are packed
// row-major into `results` at offset [t * num_detections + d].
void MatchDetectionsToGroundTruth(
    const std::vector<InstanceAnnotation>& detection_instances,
    const std::vector<uint64_t>& detection_sorted_indices,
    const std::vector<InstanceAnnotation>& ground_truth_instances,
    const std::vector<uint64_t>& ground_truth_sorted_indices,
    const std::vector<bool>& ignores,
    const std::vector<std::vector<double>>& ious,
    const std::vector<double>& iou_thresholds,
    const std::array<double, 2>& area_range,
    ImageEvaluation* results) {
  // Initialize memory to store return data matches and ignore
  const int num_iou_thresholds = iou_thresholds.size();
  const int num_ground_truth = ground_truth_sorted_indices.size();
  const int num_detections = detection_sorted_indices.size();
  // ground_truth_matches[t * num_ground_truth + g] holds the id of the
  // detection matched to ground truth g at threshold t (0 = unmatched).
  std::vector<uint64_t> ground_truth_matches(
      num_iou_thresholds * num_ground_truth, 0);
  std::vector<uint64_t>& detection_matches = results->detection_matches;
  std::vector<bool>& detection_ignores = results->detection_ignores;
  std::vector<bool>& ground_truth_ignores = results->ground_truth_ignores;
  detection_matches.resize(num_iou_thresholds * num_detections, 0);
  detection_ignores.resize(num_iou_thresholds * num_detections, false);
  ground_truth_ignores.resize(num_ground_truth);
  // Re-index the ignore flags into sorted ground truth order.
  for (auto g = 0; g < num_ground_truth; ++g) {
    ground_truth_ignores[g] = ignores[ground_truth_sorted_indices[g]];
  }

  for (auto t = 0; t < num_iou_thresholds; ++t) {
    for (auto d = 0; d < num_detections; ++d) {
      // information about best match so far (match=-1 -> unmatched)
      // (the 1 - 1e-10 cap ensures a perfect IOU of 1.0 can still match)
      double best_iou = std::min(iou_thresholds[t], 1 - 1e-10);
      int match = -1;
      for (auto g = 0; g < num_ground_truth; ++g) {
        // if this ground truth instance is already matched and not a
        // crowd, it cannot be matched to another detection
        if (ground_truth_matches[t * num_ground_truth + g] > 0 &&
            !ground_truth_instances[ground_truth_sorted_indices[g]].is_crowd) {
          continue;
        }

        // if detected instance matched to a regular ground truth
        // instance, we can break on the first ground truth instance
        // tagged as ignore (because they are sorted by the ignore tag)
        if (match >= 0 && !ground_truth_ignores[match] &&
            ground_truth_ignores[g]) {
          break;
        }

        // if IOU overlap is the best so far, store the match appropriately
        if (ious[d][ground_truth_sorted_indices[g]] >= best_iou) {
          best_iou = ious[d][ground_truth_sorted_indices[g]];
          match = g;
        }
      }
      // if match was made, store id of match for both detection and
      // ground truth
      if (match >= 0) {
        detection_ignores[t * num_detections + d] = ground_truth_ignores[match];
        detection_matches[t * num_detections + d] =
            ground_truth_instances[ground_truth_sorted_indices[match]].id;
        ground_truth_matches[t * num_ground_truth + match] =
            detection_instances[detection_sorted_indices[d]].id;
      }

      // set unmatched detections outside of area range to ignore
      const InstanceAnnotation& detection =
          detection_instances[detection_sorted_indices[d]];
      detection_ignores[t * num_detections + d] =
          detection_ignores[t * num_detections + d] ||
          (detection_matches[t * num_detections + d] == 0 &&
           (detection.area < area_range[0] || detection.area > area_range[1]));
    }
  }

  // store detection score results
  results->detection_scores.resize(detection_sorted_indices.size());
  for (size_t d = 0; d < detection_sorted_indices.size(); ++d) {
    results->detection_scores[d] =
        detection_instances[detection_sorted_indices[d]].score;
  }
}
|
| 141 |
+
|
| 142 |
+
// Evaluate every (image, category, area range) combination, producing one
// ImageEvaluation per combination; results for all IOU thresholds are packed
// into the same ImageEvaluation. The output vector is laid out as
// [c * num_area_ranges * num_images + a * num_images + i].
// Detections per (image, category) are sorted by score and capped at
// `max_detections` before matching.
std::vector<ImageEvaluation> EvaluateImages(
    const std::vector<std::array<double, 2>>& area_ranges,
    int max_detections,
    const std::vector<double>& iou_thresholds,
    const ImageCategoryInstances<std::vector<double>>& image_category_ious,
    const ImageCategoryInstances<InstanceAnnotation>&
        image_category_ground_truth_instances,
    const ImageCategoryInstances<InstanceAnnotation>&
        image_category_detection_instances) {
  const int num_area_ranges = area_ranges.size();
  const int num_images = image_category_ground_truth_instances.size();
  // Category count is taken from the IOU table of the first image (0 if empty).
  const int num_categories =
      image_category_ious.size() > 0 ? image_category_ious[0].size() : 0;
  // Scratch buffers reused across all iterations to avoid reallocation.
  std::vector<uint64_t> detection_sorted_indices;
  std::vector<uint64_t> ground_truth_sorted_indices;
  std::vector<bool> ignores;
  std::vector<ImageEvaluation> results_all(
      num_images * num_area_ranges * num_categories);

  // Store results for each image, category, and area range combination. Results
  // for each IOU threshold are packed into the same ImageEvaluation object
  for (auto i = 0; i < num_images; ++i) {
    for (auto c = 0; c < num_categories; ++c) {
      const std::vector<InstanceAnnotation>& ground_truth_instances =
          image_category_ground_truth_instances[i][c];
      const std::vector<InstanceAnnotation>& detection_instances =
          image_category_detection_instances[i][c];

      SortInstancesByDetectionScore(
          detection_instances, &detection_sorted_indices);
      // Keep only the top-scoring max_detections detections.
      if ((int)detection_sorted_indices.size() > max_detections) {
        detection_sorted_indices.resize(max_detections);
      }

      for (size_t a = 0; a < area_ranges.size(); ++a) {
        SortInstancesByIgnore(
            area_ranges[a],
            ground_truth_instances,
            &ground_truth_sorted_indices,
            &ignores);

        MatchDetectionsToGroundTruth(
            detection_instances,
            detection_sorted_indices,
            ground_truth_instances,
            ground_truth_sorted_indices,
            ignores,
            image_category_ious[i][c],
            iou_thresholds,
            area_ranges[a],
            &results_all
                [c * num_area_ranges * num_images + a * num_images + i]);
      }
    }
  }

  return results_all;
}
|
| 200 |
+
|
| 201 |
+
// Convert a python list to a vector
|
| 202 |
+
template <typename T>
|
| 203 |
+
std::vector<T> list_to_vec(const py::list& l) {
|
| 204 |
+
std::vector<T> v(py::len(l));
|
| 205 |
+
for (int i = 0; i < (int)py::len(l); ++i) {
|
| 206 |
+
v[i] = l[i].cast<T>();
|
| 207 |
+
}
|
| 208 |
+
return v;
|
| 209 |
+
}
|
| 210 |
+
|
| 211 |
+
// Helper function to Accumulate()
// Considers the evaluation results applicable to a particular category, area
// range, and max_detections parameter setting, which begin at
// evaluations[evaluation_index]. Extracts a sorted list of length n of all
// applicable detection instances concatenated across all images in the dataset,
// which are represented by the outputs evaluation_indices, detection_scores,
// image_detection_indices, and detection_sorted_indices--all of which are
// length n. evaluation_indices[i] stores the applicable index into
// evaluations[] for instance i, which has detection score detection_score[i],
// and is the image_detection_indices[i]'th of the list of detections
// for the image containing i. detection_sorted_indices[] defines a sorted
// permutation of the 3 other outputs.
// Returns the number of non-ignored ground truth instances across all images
// in this slice.
int BuildSortedDetectionList(
    const std::vector<ImageEvaluation>& evaluations,
    const int64_t evaluation_index,
    const int64_t num_images,
    const int max_detections,
    std::vector<uint64_t>* evaluation_indices,
    std::vector<double>* detection_scores,
    std::vector<uint64_t>* detection_sorted_indices,
    std::vector<uint64_t>* image_detection_indices) {
  assert(evaluations.size() >= evaluation_index + num_images);

  // Extract a list of object instances of the applicable category, area
  // range, and max detections requirements such that they can be sorted
  image_detection_indices->clear();
  evaluation_indices->clear();
  detection_scores->clear();
  // Upper bound on output size: max_detections detections per image.
  image_detection_indices->reserve(num_images * max_detections);
  evaluation_indices->reserve(num_images * max_detections);
  detection_scores->reserve(num_images * max_detections);
  int num_valid_ground_truth = 0;
  for (auto i = 0; i < num_images; ++i) {
    const ImageEvaluation& evaluation = evaluations[evaluation_index + i];

    for (int d = 0;
         d < (int)evaluation.detection_scores.size() && d < max_detections;
         ++d) { // detected instances
      evaluation_indices->push_back(evaluation_index + i);
      image_detection_indices->push_back(d);
      detection_scores->push_back(evaluation.detection_scores[d]);
    }
    // Count non-ignored ground truths; this becomes the recall denominator.
    for (auto ground_truth_ignore : evaluation.ground_truth_ignores) {
      if (!ground_truth_ignore) {
        ++num_valid_ground_truth;
      }
    }
  }

  // Sort detections by decreasing score, using stable sort to match
  // python implementation
  detection_sorted_indices->resize(detection_scores->size());
  std::iota(
      detection_sorted_indices->begin(), detection_sorted_indices->end(), 0);
  std::stable_sort(
      detection_sorted_indices->begin(),
      detection_sorted_indices->end(),
      [&detection_scores](size_t j1, size_t j2) {
        return (*detection_scores)[j1] > (*detection_scores)[j2];
      });

  return num_valid_ground_truth;
}
|
| 274 |
+
|
| 275 |
+
// Helper function to Accumulate()
// Compute a precision recall curve given a sorted list of detected instances
// encoded in evaluations, evaluation_indices, detection_scores,
// detection_sorted_indices, image_detection_indices (see
// BuildSortedDetectionList()). Using vectors precisions and recalls
// and temporary storage, output the results into precisions_out, recalls_out,
// and scores_out, which are large buffers containing many precision/recall
// curves for all possible parameter settings, with precisions_out_index and
// recalls_out_index defining the applicable indices to store results.
void ComputePrecisionRecallCurve(
    const int64_t precisions_out_index,
    const int64_t precisions_out_stride,
    const int64_t recalls_out_index,
    const std::vector<double>& recall_thresholds,
    const int iou_threshold_index,
    const int num_iou_thresholds,
    const int num_valid_ground_truth,
    const std::vector<ImageEvaluation>& evaluations,
    const std::vector<uint64_t>& evaluation_indices,
    const std::vector<double>& detection_scores,
    const std::vector<uint64_t>& detection_sorted_indices,
    const std::vector<uint64_t>& image_detection_indices,
    std::vector<double>* precisions,
    std::vector<double>* recalls,
    std::vector<double>* precisions_out,
    std::vector<double>* scores_out,
    std::vector<double>* recalls_out) {
  assert(recalls_out->size() > recalls_out_index);

  // Compute precision/recall for each instance in the sorted list of detections
  int64_t true_positives_sum = 0, false_positives_sum = 0;
  precisions->clear();
  recalls->clear();
  precisions->reserve(detection_sorted_indices.size());
  recalls->reserve(detection_sorted_indices.size());
  assert(!evaluations.empty() || detection_sorted_indices.empty());
  for (auto detection_sorted_index : detection_sorted_indices) {
    const ImageEvaluation& evaluation =
        evaluations[evaluation_indices[detection_sorted_index]];
    // Matches/ignores for all IOU thresholds are packed row-major; pick the
    // row for iou_threshold_index.
    const auto num_detections =
        evaluation.detection_matches.size() / num_iou_thresholds;
    const auto detection_index = iou_threshold_index * num_detections +
        image_detection_indices[detection_sorted_index];
    assert(evaluation.detection_matches.size() > detection_index);
    assert(evaluation.detection_ignores.size() > detection_index);
    const int64_t detection_match =
        evaluation.detection_matches[detection_index];
    const bool detection_ignores =
        evaluation.detection_ignores[detection_index];
    // Ignored detections count as neither true nor false positives.
    const auto true_positive = detection_match > 0 && !detection_ignores;
    const auto false_positive = detection_match == 0 && !detection_ignores;
    if (true_positive) {
      ++true_positives_sum;
    }
    if (false_positive) {
      ++false_positives_sum;
    }

    const double recall =
        static_cast<double>(true_positives_sum) / num_valid_ground_truth;
    recalls->push_back(recall);
    const int64_t num_valid_detections =
        true_positives_sum + false_positives_sum;
    const double precision = num_valid_detections > 0
        ? static_cast<double>(true_positives_sum) / num_valid_detections
        : 0.0;
    precisions->push_back(precision);
  }

  // Final recall for this setting is the recall after the last detection.
  (*recalls_out)[recalls_out_index] = !recalls->empty() ? recalls->back() : 0;

  // Make the precision curve monotonically non-increasing from right to left
  // (each point takes the max precision achieved at any higher recall).
  for (int64_t i = static_cast<int64_t>(precisions->size()) - 1; i > 0; --i) {
    if ((*precisions)[i] > (*precisions)[i - 1]) {
      (*precisions)[i - 1] = (*precisions)[i];
    }
  }

  // Sample the per instance precision/recall list at each recall threshold
  for (size_t r = 0; r < recall_thresholds.size(); ++r) {
    // first index in recalls >= recall_thresholds[r]
    std::vector<double>::iterator low = std::lower_bound(
        recalls->begin(), recalls->end(), recall_thresholds[r]);
    size_t precisions_index = low - recalls->begin();

    const auto results_ind = precisions_out_index + r * precisions_out_stride;
    assert(results_ind < precisions_out->size());
    assert(results_ind < scores_out->size());
    if (precisions_index < precisions->size()) {
      (*precisions_out)[results_ind] = (*precisions)[precisions_index];
      (*scores_out)[results_ind] =
          detection_scores[detection_sorted_indices[precisions_index]];
    } else {
      // Recall threshold never reached: leave precision/score as 0.
      (*precisions_out)[results_ind] = 0;
      (*scores_out)[results_ind] = 0;
    }
  }
}
|
| 372 |
+
// Accumulate per-image evaluation results into the flattened
// precision/recall/score matrices expected by the COCO Python API,
// equivalent to COCOeval.accumulate().
//
// params: Python Params object; this reads recThrs, maxDets, iouThrs,
//   useCats, catIds, areaRng, and imgIds from it.
// evaluations: one entry per (category, area range, image) combination,
//   as produced by EvaluateImages(), with categories in the outermost
//   loop and images in the innermost loop.
// Returns a py::dict with keys "params", "counts" (matrix dimensions),
// "date" (timestamp), "precision", "recall", and "scores". The numeric
// arrays are flattened T x R x K x A x M (precision/scores) and
// T x K x A x M (recall) matrices; cells that are never filled (no
// valid ground truth for that combination) remain -1.
py::dict Accumulate(
    const py::object& params,
    const std::vector<ImageEvaluation>& evaluations) {
  const std::vector<double> recall_thresholds =
      list_to_vec<double>(params.attr("recThrs"));
  const std::vector<int> max_detections =
      list_to_vec<int>(params.attr("maxDets"));
  const int num_iou_thresholds = py::len(params.attr("iouThrs"));
  const int num_recall_thresholds = py::len(params.attr("recThrs"));
  const int num_categories = params.attr("useCats").cast<int>() == 1
      ? py::len(params.attr("catIds"))
      : 1;
  const int num_area_ranges = py::len(params.attr("areaRng"));
  const int num_max_detections = py::len(params.attr("maxDets"));
  const int num_images = py::len(params.attr("imgIds"));

  // Output matrices are flattened; -1 marks entries that are never
  // written (the Python API uses -1 to mean "no result").
  std::vector<double> precisions_out(
      num_iou_thresholds * num_recall_thresholds * num_categories *
          num_area_ranges * num_max_detections,
      -1);
  std::vector<double> recalls_out(
      num_iou_thresholds * num_categories * num_area_ranges *
          num_max_detections,
      -1);
  std::vector<double> scores_out(
      num_iou_thresholds * num_recall_thresholds * num_categories *
          num_area_ranges * num_max_detections,
      -1);

  // Consider the list of all detected instances in the entire dataset in one
  // large list. evaluation_indices, detection_scores,
  // image_detection_indices, and detection_sorted_indices all have the same
  // length as this list, such that each entry corresponds to one detected
  // instance
  std::vector<uint64_t> evaluation_indices; // indices into evaluations[]
  std::vector<double> detection_scores; // detection scores of each instance
  std::vector<uint64_t> detection_sorted_indices; // sorted indices of all
                                                  // instances in the dataset
  std::vector<uint64_t>
      image_detection_indices; // indices into the list of detected instances in
                               // the same image as each instance
  std::vector<double> precisions, recalls;

  for (auto c = 0; c < num_categories; ++c) {
    for (auto a = 0; a < num_area_ranges; ++a) {
      for (auto m = 0; m < num_max_detections; ++m) {
        // The COCO PythonAPI assumes evaluations[] (the return value of
        // COCOeval::EvaluateImages() is one long list storing results for each
        // combination of category, area range, and image id, with categories in
        // the outermost loop and images in the innermost loop.
        const int64_t evaluations_index =
            c * num_area_ranges * num_images + a * num_images;
        int num_valid_ground_truth = BuildSortedDetectionList(
            evaluations,
            evaluations_index,
            num_images,
            max_detections[m],
            &evaluation_indices,
            &detection_scores,
            &detection_sorted_indices,
            &image_detection_indices);

        // Nothing to score for this (category, area range, max-det)
        // combination; leave the -1 sentinels in place.
        if (num_valid_ground_truth == 0) {
          continue;
        }

        for (auto t = 0; t < num_iou_thresholds; ++t) {
          // recalls_out is a flattened vectors representing a
          // num_iou_thresholds X num_categories X num_area_ranges X
          // num_max_detections matrix
          const int64_t recalls_out_index =
              t * num_categories * num_area_ranges * num_max_detections +
              c * num_area_ranges * num_max_detections +
              a * num_max_detections + m;

          // precisions_out and scores_out are flattened vectors
          // representing a num_iou_thresholds X num_recall_thresholds X
          // num_categories X num_area_ranges X num_max_detections matrix
          const int64_t precisions_out_stride =
              num_categories * num_area_ranges * num_max_detections;
          const int64_t precisions_out_index = t * num_recall_thresholds *
                  num_categories * num_area_ranges * num_max_detections +
              c * num_area_ranges * num_max_detections +
              a * num_max_detections + m;

          ComputePrecisionRecallCurve(
              precisions_out_index,
              precisions_out_stride,
              recalls_out_index,
              recall_thresholds,
              t,
              num_iou_thresholds,
              num_valid_ground_truth,
              evaluations,
              evaluation_indices,
              detection_scores,
              detection_sorted_indices,
              image_detection_indices,
              &precisions,
              &recalls,
              &precisions_out,
              &scores_out,
              &recalls_out);
        }
      }
    }
  }

  // Timestamp the results, matching the Python API's "date" field.
  time_t rawtime;
  struct tm local_time;
  std::array<char, 200> buffer;
  time(&rawtime);
#ifdef _WIN32
  localtime_s(&local_time, &rawtime);
#else
  localtime_r(&rawtime, &local_time);
#endif
  // BUG FIX: the minutes specifier had been corrupted to
  // "%num_max_detections" (a bad search-and-replace); the correct
  // strftime conversion for minutes is "%M".
  strftime(buffer.data(), 200, "%Y-%m-%d %H:%M:%S", &local_time);
  return py::dict(
      "params"_a = params,
      "counts"_a = std::vector<int64_t>(
          {num_iou_thresholds,
           num_recall_thresholds,
           num_categories,
           num_area_ranges,
           num_max_detections}),
      "date"_a = buffer,
      "precision"_a = precisions_out,
      "recall"_a = recalls_out,
      "scores"_a = scores_out);
}
|
| 504 |
+
|
| 505 |
+
} // namespace COCOeval
|
| 506 |
+
|
| 507 |
+
} // namespace detectron2
|