Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- FateZero-main/data/shape/man_skate/00005.png +3 -0
- FateZero-main/data/shape/swan_swarov/00006.png +3 -0
- RAVE-main/annotator/clipvision/__init__.py +127 -0
- RAVE-main/annotator/leres/__pycache__/__init__.cpython-38.pyc +0 -0
- RAVE-main/annotator/leres/pix2pix/options/__init__.py +1 -0
- RAVE-main/annotator/leres/pix2pix/options/__pycache__/__init__.cpython-38.pyc +0 -0
- RAVE-main/annotator/leres/pix2pix/options/__pycache__/base_options.cpython-38.pyc +0 -0
- RAVE-main/annotator/leres/pix2pix/options/__pycache__/test_options.cpython-38.pyc +0 -0
- RAVE-main/annotator/leres/pix2pix/options/base_options.py +156 -0
- RAVE-main/annotator/leres/pix2pix/options/test_options.py +22 -0
- RAVE-main/annotator/normalbae/LICENSE +21 -0
- RAVE-main/annotator/normalbae/__init__.py +81 -0
- RAVE-main/annotator/normalbae/models/NNET.py +22 -0
- RAVE-main/annotator/normalbae/models/baseline.py +85 -0
- RAVE-main/annotator/normalbae/models/submodules/decoder.py +202 -0
- RAVE-main/annotator/normalbae/models/submodules/efficientnet_repo/.gitignore +109 -0
- RAVE-main/annotator/normalbae/models/submodules/efficientnet_repo/caffe2_validate.py +138 -0
- RAVE-main/annotator/normalbae/models/submodules/efficientnet_repo/geffnet/activations/__init__.py +137 -0
- RAVE-main/annotator/normalbae/models/submodules/efficientnet_repo/geffnet/activations/activations.py +102 -0
- RAVE-main/annotator/normalbae/models/submodules/efficientnet_repo/geffnet/activations/activations_jit.py +79 -0
- RAVE-main/annotator/normalbae/models/submodules/efficientnet_repo/geffnet/activations/activations_me.py +174 -0
- RAVE-main/annotator/normalbae/models/submodules/efficientnet_repo/onnx_export.py +120 -0
- RAVE-main/annotator/normalbae/models/submodules/efficientnet_repo/onnx_optimize.py +84 -0
- RAVE-main/annotator/normalbae/models/submodules/encoder.py +34 -0
- RAVE-main/annotator/normalbae/models/submodules/submodules.py +140 -0
- RAVE-main/annotator/oneformer/LICENSE +21 -0
- RAVE-main/annotator/oneformer/__init__.py +45 -0
- RAVE-main/annotator/oneformer/api.py +39 -0
- RAVE-main/annotator/oneformer/configs/ade20k/Base-ADE20K-UnifiedSegmentation.yaml +68 -0
- RAVE-main/annotator/oneformer/configs/ade20k/oneformer_R50_bs16_160k.yaml +58 -0
- RAVE-main/annotator/oneformer/configs/ade20k/oneformer_swin_large_IN21k_384_bs16_160k.yaml +40 -0
- RAVE-main/annotator/oneformer/configs/coco/Base-COCO-UnifiedSegmentation.yaml +54 -0
- RAVE-main/annotator/oneformer/configs/coco/oneformer_R50_bs16_50ep.yaml +59 -0
- RAVE-main/annotator/oneformer/configs/coco/oneformer_swin_large_IN21k_384_bs16_100ep.yaml +25 -0
- RAVE-main/annotator/oneformer/oneformer/__init__.py +9 -0
- RAVE-main/annotator/oneformer/oneformer/config.py +239 -0
- RAVE-main/annotator/oneformer/oneformer/data/__init__.py +2 -0
- RAVE-main/annotator/oneformer/oneformer/data/build.py +117 -0
- RAVE-main/annotator/oneformer/oneformer/data/dataset_mappers/__init__.py +1 -0
- RAVE-main/annotator/oneformer/oneformer/data/dataset_mappers/coco_unified_new_baseline_dataset_mapper.py +341 -0
- RAVE-main/annotator/oneformer/oneformer/data/dataset_mappers/dataset_mapper.py +203 -0
- RAVE-main/annotator/oneformer/oneformer/data/dataset_mappers/oneformer_unified_dataset_mapper.py +375 -0
- RAVE-main/annotator/oneformer/oneformer/data/datasets/__init__.py +7 -0
- RAVE-main/annotator/oneformer/oneformer/data/datasets/register_ade20k_instance.py +56 -0
- RAVE-main/annotator/oneformer/oneformer/data/datasets/register_ade20k_panoptic.py +394 -0
- RAVE-main/annotator/oneformer/oneformer/data/datasets/register_cityscapes_panoptic.py +199 -0
- RAVE-main/annotator/oneformer/oneformer/data/datasets/register_coco_panoptic2instance.py +44 -0
- RAVE-main/annotator/oneformer/oneformer/data/datasets/register_coco_panoptic_annos_semseg.py +367 -0
- RAVE-main/annotator/oneformer/oneformer/data/tokenizer.py +192 -0
- RAVE-main/annotator/oneformer/oneformer/demo/colormap.py +170 -0
FateZero-main/data/shape/man_skate/00005.png
ADDED
|
Git LFS Details
|
FateZero-main/data/shape/swan_swarov/00006.png
ADDED
|
Git LFS Details
|
RAVE-main/annotator/clipvision/__init__.py
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import torch
|
| 3 |
+
|
| 4 |
+
from modules import devices
|
| 5 |
+
from modules.modelloader import load_file_from_url
|
| 6 |
+
from annotator.annotator_path import models_path
|
| 7 |
+
from transformers import CLIPVisionModelWithProjection, CLIPVisionConfig, CLIPImageProcessor
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
# Hyper-parameters shared by every supported CLIP vision tower.
_CLIP_COMMON = {
    "attention_dropout": 0.0,
    "dropout": 0.0,
    "image_size": 224,
    "initializer_factor": 1.0,
    "initializer_range": 0.02,
    "layer_norm_eps": 1e-05,
    "model_type": "clip_vision_model",
    "num_attention_heads": 16,
    "num_channels": 3,
    "patch_size": 14,
    "torch_dtype": "float32",
}

# OpenCLIP ViT-bigG/14 tower.
config_clip_g = dict(
    _CLIP_COMMON,
    hidden_act="gelu",
    hidden_size=1664,
    intermediate_size=8192,
    num_hidden_layers=48,
    projection_dim=1280,
)

# OpenCLIP ViT-H/14 tower (used by IP-Adapter).
config_clip_h = dict(
    _CLIP_COMMON,
    hidden_act="gelu",
    hidden_size=1280,
    intermediate_size=5120,
    num_hidden_layers=32,
    projection_dim=1024,
)

# OpenAI CLIP ViT-L/14 tower.
config_clip_vitl = dict(
    _CLIP_COMMON,
    hidden_act="quick_gelu",
    hidden_size=1024,
    intermediate_size=4096,
    num_hidden_layers=24,
    projection_dim=768,
)

# Registry mapping encoder key -> CLIPVisionConfig kwargs.
configs = {
    'clip_g': config_clip_g,
    'clip_h': config_clip_h,
    'clip_vitl': config_clip_vitl,
}

# Checkpoint download URL for each encoder key.
downloads = {
    'clip_vitl': 'https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/pytorch_model.bin',
    'clip_g': 'https://huggingface.co/lllyasviel/Annotators/resolve/main/clip_g.pth',
    'clip_h': 'https://huggingface.co/h94/IP-Adapter/resolve/main/models/image_encoder/pytorch_model.bin'
}
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
# Pre-computed unconditional CLIP-H embedding shipped alongside this module;
# loaded once at import time from the 'uc' entry of the data file.
clip_vision_h_uc = torch.load(
    os.path.join(os.path.dirname(os.path.abspath(__file__)), 'clip_vision_h_uc.data')
)['uc']
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class ClipVisionDetector:
    """CLIP vision-tower preprocessor for ControlNet.

    Downloads the selected checkpoint on first construction, builds the
    matching ``CLIPVisionModelWithProjection`` plus a fixed 224x224 image
    processor, runs inference on CPU, and moves outputs to the ControlNet
    device.
    """

    def __init__(self, config):
        # `config` must be one of the known encoder keys
        # ('clip_g', 'clip_h', 'clip_vitl') declared in `downloads`.
        assert config in downloads
        self.download_link = downloads[config]
        self.model_path = os.path.join(models_path, 'clip_vision')
        self.file_name = config + '.pth'
        self.config = configs[config]
        self.device = devices.get_device_for("controlnet")
        os.makedirs(self.model_path, exist_ok=True)
        file_path = os.path.join(self.model_path, self.file_name)
        if not os.path.exists(file_path):
            # Fetch the checkpoint once; later constructions reuse the cached file.
            load_file_from_url(url=self.download_link, model_dir=self.model_path, file_name=self.file_name)
        # NOTE: rebinds the local `config` from the key string to the config object.
        config = CLIPVisionConfig(**self.config)
        self.model = CLIPVisionModelWithProjection(config)
        # Standard CLIP preprocessing: RGB conversion, bicubic resize
        # (resample=3), center crop to 224, CLIP mean/std normalization.
        self.processor = CLIPImageProcessor(crop_size=224,
                                            do_center_crop=True,
                                            do_convert_rgb=True,
                                            do_normalize=True,
                                            do_resize=True,
                                            image_mean=[0.48145466, 0.4578275, 0.40821073],
                                            image_std=[0.26862954, 0.26130258, 0.27577711],
                                            resample=3,
                                            size=224)

        # strict=False: checkpoints may carry keys this vision tower doesn't use.
        sd = torch.load(file_path, map_location=torch.device('cpu'))
        self.model.load_state_dict(sd, strict=False)
        del sd

        self.model.eval()
        self.model.cpu()

    def unload_model(self):
        """Drop the weights' storage by moving the module to the 'meta' device."""
        if self.model is not None:
            self.model.to('meta')

    def __call__(self, input_image):
        """Encode `input_image`; return CLIP outputs (with hidden states) on the ControlNet device."""
        with torch.no_grad():
            # Inference runs on CPU; only the results are moved to the device.
            clip_vision_model = self.model.cpu()
            feat = self.processor(images=input_image, return_tensors="pt")
            feat['pixel_values'] = feat['pixel_values'].cpu()
            result = clip_vision_model(**feat, output_hidden_states=True)
            # Move the hidden-state list and every tensor field to the device.
            result['hidden_states'] = [v.to(devices.get_device_for("controlnet")) for v in result['hidden_states']]
            result = {k: v.to(devices.get_device_for("controlnet")) if isinstance(v, torch.Tensor) else v for k, v in result.items()}
            return result
|
RAVE-main/annotator/leres/__pycache__/__init__.cpython-38.pyc
ADDED
|
Binary file (2.88 kB). View file
|
|
|
RAVE-main/annotator/leres/pix2pix/options/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
"""This package options includes option modules: training options, test options, and basic options (used in both training and test)."""
|
RAVE-main/annotator/leres/pix2pix/options/__pycache__/__init__.cpython-38.pyc
ADDED
|
Binary file (303 Bytes). View file
|
|
|
RAVE-main/annotator/leres/pix2pix/options/__pycache__/base_options.cpython-38.pyc
ADDED
|
Binary file (7.14 kB). View file
|
|
|
RAVE-main/annotator/leres/pix2pix/options/__pycache__/test_options.cpython-38.pyc
ADDED
|
Binary file (1.11 kB). View file
|
|
|
RAVE-main/annotator/leres/pix2pix/options/base_options.py
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import os
|
| 3 |
+
from ...pix2pix.util import util
|
| 4 |
+
# import torch
|
| 5 |
+
from ...pix2pix import models
|
| 6 |
+
# import pix2pix.data
|
| 7 |
+
import numpy as np
|
| 8 |
+
|
| 9 |
+
class BaseOptions():
    """This class defines options used during both training and test time.

    It also implements several helper functions such as parsing, printing,
    and saving the options. It also gathers additional options defined in
    <modify_commandline_options> functions in both dataset class and model
    class.

    Subclasses are expected to set ``self.isTrain`` in their ``initialize``.
    """

    def __init__(self):
        """Reset the class; indicates the class hasn't been initialized."""
        self.initialized = False

    def initialize(self, parser):
        """Define the common options that are used in both training and test.

        Returns the parser with all shared arguments registered and marks
        the instance as initialized.
        """
        # basic parameters
        parser.add_argument('--dataroot', help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
        parser.add_argument('--name', type=str, default='void', help='mahdi_unet_new, scaled_unet')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        parser.add_argument('--checkpoints_dir', type=str, default='./pix2pix/checkpoints', help='models are saved here')
        # model parameters
        parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
        parser.add_argument('--input_nc', type=int, default=2, help='# of input image channels: 3 for RGB and 1 for grayscale')
        parser.add_argument('--output_nc', type=int, default=1, help='# of output image channels: 3 for RGB and 1 for grayscale')
        parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
        parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
        parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
        parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
        parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
        parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
        parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
        parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
        parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
        # dataset parameters
        parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
        parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
        parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
        parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
        parser.add_argument('--load_size', type=int, default=672, help='scale images to this size')
        parser.add_argument('--crop_size', type=int, default=672, help='then crop to this size')
        parser.add_argument('--max_dataset_size', type=int, default=10000, help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
        parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
        parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
        parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
        # additional parameters
        parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        # Fix: the default used to be the *string* '0' with type=int; use the int 0.
        parser.add_argument('--load_iter', type=int, default=0, help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
        parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
        parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')

        parser.add_argument('--data_dir', type=str, required=False,
                            help='input files directory images can be .png .jpg .tiff')
        parser.add_argument('--output_dir', type=str, required=False,
                            help='result dir. result depth will be png. vides are JMPG as avi')
        parser.add_argument('--savecrops', type=int, required=False)
        parser.add_argument('--savewholeest', type=int, required=False)
        parser.add_argument('--output_resolution', type=int, required=False,
                            help='0 for no restriction 1 for resize to input size')
        parser.add_argument('--net_receptive_field_size', type=int, required=False)
        parser.add_argument('--pix2pixsize', type=int, required=False)
        parser.add_argument('--generatevideo', type=int, required=False)
        parser.add_argument('--depthNet', type=int, required=False, help='0: midas 1:strurturedRL')
        parser.add_argument('--R0', action='store_true')
        parser.add_argument('--R20', action='store_true')
        parser.add_argument('--Final', action='store_true')
        parser.add_argument('--colorize_results', action='store_true')
        parser.add_argument('--max_res', type=float, default=np.inf)

        self.initialized = True
        return parser

    def gather_options(self):
        """Initialize our parser with basic options (only once).

        Add additional model-specific options; these are defined in the
        <modify_commandline_options> function in the model class.
        Returns the parsed options namespace.
        """
        if not self.initialized:  # check if it has been initialized
            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)
        else:
            # Fix: previously this path crashed with UnboundLocalError because
            # `parser` was never assigned; reuse the parser from the first call.
            parser = self.parser

        # get the basic options
        opt, _ = parser.parse_known_args()

        # modify model-related parser options
        model_name = opt.model
        model_option_setter = models.get_option_setter(model_name)
        parser = model_option_setter(parser, self.isTrain)
        opt, _ = parser.parse_known_args()  # parse again with new defaults

        # modify dataset-related parser options
        # dataset_name = opt.dataset_mode
        # dataset_option_setter = pix2pix.data.get_option_setter(dataset_name)
        # parser = dataset_option_setter(parser, self.isTrain)

        # save and return the parser
        self.parser = parser
        # return parser.parse_args()  # EVIL: would hard-fail on unknown args
        return opt

    def print_options(self, opt):
        """Print and save options.

        It will print both current options and default values (if different).
        It will save options into a text file: [checkpoints_dir]/[name]/[phase]_opt.txt
        """
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        print(message)

        # save to the disk
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')

    def parse(self):
        """Parse our options, apply the name suffix, and convert gpu_ids to ints."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain  # train or test

        # process opt.suffix (the redundant inner ternary was removed:
        # the guard already ensures the suffix is non-empty)
        if opt.suffix:
            opt.name = opt.name + '_' + opt.suffix.format(**vars(opt))

        # self.print_options(opt)

        # set gpu ids: parse the comma-separated string, dropping negatives (CPU).
        # Renamed the loop variable so it no longer shadows the builtin `id`.
        opt.gpu_ids = [gpu_id for gpu_id in map(int, opt.gpu_ids.split(',')) if gpu_id >= 0]
        # if len(opt.gpu_ids) > 0:
        #     torch.cuda.set_device(opt.gpu_ids[0])

        self.opt = opt
        return self.opt
|
RAVE-main/annotator/leres/pix2pix/options/test_options.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .base_options import BaseOptions
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class TestOptions(BaseOptions):
    """This class includes test options.

    It also includes shared options defined in BaseOptions.
    """

    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)  # define shared options
        parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        # Dropout and Batchnorm has different behavioir during training and test.
        parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
        parser.add_argument('--num_test', type=int, default=50, help='how many test images to run')
        # rewrite devalue values
        parser.set_defaults(model='pix2pix4depth')
        # To avoid cropping, the load_size should be the same as crop_size
        parser.set_defaults(load_size=parser.get_default('crop_size'))
        # Marks the options as test-time; BaseOptions.parse reads this flag.
        self.isTrain = False
        return parser
|
RAVE-main/annotator/normalbae/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2022 Caroline Chan
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
RAVE-main/annotator/normalbae/__init__.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import types
|
| 3 |
+
import torch
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
from einops import rearrange
|
| 7 |
+
from .models.NNET import NNET
|
| 8 |
+
from modules import devices
|
| 9 |
+
from annotator.annotator_path import models_path
|
| 10 |
+
import torchvision.transforms as transforms
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# load model
|
| 14 |
+
def load_checkpoint(fpath, model):
    """Load weights from checkpoint file `fpath` into `model` and return it.

    The checkpoint is expected to hold the state dict under the 'model' key.
    Keys saved from a ``DataParallel`` wrapper carry a ``module.`` prefix,
    which is stripped before loading.
    """
    ckpt = torch.load(fpath, map_location='cpu')['model']

    # Fix: strip a *leading* 'module.' only. The previous
    # k.replace('module.', '') also mangled keys that merely contain the
    # substring elsewhere (e.g. 'encoder.module.weight').
    load_dict = {}
    for k, v in ckpt.items():
        if k.startswith('module.'):
            load_dict[k[len('module.'):]] = v
        else:
            load_dict[k] = v

    model.load_state_dict(load_dict)
    return model
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class NormalBaeDetector:
    """Surface-normal estimator (NormalBae, 'scannet' checkpoint) used as a
    ControlNet preprocessor. The model is downloaded and loaded lazily on
    first call."""

    # Directory where the checkpoint is cached.
    model_dir = os.path.join(models_path, "normal_bae")

    def __init__(self):
        # Loaded lazily by __call__ via load_model().
        self.model = None
        self.device = devices.get_device_for("controlnet")

    def load_model(self):
        """Download the checkpoint if missing and initialize NNET on self.device."""
        remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/scannet.pt"
        modelpath = os.path.join(self.model_dir, "scannet.pt")
        if not os.path.exists(modelpath):
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(remote_model_path, model_dir=self.model_dir)
        # Arguments consumed by NNET's decoder.
        args = types.SimpleNamespace()
        args.mode = 'client'
        args.architecture = 'BN'
        args.pretrained = 'scannet'
        args.sampling_ratio = 0.4
        args.importance_ratio = 0.7
        model = NNET(args)
        model = load_checkpoint(modelpath, model)
        model.eval()
        self.model = model.to(self.device)
        # ImageNet normalization applied to the scaled input image.
        self.norm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    def unload_model(self):
        """Move the model to CPU to free device memory (kept for fast reuse)."""
        if self.model is not None:
            self.model.cpu()

    def __call__(self, input_image):
        """Estimate normals for an HWC image array; returns an HWC uint8 normal map.

        Assumes `input_image` is a numpy array with values in [0, 255]
        (torch.from_numpy + /255.0 below) — TODO confirm against callers.
        """
        if self.model is None:
            self.load_model()

        # Re-attach in case unload_model() moved the weights to CPU.
        self.model.to(self.device)
        assert input_image.ndim == 3
        image_normal = input_image
        with torch.no_grad():
            image_normal = torch.from_numpy(image_normal).float().to(self.device)
            image_normal = image_normal / 255.0
            image_normal = rearrange(image_normal, 'h w c -> 1 c h w')
            image_normal = self.norm(image_normal)

            normal = self.model(image_normal)
            # Last decoder output; first three channels are the normal vector.
            normal = normal[0][-1][:, :3]
            # d = torch.sum(normal ** 2.0, dim=1, keepdim=True) ** 0.5
            # d = torch.maximum(d, torch.ones_like(d) * 1e-5)
            # normal /= d
            # Map normals from [-1, 1] to [0, 1] for image output.
            normal = ((normal + 1) * 0.5).clip(0, 1)

            normal = rearrange(normal[0], 'c h w -> h w c').cpu().numpy()
            normal_image = (normal * 255.0).clip(0, 255).astype(np.uint8)

        return normal_image
|
RAVE-main/annotator/normalbae/models/NNET.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
|
| 5 |
+
from .submodules.encoder import Encoder
|
| 6 |
+
from .submodules.decoder import Decoder
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
class NNET(nn.Module):
    """Normal-estimation network: an EfficientNet Encoder feeding a Decoder."""

    def __init__(self, args):
        super(NNET, self).__init__()
        self.encoder = Encoder()
        self.decoder = Decoder(args)

    def get_1x_lr_params(self):
        """Parameters trained at the reduced (lr/10) learning rate."""
        return self.encoder.parameters()

    def get_10x_lr_params(self):
        """Parameters trained at the full learning rate."""
        return self.decoder.parameters()

    def forward(self, img, **kwargs):
        encoded = self.encoder(img)
        return self.decoder(encoded, **kwargs)
|
RAVE-main/annotator/normalbae/models/baseline.py
ADDED
|
@@ -0,0 +1,85 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
|
| 5 |
+
from .submodules.submodules import UpSampleBN, norm_normalize
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
# This is the baseline encoder-decoder we used in the ablation study
|
| 9 |
+
# This is the baseline encoder-decoder we used in the ablation study
class NNET(nn.Module):
    """Baseline surface-normal network: Encoder + plain Decoder, with the
    output upsampled to the input resolution and normalized."""

    def __init__(self, args=None):
        super(NNET, self).__init__()
        self.encoder = Encoder()
        self.decoder = Decoder(num_classes=4)

    def forward(self, x, **kwargs):
        out = self.decoder(self.encoder(x), **kwargs)

        # Bilinearly upsample the output to match the input resolution
        up_out = F.interpolate(out, size=[x.size(2), x.size(3)], mode='bilinear', align_corners=False)

        # L2-normalize the first three channels / ensure positive value for concentration parameters (kappa)
        up_out = norm_normalize(up_out)
        return up_out

    def get_1x_lr_params(self):  # lr/10 learning rate
        return self.encoder.parameters()

    def get_10x_lr_params(self):  # lr learning rate
        modules = [self.decoder]
        for m in modules:
            yield from m.parameters()
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
# Encoder
|
| 35 |
+
# Encoder
class Encoder(nn.Module):
    """EfficientNet-B5 backbone that returns all intermediate feature maps."""

    def __init__(self):
        super(Encoder, self).__init__()

        basemodel_name = 'tf_efficientnet_b5_ap'
        # Downloads the pretrained backbone via torch.hub on first use.
        basemodel = torch.hub.load('rwightman/gen-efficientnet-pytorch', basemodel_name, pretrained=True)

        # Remove last layer
        basemodel.global_pool = nn.Identity()
        basemodel.classifier = nn.Identity()

        self.original_model = basemodel

    def forward(self, x):
        # Collect the input plus every stage's output; the 'blocks' stage is
        # expanded so each sub-block contributes its own feature map.
        features = [x]
        for k, v in self.original_model._modules.items():
            if (k == 'blocks'):
                for ki, vi in v._modules.items():
                    features.append(vi(features[-1]))
            else:
                features.append(v(features[-1]))
        return features
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# Decoder (no pixel-wise MLP, no uncertainty-guided sampling)
|
| 60 |
+
# Decoder (no pixel-wise MLP, no uncertainty-guided sampling)
class Decoder(nn.Module):
    """Plain BN up-projection decoder used as the ablation baseline.

    Maps the encoder feature pyramid to a `num_classes`-channel map at
    1/2 of the input resolution.
    """

    def __init__(self, num_classes=4):
        super(Decoder, self).__init__()
        self.conv2 = nn.Conv2d(2048, 2048, kernel_size=1, stride=1, padding=0)
        self.up1 = UpSampleBN(skip_input=2048 + 176, output_features=1024)
        self.up2 = UpSampleBN(skip_input=1024 + 64, output_features=512)
        self.up3 = UpSampleBN(skip_input=512 + 40, output_features=256)
        self.up4 = UpSampleBN(skip_input=256 + 24, output_features=128)
        self.conv3 = nn.Conv2d(128, num_classes, kernel_size=3, stride=1, padding=1)

    def forward(self, features):
        # Skip connections come from fixed indices in the Encoder feature list.
        skip0, skip1, skip2, skip3, bottom = (
            features[4], features[5], features[6], features[8], features[11])

        d = self.conv2(bottom)
        d = self.up1(d, skip3)
        d = self.up2(d, skip2)
        d = self.up3(d, skip1)
        d = self.up4(d, skip0)
        return self.conv3(d)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
if __name__ == '__main__':
    # Smoke test: push a random 480x640 batch through the network.
    # BUGFIX: the original instantiated an undefined name `Baseline()`, which
    # raised NameError at runtime; the class defined in this file is NNET.
    model = NNET()
    x = torch.rand(2, 3, 480, 640)
    out = model(x)
    print(out.shape)
|
RAVE-main/annotator/normalbae/models/submodules/decoder.py
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
from .submodules import UpSampleBN, UpSampleGN, norm_normalize, sample_points
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class Decoder(nn.Module):
    """Multi-scale surface-normal decoder with uncertainty-guided point sampling.

    Consumes the encoder feature list and produces predictions at 1/8, 1/4,
    1/2 and 1/1 of the (half-resolution) feature pyramid.  Every output has
    4 channels: a normal vector (3) + concentration parameter kappa (1),
    post-processed by `norm_normalize`.

    In 'train' mode, only a subset of points (chosen by `sample_points`,
    biased toward uncertain ones) is refined by the pixel-wise MLPs; in any
    other mode all pixels are refined densely.
    """

    def __init__(self, args):
        super(Decoder, self).__init__()

        # hyper-parameters for uncertainty-guided point sampling
        self.sampling_ratio = args.sampling_ratio
        self.importance_ratio = args.importance_ratio

        # feature-map path: 1x1 bottleneck conv + four up-projection stages
        self.conv2 = nn.Conv2d(2048, 2048, kernel_size=1, stride=1, padding=0)
        if args.architecture == 'BN':
            self.up1 = UpSampleBN(skip_input=2048 + 176, output_features=1024)
            self.up2 = UpSampleBN(skip_input=1024 + 64, output_features=512)
            self.up3 = UpSampleBN(skip_input=512 + 40, output_features=256)
            self.up4 = UpSampleBN(skip_input=256 + 24, output_features=128)

        elif args.architecture == 'GN':
            self.up1 = UpSampleGN(skip_input=2048 + 176, output_features=1024)
            self.up2 = UpSampleGN(skip_input=1024 + 64, output_features=512)
            self.up3 = UpSampleGN(skip_input=512 + 40, output_features=256)
            self.up4 = UpSampleGN(skip_input=256 + 24, output_features=128)

        else:
            raise Exception('invalid architecture')

        # produces 1/8 res output (dense conv; the coarsest prediction)
        self.out_conv_res8 = nn.Conv2d(512, 4, kernel_size=3, stride=1, padding=1)

        # produces 1/4 res output (pixel-wise MLP over 512-ch features + 4-ch init pred)
        self.out_conv_res4 = nn.Sequential(
            nn.Conv1d(512 + 4, 128, kernel_size=1), nn.ReLU(),
            nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
            nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
            nn.Conv1d(128, 4, kernel_size=1),
        )

        # produces 1/2 res output (256-ch features + 4-ch init pred)
        self.out_conv_res2 = nn.Sequential(
            nn.Conv1d(256 + 4, 128, kernel_size=1), nn.ReLU(),
            nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
            nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
            nn.Conv1d(128, 4, kernel_size=1),
        )

        # produces 1/1 res output (128-ch features + 4-ch init pred)
        self.out_conv_res1 = nn.Sequential(
            nn.Conv1d(128 + 4, 128, kernel_size=1), nn.ReLU(),
            nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
            nn.Conv1d(128, 128, kernel_size=1), nn.ReLU(),
            nn.Conv1d(128, 4, kernel_size=1),
        )

    def forward(self, features, gt_norm_mask=None, mode='test'):
        """Return three parallel lists (coarse-to-fine):
        dense outputs, sampled-point predictions, and sampled-point coords.
        `gt_norm_mask` is only used in 'train' mode (passed to `sample_points`).
        """
        # Skip features at fixed indices of the Encoder feature list.
        x_block0, x_block1, x_block2, x_block3, x_block4 = features[4], features[5], features[6], features[8], features[11]

        # generate feature-map
        # (shape comments below assume a 480x640 input — TODO confirm)
        x_d0 = self.conv2(x_block4)      # x_d0 : [2, 2048, 15, 20]   1/32 res
        x_d1 = self.up1(x_d0, x_block3)  # x_d1 : [2, 1024, 30, 40]   1/16 res
        x_d2 = self.up2(x_d1, x_block2)  # x_d2 : [2, 512, 60, 80]    1/8 res
        x_d3 = self.up3(x_d2, x_block1)  # x_d3 : [2, 256, 120, 160]  1/4 res
        x_d4 = self.up4(x_d3, x_block0)  # x_d4 : [2, 128, 240, 320]  1/2 res

        # 1/8 res output (dense)
        out_res8 = self.out_conv_res8(x_d2)  # out_res8: [2, 4, 60, 80]
        out_res8 = norm_normalize(out_res8)  # normalized normals + positive kappa

        ################################################################################################################
        # out_res4
        ################################################################################################################

        if mode == 'train':
            # upsampling ... out_res8: [2, 4, 60, 80] -> out_res8_res4: [2, 4, 120, 160]
            out_res8_res4 = F.interpolate(out_res8, scale_factor=2, mode='bilinear', align_corners=True)
            B, _, H, W = out_res8_res4.shape

            # samples: [B, 1, N, 2]
            point_coords_res4, rows_int, cols_int = sample_points(out_res8_res4.detach(), gt_norm_mask,
                                                                  sampling_ratio=self.sampling_ratio,
                                                                  beta=self.importance_ratio)

            # output (needed for evaluation / visualization)
            out_res4 = out_res8_res4

            # grid_sample feature-map at the sampled point coordinates
            feat_res4 = F.grid_sample(x_d2, point_coords_res4, mode='bilinear', align_corners=True)  # (B, 512, 1, N)
            init_pred = F.grid_sample(out_res8, point_coords_res4, mode='bilinear', align_corners=True)  # (B, 4, 1, N)
            feat_res4 = torch.cat([feat_res4, init_pred], dim=1)  # (B, 512+4, 1, N)

            # prediction (needed to compute loss)
            samples_pred_res4 = self.out_conv_res4(feat_res4[:, :, 0, :])  # (B, 4, N)
            samples_pred_res4 = norm_normalize(samples_pred_res4)  # (B, 4, N) - normalized

            # Scatter refined point predictions back into the upsampled map
            # (in-place write; out_res4 aliases out_res8_res4).
            for i in range(B):
                out_res4[i, :, rows_int[i, :], cols_int[i, :]] = samples_pred_res4[i, :, :]

        else:
            # dense path: refine every pixel of the upsampled map
            feat_map = F.interpolate(x_d2, scale_factor=2, mode='bilinear', align_corners=True)
            init_pred = F.interpolate(out_res8, scale_factor=2, mode='bilinear', align_corners=True)
            feat_map = torch.cat([feat_map, init_pred], dim=1)  # (B, 512+4, H, W)
            B, _, H, W = feat_map.shape

            # try all pixels
            out_res4 = self.out_conv_res4(feat_map.view(B, 512 + 4, -1))  # (B, 4, N)
            out_res4 = norm_normalize(out_res4)  # (B, 4, N) - normalized
            out_res4 = out_res4.view(B, 4, H, W)
            samples_pred_res4 = point_coords_res4 = None

        ################################################################################################################
        # out_res2
        ################################################################################################################

        if mode == 'train':

            # upsampling ... out_res4: [2, 4, 120, 160] -> out_res4_res2: [2, 4, 240, 320]
            out_res4_res2 = F.interpolate(out_res4, scale_factor=2, mode='bilinear', align_corners=True)
            B, _, H, W = out_res4_res2.shape

            # samples: [B, 1, N, 2]
            point_coords_res2, rows_int, cols_int = sample_points(out_res4_res2.detach(), gt_norm_mask,
                                                                  sampling_ratio=self.sampling_ratio,
                                                                  beta=self.importance_ratio)

            # output (needed for evaluation / visualization)
            out_res2 = out_res4_res2

            # grid_sample feature-map
            feat_res2 = F.grid_sample(x_d3, point_coords_res2, mode='bilinear', align_corners=True)  # (B, 256, 1, N)
            init_pred = F.grid_sample(out_res4, point_coords_res2, mode='bilinear', align_corners=True)  # (B, 4, 1, N)
            feat_res2 = torch.cat([feat_res2, init_pred], dim=1)  # (B, 256+4, 1, N)

            # prediction (needed to compute loss)
            samples_pred_res2 = self.out_conv_res2(feat_res2[:, :, 0, :])  # (B, 4, N)
            samples_pred_res2 = norm_normalize(samples_pred_res2)  # (B, 4, N) - normalized

            # in-place scatter, as in the res4 stage above
            for i in range(B):
                out_res2[i, :, rows_int[i, :], cols_int[i, :]] = samples_pred_res2[i, :, :]

        else:
            # dense path
            feat_map = F.interpolate(x_d3, scale_factor=2, mode='bilinear', align_corners=True)
            init_pred = F.interpolate(out_res4, scale_factor=2, mode='bilinear', align_corners=True)
            feat_map = torch.cat([feat_map, init_pred], dim=1)  # (B, 256+4, H, W)
            B, _, H, W = feat_map.shape

            out_res2 = self.out_conv_res2(feat_map.view(B, 256 + 4, -1))  # (B, 4, N)
            out_res2 = norm_normalize(out_res2)  # (B, 4, N) - normalized
            out_res2 = out_res2.view(B, 4, H, W)
            samples_pred_res2 = point_coords_res2 = None

        ################################################################################################################
        # out_res1
        ################################################################################################################

        if mode == 'train':
            # upsampling ... out_res2: [2, 4, 240, 320] -> out_res2_res1: [2, 4, 480, 640]
            out_res2_res1 = F.interpolate(out_res2, scale_factor=2, mode='bilinear', align_corners=True)
            B, _, H, W = out_res2_res1.shape

            # samples: [B, 1, N, 2]
            point_coords_res1, rows_int, cols_int = sample_points(out_res2_res1.detach(), gt_norm_mask,
                                                                  sampling_ratio=self.sampling_ratio,
                                                                  beta=self.importance_ratio)

            # output (needed for evaluation / visualization)
            out_res1 = out_res2_res1

            # grid_sample feature-map
            feat_res1 = F.grid_sample(x_d4, point_coords_res1, mode='bilinear', align_corners=True)  # (B, 128, 1, N)
            init_pred = F.grid_sample(out_res2, point_coords_res1, mode='bilinear', align_corners=True)  # (B, 4, 1, N)
            feat_res1 = torch.cat([feat_res1, init_pred], dim=1)  # (B, 128+4, 1, N)

            # prediction (needed to compute loss)
            samples_pred_res1 = self.out_conv_res1(feat_res1[:, :, 0, :])  # (B, 4, N)
            samples_pred_res1 = norm_normalize(samples_pred_res1)  # (B, 4, N) - normalized

            # in-place scatter, as in the stages above
            for i in range(B):
                out_res1[i, :, rows_int[i, :], cols_int[i, :]] = samples_pred_res1[i, :, :]

        else:
            # dense path
            feat_map = F.interpolate(x_d4, scale_factor=2, mode='bilinear', align_corners=True)
            init_pred = F.interpolate(out_res2, scale_factor=2, mode='bilinear', align_corners=True)
            feat_map = torch.cat([feat_map, init_pred], dim=1)  # (B, 128+4, H, W)
            B, _, H, W = feat_map.shape

            out_res1 = self.out_conv_res1(feat_map.view(B, 128 + 4, -1))  # (B, 4, N)
            out_res1 = norm_normalize(out_res1)  # (B, 4, N) - normalized
            out_res1 = out_res1.view(B, 4, H, W)
            samples_pred_res1 = point_coords_res1 = None

        # Coarse-to-fine: dense outputs, sampled predictions, sampled coordinates.
        return [out_res8, out_res4, out_res2, out_res1], \
               [out_res8, samples_pred_res4, samples_pred_res2, samples_pred_res1], \
               [None, point_coords_res4, point_coords_res2, point_coords_res1]
|
| 202 |
+
|
RAVE-main/annotator/normalbae/models/submodules/efficientnet_repo/.gitignore
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
*.egg-info/
|
| 24 |
+
.installed.cfg
|
| 25 |
+
*.egg
|
| 26 |
+
MANIFEST
|
| 27 |
+
|
| 28 |
+
# PyInstaller
|
| 29 |
+
# Usually these files are written by a python script from a template
|
| 30 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 31 |
+
*.manifest
|
| 32 |
+
*.spec
|
| 33 |
+
|
| 34 |
+
# Installer logs
|
| 35 |
+
pip-log.txt
|
| 36 |
+
pip-delete-this-directory.txt
|
| 37 |
+
|
| 38 |
+
# Unit test / coverage reports
|
| 39 |
+
htmlcov/
|
| 40 |
+
.tox/
|
| 41 |
+
.coverage
|
| 42 |
+
.coverage.*
|
| 43 |
+
.cache
|
| 44 |
+
nosetests.xml
|
| 45 |
+
coverage.xml
|
| 46 |
+
*.cover
|
| 47 |
+
.hypothesis/
|
| 48 |
+
.pytest_cache/
|
| 49 |
+
|
| 50 |
+
# Translations
|
| 51 |
+
*.mo
|
| 52 |
+
*.pot
|
| 53 |
+
|
| 54 |
+
# Django stuff:
|
| 55 |
+
*.log
|
| 56 |
+
local_settings.py
|
| 57 |
+
db.sqlite3
|
| 58 |
+
|
| 59 |
+
# Flask stuff:
|
| 60 |
+
instance/
|
| 61 |
+
.webassets-cache
|
| 62 |
+
|
| 63 |
+
# Scrapy stuff:
|
| 64 |
+
.scrapy
|
| 65 |
+
|
| 66 |
+
# Sphinx documentation
|
| 67 |
+
docs/_build/
|
| 68 |
+
|
| 69 |
+
# PyBuilder
|
| 70 |
+
target/
|
| 71 |
+
|
| 72 |
+
# Jupyter Notebook
|
| 73 |
+
.ipynb_checkpoints
|
| 74 |
+
|
| 75 |
+
# pyenv
|
| 76 |
+
.python-version
|
| 77 |
+
|
| 78 |
+
# celery beat schedule file
|
| 79 |
+
celerybeat-schedule
|
| 80 |
+
|
| 81 |
+
# SageMath parsed files
|
| 82 |
+
*.sage.py
|
| 83 |
+
|
| 84 |
+
# Environments
|
| 85 |
+
.env
|
| 86 |
+
.venv
|
| 87 |
+
env/
|
| 88 |
+
venv/
|
| 89 |
+
ENV/
|
| 90 |
+
env.bak/
|
| 91 |
+
venv.bak/
|
| 92 |
+
|
| 93 |
+
# Spyder project settings
|
| 94 |
+
.spyderproject
|
| 95 |
+
.spyproject
|
| 96 |
+
|
| 97 |
+
# Rope project settings
|
| 98 |
+
.ropeproject
|
| 99 |
+
|
| 100 |
+
# mkdocs documentation
|
| 101 |
+
/site
|
| 102 |
+
|
| 103 |
+
# pytorch stuff
|
| 104 |
+
*.pth
|
| 105 |
+
*.onnx
|
| 106 |
+
*.pb
|
| 107 |
+
|
| 108 |
+
trained_models/
|
| 109 |
+
.fuse_hidden*
|
RAVE-main/annotator/normalbae/models/submodules/efficientnet_repo/caffe2_validate.py
ADDED
|
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" Caffe2 validation script
|
| 2 |
+
|
| 3 |
+
This script is created to verify exported ONNX models running in Caffe2
|
| 4 |
+
It utilizes the same PyTorch dataloader/processing pipeline for a
|
| 5 |
+
fair comparison against the originals.
|
| 6 |
+
|
| 7 |
+
Copyright 2020 Ross Wightman
|
| 8 |
+
"""
|
| 9 |
+
import argparse
|
| 10 |
+
import numpy as np
|
| 11 |
+
from caffe2.python import core, workspace, model_helper
|
| 12 |
+
from caffe2.proto import caffe2_pb2
|
| 13 |
+
from data import create_loader, resolve_data_config, Dataset
|
| 14 |
+
from utils import AverageMeter
|
| 15 |
+
import time
|
| 16 |
+
|
| 17 |
+
parser = argparse.ArgumentParser(description='Caffe2 ImageNet Validation')
|
| 18 |
+
parser.add_argument('data', metavar='DIR',
|
| 19 |
+
help='path to dataset')
|
| 20 |
+
parser.add_argument('--c2-prefix', default='', type=str, metavar='NAME',
|
| 21 |
+
help='caffe2 model pb name prefix')
|
| 22 |
+
parser.add_argument('--c2-init', default='', type=str, metavar='PATH',
|
| 23 |
+
help='caffe2 model init .pb')
|
| 24 |
+
parser.add_argument('--c2-predict', default='', type=str, metavar='PATH',
|
| 25 |
+
help='caffe2 model predict .pb')
|
| 26 |
+
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
|
| 27 |
+
help='number of data loading workers (default: 2)')
|
| 28 |
+
parser.add_argument('-b', '--batch-size', default=256, type=int,
|
| 29 |
+
metavar='N', help='mini-batch size (default: 256)')
|
| 30 |
+
parser.add_argument('--img-size', default=None, type=int,
|
| 31 |
+
metavar='N', help='Input image dimension, uses model default if empty')
|
| 32 |
+
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
|
| 33 |
+
help='Override mean pixel value of dataset')
|
| 34 |
+
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
|
| 35 |
+
help='Override std deviation of of dataset')
|
| 36 |
+
parser.add_argument('--crop-pct', type=float, default=None, metavar='PCT',
|
| 37 |
+
help='Override default crop pct of 0.875')
|
| 38 |
+
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
|
| 39 |
+
help='Image resize interpolation type (overrides model)')
|
| 40 |
+
parser.add_argument('--tf-preprocessing', dest='tf_preprocessing', action='store_true',
|
| 41 |
+
help='use tensorflow mnasnet preporcessing')
|
| 42 |
+
parser.add_argument('--print-freq', '-p', default=10, type=int,
|
| 43 |
+
metavar='N', help='print frequency (default: 10)')
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def main():
    """Validate an exported ONNX->Caffe2 model pair on an ImageNet folder.

    Loads the init/predict protobufs, builds the same PyTorch data loader as
    the original model, feeds batches into the Caffe2 workspace and reports
    top-1/top-5 accuracy.
    """
    args = parser.parse_args()
    args.gpu_id = 0
    # --c2-prefix is a shorthand for the pair of protobuf paths.
    if args.c2_prefix:
        args.c2_init = args.c2_prefix + '.init.pb'
        args.c2_predict = args.c2_prefix + '.predict.pb'

    model = model_helper.ModelHelper(name="validation_net", init_params=False)

    # Bring in the init net from init_net.pb
    init_net_proto = caffe2_pb2.NetDef()
    with open(args.c2_init, "rb") as f:
        init_net_proto.ParseFromString(f.read())
    model.param_init_net = core.Net(init_net_proto)

    # bring in the predict net from predict_net.pb
    predict_net_proto = caffe2_pb2.NetDef()
    with open(args.c2_predict, "rb") as f:
        predict_net_proto.ParseFromString(f.read())
    model.net = core.Net(predict_net_proto)

    # Resolve preprocessing config from CLI overrides (no model to query: None).
    data_config = resolve_data_config(None, args)
    loader = create_loader(
        Dataset(args.data, load_bytes=args.tf_preprocessing),
        input_size=data_config['input_size'],
        batch_size=args.batch_size,
        use_prefetcher=False,
        interpolation=data_config['interpolation'],
        mean=data_config['mean'],
        std=data_config['std'],
        num_workers=args.workers,
        crop_pct=data_config['crop_pct'],
        tensorflow_preprocessing=args.tf_preprocessing)

    # this is so obvious, wonderful interface </sarcasm>
    input_blob = model.net.external_inputs[0]
    output_blob = model.net.external_outputs[0]

    # NOTE(review): CPU path is hard-wired on; the CUDA branch below is dead
    # code kept for experimentation.
    if True:
        device_opts = None
    else:
        # CUDA is crashing, no idea why, awesome error message, give it a try for kicks
        device_opts = core.DeviceOption(caffe2_pb2.PROTO_CUDA, args.gpu_id)
        model.net.RunAllOnGPU(gpu_id=args.gpu_id, use_cudnn=True)
        model.param_init_net.RunAllOnGPU(gpu_id=args.gpu_id, use_cudnn=True)

    # Seed the input blob with random data so the net can be created once up front.
    model.param_init_net.GaussianFill(
        [], input_blob.GetUnscopedName(),
        shape=(1,) + data_config['input_size'], mean=0.0, std=1.0)
    workspace.RunNetOnce(model.param_init_net)
    workspace.CreateNet(model.net, overwrite=True)

    batch_time = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()
    for i, (input, target) in enumerate(loader):
        # run the net and return prediction
        caffe2_in = input.data.numpy()
        workspace.FeedBlob(input_blob, caffe2_in, device_opts)
        workspace.RunNet(model.net, num_iter=1)
        output = workspace.FetchBlob(output_blob)

        # measure accuracy and record loss
        prec1, prec5 = accuracy_np(output.data, target.numpy())
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            # NOTE(review): ms_avg uses 100 *, which looks like it should be
            # 1000 * to actually be milliseconds — confirm before relying on it.
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f}, {rate_avg:.3f}/s, {ms_avg:.3f} ms/sample) \t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      i, len(loader), batch_time=batch_time, rate_avg=input.size(0) / batch_time.avg,
                      ms_avg=100 * batch_time.avg / input.size(0), top1=top1, top5=top5))

    print(' * Prec@1 {top1.avg:.3f} ({top1a:.3f}) Prec@5 {top5.avg:.3f} ({top5a:.3f})'.format(
        top1=top1, top1a=100-top1.avg, top5=top5, top5a=100.-top5.avg))
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def accuracy_np(output, target):
    """Compute (top-1, top-5) accuracy in percent for a batch of class scores.

    output: (N, num_classes) score array; target: (N,) integer labels.
    """
    # Class indices ranked best-first for each sample.
    ranked = np.argsort(output, axis=1)[:, ::-1]
    labels = target[:, np.newaxis]
    # A sample counts for top-5 if its label appears among the 5 best classes.
    hits_at_5 = np.equal(ranked[:, :5], labels).sum(axis=1)
    top5 = 100 * hits_at_5.mean()
    top1 = 100 * np.equal(ranked[:, 0], target).mean()
    return top1, top5
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
if __name__ == '__main__':
|
| 138 |
+
main()
|
RAVE-main/annotator/normalbae/models/submodules/efficientnet_repo/geffnet/activations/__init__.py
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from geffnet import config
|
| 2 |
+
from geffnet.activations.activations_me import *
|
| 3 |
+
from geffnet.activations.activations_jit import *
|
| 4 |
+
from geffnet.activations.activations import *
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
# True when this PyTorch build ships a native SiLU (preferred over our swish).
_has_silu = 'silu' in dir(torch.nn.functional)

# Default (pure Python / builtin) activation fns, keyed by canonical name.
_ACT_FN_DEFAULT = dict(
    silu=F.silu if _has_silu else swish,
    swish=F.silu if _has_silu else swish,
    mish=mish,
    relu=F.relu,
    relu6=F.relu6,
    sigmoid=sigmoid,
    tanh=tanh,
    hard_sigmoid=hard_sigmoid,
    hard_swish=hard_swish,
)

# torchscript-jit variants (subset of names only).
_ACT_FN_JIT = dict(
    silu=F.silu if _has_silu else swish_jit,
    swish=F.silu if _has_silu else swish_jit,
    mish=mish_jit,
)

# Memory-efficient custom-autograd variants (subset of names only).
_ACT_FN_ME = dict(
    silu=F.silu if _has_silu else swish_me,
    swish=F.silu if _has_silu else swish_me,
    mish=mish_me,
    hard_swish=hard_swish_me,
    # BUGFIX: this entry was keyed 'hard_sigmoid_jit', so
    # get_act_fn('hard_sigmoid') could never select the memory-efficient
    # variant; the layer table below keys the same variant as 'hard_sigmoid'.
    hard_sigmoid=hard_sigmoid_me,
)

# Module (nn.Module) counterparts of the tables above.
_ACT_LAYER_DEFAULT = dict(
    silu=nn.SiLU if _has_silu else Swish,
    swish=nn.SiLU if _has_silu else Swish,
    mish=Mish,
    relu=nn.ReLU,
    relu6=nn.ReLU6,
    sigmoid=Sigmoid,
    tanh=Tanh,
    hard_sigmoid=HardSigmoid,
    hard_swish=HardSwish,
)

_ACT_LAYER_JIT = dict(
    silu=nn.SiLU if _has_silu else SwishJit,
    swish=nn.SiLU if _has_silu else SwishJit,
    mish=MishJit,
)

_ACT_LAYER_ME = dict(
    silu=nn.SiLU if _has_silu else SwishMe,
    swish=nn.SiLU if _has_silu else SwishMe,
    mish=MishMe,
    hard_swish=HardSwishMe,
    hard_sigmoid=HardSigmoidMe
)
|
| 60 |
+
|
| 61 |
+
# User-supplied overrides take precedence over every built-in table.
_OVERRIDE_FN = {}
_OVERRIDE_LAYER = {}


def add_override_act_fn(name, fn):
    """Register a single activation-fn override under `name`."""
    global _OVERRIDE_FN
    _OVERRIDE_FN[name] = fn


def update_override_act_fn(overrides):
    """Merge a dict of activation-fn overrides into the registry."""
    assert isinstance(overrides, dict)
    global _OVERRIDE_FN
    _OVERRIDE_FN.update(overrides)


def clear_override_act_fn():
    """Drop all activation-fn overrides."""
    global _OVERRIDE_FN
    _OVERRIDE_FN = {}


def add_override_act_layer(name, fn):
    """Register a single activation-layer override under `name`."""
    _OVERRIDE_LAYER[name] = fn


def update_override_act_layer(overrides):
    """Merge a dict of activation-layer overrides into the registry."""
    assert isinstance(overrides, dict)
    global _OVERRIDE_LAYER
    _OVERRIDE_LAYER.update(overrides)


def clear_override_act_layer():
    """Drop all activation-layer overrides."""
    global _OVERRIDE_LAYER
    _OVERRIDE_LAYER = {}
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def get_act_fn(name='relu'):
    """ Activation Function Factory
    Fetching activation fns by name with this function allows export or torch script friendly
    functions to be returned dynamically based on current config.

    Resolution order: user override -> memory-efficient -> export hack ->
    jit-scripted -> default table.
    """
    if name in _OVERRIDE_FN:
        return _OVERRIDE_FN[name]
    use_me = not (config.is_exportable() or config.is_scriptable() or config.is_no_jit())
    if use_me and name in _ACT_FN_ME:
        # If not exporting or scripting the model, first look for a memory optimized version
        # activation with custom autograd, then fallback to jit scripted, then a Python or Torch builtin
        return _ACT_FN_ME[name]
    if config.is_exportable() and name in ('silu', 'swish'):
        # FIXME PyTorch SiLU doesn't ONNX export, this is a temp hack
        return swish
    use_jit = not (config.is_exportable() or config.is_no_jit())
    # NOTE: export tracing should work with jit scripted components, but I keep running into issues
    if use_jit and name in _ACT_FN_JIT:  # jit scripted models should be okay for export/scripting
        return _ACT_FN_JIT[name]
    return _ACT_FN_DEFAULT[name]
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def get_act_layer(name='relu'):
    """ Activation Layer Factory
    Fetching activation layers by name with this function allows export or torch script friendly
    functions to be returned dynamically based on current config.

    Resolution order mirrors get_act_fn: user override -> memory-efficient ->
    export hack -> jit-scripted -> default table.
    """
    if name in _OVERRIDE_LAYER:
        return _OVERRIDE_LAYER[name]
    use_me = not (config.is_exportable() or config.is_scriptable() or config.is_no_jit())
    if use_me and name in _ACT_LAYER_ME:
        return _ACT_LAYER_ME[name]
    if config.is_exportable() and name in ('silu', 'swish'):
        # FIXME PyTorch SiLU doesn't ONNX export, this is a temp hack
        return Swish
    use_jit = not (config.is_exportable() or config.is_no_jit())
    # NOTE: export tracing should work with jit scripted components, but I keep running into issues
    # BUGFIX: membership was tested against _ACT_FN_JIT while fetching from
    # _ACT_LAYER_JIT — a latent KeyError if the two tables ever diverge.
    if use_jit and name in _ACT_LAYER_JIT:  # jit scripted models should be okay for export/scripting
        return _ACT_LAYER_JIT[name]
    return _ACT_LAYER_DEFAULT[name]
|
| 136 |
+
|
| 137 |
+
|
RAVE-main/annotator/normalbae/models/submodules/efficientnet_repo/geffnet/activations/activations.py
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" Activations
|
| 2 |
+
|
| 3 |
+
A collection of activations fn and modules with a common interface so that they can
|
| 4 |
+
easily be swapped. All have an `inplace` arg even if not used.
|
| 5 |
+
|
| 6 |
+
Copyright 2020 Ross Wightman
|
| 7 |
+
"""
|
| 8 |
+
from torch import nn as nn
|
| 9 |
+
from torch.nn import functional as F
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def swish(x, inplace: bool = False):
    """Swish - Described originally as SiLU (https://arxiv.org/abs/1702.03118v3)
    and also as Swish (https://arxiv.org/abs/1710.05941).

    TODO Rename to SiLU with addition to PyTorch
    """
    gate = x.sigmoid()
    if inplace:
        return x.mul_(gate)
    return x.mul(gate)
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class Swish(nn.Module):
    """Module wrapper around the `swish` activation function.

    Accepts the conventional `inplace` constructor flag and forwards it to
    `swish` on every call.
    """

    def __init__(self, inplace: bool = False):
        super(Swish, self).__init__()
        # Whether to apply the activation in place (mutating the input tensor).
        self.inplace = inplace

    def forward(self, x):
        return swish(x, self.inplace)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def mish(x, inplace: bool = False):
    """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681

    NOTE: ``inplace`` is accepted for interface compatibility only; no
    in-place computation is performed.
    """
    return x * F.softplus(x).tanh()


class Mish(nn.Module):
    """Module form of :func:`mish` (``inplace`` kept for interface parity)."""

    def __init__(self, inplace: bool = False):
        super(Mish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return mish(x, self.inplace)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def sigmoid(x, inplace: bool = False):
    """Sigmoid with the common ``inplace`` activation interface."""
    if inplace:
        return x.sigmoid_()
    return x.sigmoid()


# PyTorch has this, but not with a consistent inplace argument interface
class Sigmoid(nn.Module):
    """Sigmoid module exposing a consistent ``inplace`` constructor arg."""

    def __init__(self, inplace: bool = False):
        super(Sigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        if self.inplace:
            return x.sigmoid_()
        return x.sigmoid()
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def tanh(x, inplace: bool = False):
    """Tanh with the common ``inplace`` activation interface."""
    if inplace:
        return x.tanh_()
    return x.tanh()


# PyTorch has this, but not with a consistent inplace argument interface
class Tanh(nn.Module):
    """Tanh module exposing a consistent ``inplace`` constructor arg."""

    def __init__(self, inplace: bool = False):
        super(Tanh, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        if self.inplace:
            return x.tanh_()
        return x.tanh()
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def hard_swish(x, inplace: bool = False):
    """Hard-Swish: ``x * relu6(x + 3) / 6``, a piecewise-linear Swish approximation."""
    gate = F.relu6(x + 3.) / 6.
    if inplace:
        return x.mul_(gate)
    return x.mul(gate)


class HardSwish(nn.Module):
    """Module form of :func:`hard_swish`; ``inplace`` selects the in-place variant."""

    def __init__(self, inplace: bool = False):
        super(HardSwish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return hard_swish(x, self.inplace)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def hard_sigmoid(x, inplace: bool = False):
    """Hard-Sigmoid: ``relu6(x + 3) / 6``, a piecewise-linear sigmoid approximation."""
    if not inplace:
        return F.relu6(x + 3.) / 6.
    # In-place chain is numerically identical: clamp(x + 3, 0, 6) / 6.
    return x.add_(3.).clamp_(0., 6.).div_(6.)


class HardSigmoid(nn.Module):
    """Module form of :func:`hard_sigmoid`; ``inplace`` selects the in-place variant."""

    def __init__(self, inplace: bool = False):
        super(HardSigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return hard_sigmoid(x, self.inplace)
|
| 101 |
+
|
| 102 |
+
|
RAVE-main/annotator/normalbae/models/submodules/efficientnet_repo/geffnet/activations/activations_jit.py
ADDED
|
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" Activations (jit)
|
| 2 |
+
|
| 3 |
+
A collection of jit-scripted activations fn and modules with a common interface so that they can
|
| 4 |
+
easily be swapped. All have an `inplace` arg even if not used.
|
| 5 |
+
|
| 6 |
+
All jit scripted activations are lacking in-place variations on purpose, scripted kernel fusion does not
|
| 7 |
+
currently work across in-place op boundaries, thus performance is equal to or less than the non-scripted
|
| 8 |
+
versions if they contain in-place ops.
|
| 9 |
+
|
| 10 |
+
Copyright 2020 Ross Wightman
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import torch
|
| 14 |
+
from torch import nn as nn
|
| 15 |
+
from torch.nn import functional as F
|
| 16 |
+
|
| 17 |
+
# Public API of this module: the jit-scripted activation functions and their
# nn.Module wrappers.
__all__ = ['swish_jit', 'SwishJit', 'mish_jit', 'MishJit',
           'hard_sigmoid_jit', 'HardSigmoidJit', 'hard_swish_jit', 'HardSwishJit']
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@torch.jit.script
def swish_jit(x, inplace: bool = False):
    """Swish - Described originally as SiLU (https://arxiv.org/abs/1702.03118v3)
    and also as Swish (https://arxiv.org/abs/1710.05941).

    TODO Rename to SiLU with addition to PyTorch
    """
    # ``inplace`` is intentionally ignored: in-place ops block scripted kernel fusion.
    return x * torch.sigmoid(x)


@torch.jit.script
def mish_jit(x, _inplace: bool = False):
    """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
    """
    return x * torch.tanh(F.softplus(x))


class SwishJit(nn.Module):
    """Module wrapper for the scripted :func:`swish_jit` (``inplace`` unused)."""

    def __init__(self, inplace: bool = False):
        super(SwishJit, self).__init__()

    def forward(self, x):
        return swish_jit(x)


class MishJit(nn.Module):
    """Module wrapper for the scripted :func:`mish_jit` (``inplace`` unused)."""

    def __init__(self, inplace: bool = False):
        super(MishJit, self).__init__()

    def forward(self, x):
        return mish_jit(x)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
@torch.jit.script
def hard_sigmoid_jit(x, inplace: bool = False):
    """Hard-Sigmoid (scripted): ``relu6(x + 3) / 6``. ``inplace`` is ignored."""
    # Equivalent to (x + 3).clamp(min=0, max=6).div(6.)
    return F.relu6(x + 3.) / 6.


class HardSigmoidJit(nn.Module):
    """Module wrapper for :func:`hard_sigmoid_jit` (``inplace`` unused)."""

    def __init__(self, inplace: bool = False):
        super(HardSigmoidJit, self).__init__()

    def forward(self, x):
        return hard_sigmoid_jit(x)


@torch.jit.script
def hard_swish_jit(x, inplace: bool = False):
    """Hard-Swish (scripted): ``x * relu6(x + 3) / 6``. ``inplace`` is ignored."""
    # Equivalent to x * (x + 3).clamp(min=0, max=6).div(6.)
    return x * (F.relu6(x + 3.) / 6.)


class HardSwishJit(nn.Module):
    """Module wrapper for :func:`hard_swish_jit` (``inplace`` unused)."""

    def __init__(self, inplace: bool = False):
        super(HardSwishJit, self).__init__()

    def forward(self, x):
        return hard_swish_jit(x)
|
RAVE-main/annotator/normalbae/models/submodules/efficientnet_repo/geffnet/activations/activations_me.py
ADDED
|
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" Activations (memory-efficient w/ custom autograd)
|
| 2 |
+
|
| 3 |
+
A collection of activations fn and modules with a common interface so that they can
|
| 4 |
+
easily be swapped. All have an `inplace` arg even if not used.
|
| 5 |
+
|
| 6 |
+
These activations are not compatible with jit scripting or ONNX export of the model, please use either
|
| 7 |
+
the JIT or basic versions of the activations.
|
| 8 |
+
|
| 9 |
+
Copyright 2020 Ross Wightman
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import torch
|
| 13 |
+
from torch import nn as nn
|
| 14 |
+
from torch.nn import functional as F
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# Public API of this module: memory-efficient (custom-autograd) activation
# functions and their nn.Module wrappers.
__all__ = ['swish_me', 'SwishMe', 'mish_me', 'MishMe',
           'hard_sigmoid_me', 'HardSigmoidMe', 'hard_swish_me', 'HardSwishMe']
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@torch.jit.script
def swish_jit_fwd(x):
    # Forward: swish(x) = x * sigmoid(x)
    return x.mul(torch.sigmoid(x))


@torch.jit.script
def swish_jit_bwd(x, grad_output):
    # Analytic gradient: d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
    x_sigmoid = torch.sigmoid(x)
    return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid)))


class SwishJitAutoFn(torch.autograd.Function):
    """ torch.jit.script optimised Swish w/ memory-efficient checkpoint
    Inspired by conversation btw Jeremy Howard & Adam Pazske
    https://twitter.com/jeremyphoward/status/1188251041835315200

    Swish - Described originally as SiLU (https://arxiv.org/abs/1702.03118v3)
    and also as Swish (https://arxiv.org/abs/1710.05941).

    TODO Rename to SiLU with addition to PyTorch
    """

    @staticmethod
    def forward(ctx, x):
        # Save only the input; the activation is recomputed analytically in
        # backward, trading a little compute for activation memory.
        ctx.save_for_backward(x)
        return swish_jit_fwd(x)

    @staticmethod
    def backward(ctx, grad_output):
        x = ctx.saved_tensors[0]
        return swish_jit_bwd(x, grad_output)


def swish_me(x, inplace=False):
    """Memory-efficient swish. ``inplace`` is accepted for interface
    compatibility only and is ignored."""
    return SwishJitAutoFn.apply(x)


class SwishMe(nn.Module):
    # Module wrapper; ``inplace`` is accepted but unused (see swish_me).
    def __init__(self, inplace: bool = False):
        super(SwishMe, self).__init__()

    def forward(self, x):
        return SwishJitAutoFn.apply(x)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
@torch.jit.script
def mish_jit_fwd(x):
    # Forward: mish(x) = x * tanh(softplus(x))
    return x.mul(torch.tanh(F.softplus(x)))


@torch.jit.script
def mish_jit_bwd(x, grad_output):
    # Analytic gradient: with t = tanh(softplus(x)),
    # d/dx [x * t] = t + x * sigmoid(x) * (1 - t^2)
    x_sigmoid = torch.sigmoid(x)
    x_tanh_sp = F.softplus(x).tanh()
    return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp))


class MishJitAutoFn(torch.autograd.Function):
    """ Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
    A memory efficient, jit scripted variant of Mish
    """
    @staticmethod
    def forward(ctx, x):
        # Save only the input; backward recomputes the activation terms.
        ctx.save_for_backward(x)
        return mish_jit_fwd(x)

    @staticmethod
    def backward(ctx, grad_output):
        x = ctx.saved_tensors[0]
        return mish_jit_bwd(x, grad_output)


def mish_me(x, inplace=False):
    """Memory-efficient mish. ``inplace`` is accepted for interface
    compatibility only and is ignored."""
    return MishJitAutoFn.apply(x)


class MishMe(nn.Module):
    # Module wrapper; ``inplace`` is accepted but unused (see mish_me).
    def __init__(self, inplace: bool = False):
        super(MishMe, self).__init__()

    def forward(self, x):
        return MishJitAutoFn.apply(x)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
@torch.jit.script
def hard_sigmoid_jit_fwd(x, inplace: bool = False):
    # Forward: clamp(x + 3, 0, 6) / 6; ``inplace`` is accepted but unused.
    return (x + 3).clamp(min=0, max=6).div(6.)


@torch.jit.script
def hard_sigmoid_jit_bwd(x, grad_output):
    # Gradient is 1/6 on the linear region [-3, 3] and 0 elsewhere.
    m = torch.ones_like(x) * ((x >= -3.) & (x <= 3.)) / 6.
    return grad_output * m


class HardSigmoidJitAutoFn(torch.autograd.Function):
    """Memory-efficient Hard-Sigmoid: saves only the input and recomputes
    the (piecewise-linear) gradient analytically in backward."""
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return hard_sigmoid_jit_fwd(x)

    @staticmethod
    def backward(ctx, grad_output):
        x = ctx.saved_tensors[0]
        return hard_sigmoid_jit_bwd(x, grad_output)


def hard_sigmoid_me(x, inplace: bool = False):
    """Memory-efficient hard-sigmoid. ``inplace`` is accepted for interface
    compatibility only and is ignored."""
    return HardSigmoidJitAutoFn.apply(x)


class HardSigmoidMe(nn.Module):
    # Module wrapper; ``inplace`` is accepted but unused (see hard_sigmoid_me).
    def __init__(self, inplace: bool = False):
        super(HardSigmoidMe, self).__init__()

    def forward(self, x):
        return HardSigmoidJitAutoFn.apply(x)
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
@torch.jit.script
def hard_swish_jit_fwd(x):
    # Forward: x * clamp(x + 3, 0, 6) / 6
    return x * (x + 3).clamp(min=0, max=6).div(6.)


@torch.jit.script
def hard_swish_jit_bwd(x, grad_output):
    # Piecewise gradient: 1 for x >= 3, (x / 3 + 0.5) on [-3, 3], 0 for x < -3.
    m = torch.ones_like(x) * (x >= 3.)
    m = torch.where((x >= -3.) & (x <= 3.), x / 3. + .5, m)
    return grad_output * m


class HardSwishJitAutoFn(torch.autograd.Function):
    """A memory efficient, jit-scripted HardSwish activation"""
    @staticmethod
    def forward(ctx, x):
        # Save only the input; backward recomputes the piecewise gradient.
        ctx.save_for_backward(x)
        return hard_swish_jit_fwd(x)

    @staticmethod
    def backward(ctx, grad_output):
        x = ctx.saved_tensors[0]
        return hard_swish_jit_bwd(x, grad_output)


def hard_swish_me(x, inplace=False):
    """Memory-efficient hard-swish. ``inplace`` is accepted for interface
    compatibility only and is ignored."""
    return HardSwishJitAutoFn.apply(x)


class HardSwishMe(nn.Module):
    # Module wrapper; ``inplace`` is accepted but unused (see hard_swish_me).
    def __init__(self, inplace: bool = False):
        super(HardSwishMe, self).__init__()

    def forward(self, x):
        return HardSwishJitAutoFn.apply(x)
|
RAVE-main/annotator/normalbae/models/submodules/efficientnet_repo/onnx_export.py
ADDED
|
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" ONNX export script
|
| 2 |
+
|
| 3 |
+
Export PyTorch models as ONNX graphs.
|
| 4 |
+
|
| 5 |
+
This export script originally started as an adaptation of code snippets found at
|
| 6 |
+
https://pytorch.org/tutorials/advanced/super_resolution_with_onnxruntime.html
|
| 7 |
+
|
| 8 |
+
The default parameters work with PyTorch 1.6 and ONNX 1.7 and produce an optimal ONNX graph
|
| 9 |
+
for hosting in the ONNX runtime (see onnx_validate.py). To export an ONNX model compatible
|
| 10 |
+
with caffe2 (see caffe2_benchmark.py and caffe2_validate.py), the --keep-init and --aten-fallback
|
| 11 |
+
flags are currently required.
|
| 12 |
+
|
| 13 |
+
Older versions of PyTorch/ONNX (tested PyTorch 1.4, ONNX 1.5) do not need extra flags for
|
| 14 |
+
caffe2 compatibility, but they produce a model that isn't as fast running on ONNX runtime.
|
| 15 |
+
|
| 16 |
+
Most new release of PyTorch and ONNX cause some sort of breakage in the export / usage of ONNX models.
|
| 17 |
+
Please do your research and search ONNX and PyTorch issue tracker before asking me. Thanks.
|
| 18 |
+
|
| 19 |
+
Copyright 2020 Ross Wightman
|
| 20 |
+
"""
|
| 21 |
+
import argparse
|
| 22 |
+
import torch
|
| 23 |
+
import numpy as np
|
| 24 |
+
|
| 25 |
+
import onnx
|
| 26 |
+
import geffnet
|
| 27 |
+
|
| 28 |
+
# Command-line interface. ``output`` is the only positional argument; all
# export behaviour (model choice, opset, input geometry, Caffe2 compat flags)
# is configured via options and consumed by main().
parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
parser.add_argument('output', metavar='ONNX_FILE',
                    help='output model filename')
parser.add_argument('--model', '-m', metavar='MODEL', default='mobilenetv3_large_100',
                    help='model architecture (default: mobilenetv3_large_100)')
parser.add_argument('--opset', type=int, default=10,
                    help='ONNX opset to use (default: 10)')
parser.add_argument('--keep-init', action='store_true', default=False,
                    help='Keep initializers as input. Needed for Caffe2 compatible export in newer PyTorch/ONNX.')
parser.add_argument('--aten-fallback', action='store_true', default=False,
                    help='Fallback to ATEN ops. Helps fix AdaptiveAvgPool issue with Caffe2 in newer PyTorch/ONNX.')
parser.add_argument('--dynamic-size', action='store_true', default=False,
                    help='Export model width dynamic width/height. Not recommended for "tf" models with SAME padding.')
parser.add_argument('-b', '--batch-size', default=1, type=int,
                    metavar='N', help='mini-batch size (default: 1)')
parser.add_argument('--img-size', default=None, type=int,
                    metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
                    help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float,  nargs='+', default=None, metavar='STD',
                    help='Override std deviation of of dataset')
parser.add_argument('--num-classes', type=int, default=1000,
                    help='Number classes in dataset')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
                    help='path to checkpoint (default: none)')
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def main():
    """Export the selected geffnet model to ONNX, validate the saved graph,
    and optionally cross-check the forward pass against the Caffe2 backend.

    All configuration comes from the module-level ``parser``; the ONNX graph
    is written to ``args.output``.
    """
    args = parser.parse_args()

    # A user-supplied checkpoint replaces the pretrained weights.
    args.pretrained = True
    if args.checkpoint:
        args.pretrained = False

    print("==> Creating PyTorch {} model".format(args.model))
    # NOTE exportable=True flag disables autofn/jit scripted activations and uses Conv2dSameExport layers
    # for models using SAME padding
    model = geffnet.create_model(
        args.model,
        num_classes=args.num_classes,
        in_chans=3,
        pretrained=args.pretrained,
        checkpoint_path=args.checkpoint,
        exportable=True)

    model.eval()

    example_input = torch.randn((args.batch_size, 3, args.img_size or 224, args.img_size or 224), requires_grad=True)

    # Run model once before export trace, sets padding for models with Conv2dSameExport. This means
    # that the padding for models with Conv2dSameExport (most models with tf_ prefix) is fixed for
    # the input img_size specified in this script.
    # Opset >= 11 should allow for dynamic padding, however I cannot get it to work due to
    # issues in the tracing of the dynamic padding or errors attempting to export the model after jit
    # scripting it (an approach that should work). Perhaps in a future PyTorch or ONNX versions...
    model(example_input)

    print("==> Exporting model to ONNX format at '{}'".format(args.output))
    input_names = ["input0"]
    output_names = ["output0"]
    dynamic_axes = {'input0': {0: 'batch'}, 'output0': {0: 'batch'}}
    if args.dynamic_size:
        dynamic_axes['input0'][2] = 'height'
        dynamic_axes['input0'][3] = 'width'
    if args.aten_fallback:
        export_type = torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK
    else:
        export_type = torch.onnx.OperatorExportTypes.ONNX

    # torch.onnx._export (unlike the public torch.onnx.export) returns the
    # traced output tensor, which the Caffe2 comparison below needs.
    torch_out = torch.onnx._export(
        model, example_input, args.output, export_params=True, verbose=True, input_names=input_names,
        output_names=output_names, keep_initializers_as_inputs=args.keep_init, dynamic_axes=dynamic_axes,
        opset_version=args.opset, operator_export_type=export_type)

    print("==> Loading and checking exported model from '{}'".format(args.output))
    onnx_model = onnx.load(args.output)
    onnx.checker.check_model(onnx_model)  # assuming throw on error
    print("==> Passed")

    if args.keep_init and args.aten_fallback:
        import caffe2.python.onnx.backend as onnx_caffe2
        # Caffe2 loading only works properly in newer PyTorch/ONNX combos when
        # keep_initializers_as_inputs and aten_fallback are set to True.
        print("==> Loading model into Caffe2 backend and comparing forward pass.")
        caffe2_backend = onnx_caffe2.prepare(onnx_model)
        # FIX: was `x.data.numpy()` -- `x` was never defined in this scope
        # (NameError at runtime); the traced input is `example_input`.
        B = {onnx_model.graph.input[0].name: example_input.data.numpy()}
        c2_out = caffe2_backend.run(B)[0]
        np.testing.assert_almost_equal(torch_out.data.numpy(), c2_out, decimal=5)
        print("==> Passed")


if __name__ == '__main__':
    main()
|
RAVE-main/annotator/normalbae/models/submodules/efficientnet_repo/onnx_optimize.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" ONNX optimization script
|
| 2 |
+
|
| 3 |
+
Run ONNX models through the optimizer to prune unneeded nodes, fuse batchnorm layers into conv, etc.
|
| 4 |
+
|
| 5 |
+
NOTE: This isn't working consistently in recent PyTorch/ONNX combos (ie PyTorch 1.6 and ONNX 1.7),
|
| 6 |
+
it seems time to switch to using the onnxruntime online optimizer (can also be saved for offline).
|
| 7 |
+
|
| 8 |
+
Copyright 2020 Ross Wightman
|
| 9 |
+
"""
|
| 10 |
+
import argparse
|
| 11 |
+
import warnings
|
| 12 |
+
|
| 13 |
+
import onnx
|
| 14 |
+
from onnx import optimizer
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# Command-line interface: positional input model path plus a required --output
# destination for the optimized graph.
parser = argparse.ArgumentParser(description="Optimize ONNX model")

parser.add_argument("model", help="The ONNX model")
parser.add_argument("--output", required=True, help="The optimized model output filename")
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def traverse_graph(graph, prefix=''):
    """Recursively render an ONNX graph (including subgraphs) to text.

    Returns a ``(num_nodes, printable_text)`` tuple where ``num_nodes``
    counts every node in this graph and all nested subgraphs.
    """
    indent = prefix + ' '
    rendered_lines = []
    nested_graphs = []
    total = 0
    for node in graph.node:
        printable, subgraphs = onnx.helper.printable_node(node, indent, subgraphs=True)
        assert isinstance(subgraphs, list)
        rendered_lines.append(printable)
        nested_graphs.extend(subgraphs)
        total += 1
    # Recurse into any subgraphs collected above and fold in their counts.
    for sub in nested_graphs:
        sub_count, sub_text = traverse_graph(sub)
        rendered_lines.append('\n' + sub_text)
        total += sub_count
    return total, '\n'.join(rendered_lines)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def main():
    """Load an ONNX model, apply the selected optimizer passes, report the
    node counts before and after, and save the optimized graph."""
    args = parser.parse_args()
    onnx_model = onnx.load(args.model)
    num_original_nodes, original_graph_str = traverse_graph(onnx_model.graph)

    # Optimizer passes to perform
    passes = [
        #'eliminate_deadend',
        'eliminate_identity',
        'eliminate_nop_dropout',
        'eliminate_nop_pad',
        'eliminate_nop_transpose',
        'eliminate_unused_initializer',
        'extract_constant_to_initializer',
        'fuse_add_bias_into_conv',
        'fuse_bn_into_conv',
        'fuse_consecutive_concats',
        'fuse_consecutive_reduce_unsqueeze',
        'fuse_consecutive_squeezes',
        'fuse_consecutive_transposes',
        #'fuse_matmul_add_bias_into_gemm',
        'fuse_pad_into_conv',
        #'fuse_transpose_into_gemm',
        #'lift_lexical_references',
    ]

    # Apply the optimization on the original serialized model
    # WARNING I've had issues with optimizer in recent versions of PyTorch / ONNX causing
    # 'duplicate definition of name' errors, see: https://github.com/onnx/onnx/issues/2401
    # It may be better to rely on onnxruntime optimizations, see onnx_validate.py script.
    warnings.warn("I've had issues with optimizer in recent versions of PyTorch / ONNX."
                  "Try onnxruntime optimization if this doesn't work.")
    optimized_model = optimizer.optimize(onnx_model, passes)

    num_optimized_nodes, optimized_graph_str = traverse_graph(optimized_model.graph)
    print('==> The model after optimization:\n{}\n'.format(optimized_graph_str))
    print('==> The optimized model has {} nodes, the original had {}.'.format(num_optimized_nodes, num_original_nodes))

    # Save the ONNX model
    onnx.save(optimized_model, args.output)


if __name__ == "__main__":
    main()
|
RAVE-main/annotator/normalbae/models/submodules/encoder.py
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import torch
|
| 3 |
+
import torch.nn as nn
|
| 4 |
+
import torch.nn.functional as F
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class Encoder(nn.Module):
|
| 8 |
+
def __init__(self):
|
| 9 |
+
super(Encoder, self).__init__()
|
| 10 |
+
|
| 11 |
+
basemodel_name = 'tf_efficientnet_b5_ap'
|
| 12 |
+
print('Loading base model ()...'.format(basemodel_name), end='')
|
| 13 |
+
repo_path = os.path.join(os.path.dirname(__file__), 'efficientnet_repo')
|
| 14 |
+
basemodel = torch.hub.load(repo_path, basemodel_name, pretrained=False, source='local')
|
| 15 |
+
print('Done.')
|
| 16 |
+
|
| 17 |
+
# Remove last layer
|
| 18 |
+
print('Removing last two layers (global_pool & classifier).')
|
| 19 |
+
basemodel.global_pool = nn.Identity()
|
| 20 |
+
basemodel.classifier = nn.Identity()
|
| 21 |
+
|
| 22 |
+
self.original_model = basemodel
|
| 23 |
+
|
| 24 |
+
def forward(self, x):
|
| 25 |
+
features = [x]
|
| 26 |
+
for k, v in self.original_model._modules.items():
|
| 27 |
+
if (k == 'blocks'):
|
| 28 |
+
for ki, vi in v._modules.items():
|
| 29 |
+
features.append(vi(features[-1]))
|
| 30 |
+
else:
|
| 31 |
+
features.append(v(features[-1]))
|
| 32 |
+
return features
|
| 33 |
+
|
| 34 |
+
|
RAVE-main/annotator/normalbae/models/submodules/submodules.py
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import torch.nn.functional as F
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
########################################################################################################################
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
# Upsample + BatchNorm
|
| 10 |
+
class UpSampleBN(nn.Module):
    """Decoder upsampling block: bilinearly upsample ``x`` to the skip
    feature's spatial size, concatenate along channels, then apply two
    Conv-BatchNorm-LeakyReLU stages.

    ``skip_input`` is the channel count after concatenation;
    ``output_features`` is the output channel count.
    """

    def __init__(self, skip_input, output_features):
        super(UpSampleBN, self).__init__()

        layers = [
            nn.Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(output_features),
            nn.LeakyReLU(),
            nn.Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(output_features),
            nn.LeakyReLU(),
        ]
        self._net = nn.Sequential(*layers)

    def forward(self, x, concat_with):
        target_size = [concat_with.size(2), concat_with.size(3)]
        upsampled = F.interpolate(x, size=target_size, mode='bilinear', align_corners=True)
        return self._net(torch.cat([upsampled, concat_with], dim=1))
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
# Upsample + GroupNorm + Weight Standardization
|
| 28 |
+
class UpSampleGN(nn.Module):
    """Decoder upsampling block using weight-standardized convolutions (the
    module-local ``Conv2d``) with GroupNorm(8) + LeakyReLU, applied after
    bilinear upsampling and skip concatenation."""

    def __init__(self, skip_input, output_features):
        super(UpSampleGN, self).__init__()

        layers = [
            Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1),
            nn.GroupNorm(8, output_features),
            nn.LeakyReLU(),
            Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1),
            nn.GroupNorm(8, output_features),
            nn.LeakyReLU(),
        ]
        self._net = nn.Sequential(*layers)

    def forward(self, x, concat_with):
        target_size = [concat_with.size(2), concat_with.size(3)]
        upsampled = F.interpolate(x, size=target_size, mode='bilinear', align_corners=True)
        return self._net(torch.cat([upsampled, concat_with], dim=1))
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
# Conv2d with weight standardization
|
| 46 |
+
class Conv2d(nn.Conv2d):
    """Conv2d with Weight Standardization: at every forward pass the kernel
    is re-centered to zero mean and scaled to (approximately) unit standard
    deviation per output channel before the convolution is applied."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride,
                                     padding, dilation, groups, bias)

    def forward(self, x):
        w = self.weight
        # Per-output-channel mean over (in_channels, kH, kW), computed
        # dimension by dimension.
        mean = w.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3, keepdim=True)
        centered = w - mean
        # Per-output-channel std; the 1e-5 floor avoids division by zero.
        std = centered.view(centered.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-5
        standardized = centered / std.expand_as(centered)
        return F.conv2d(x, standardized, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
# Normalize a predicted surface-normal map: 3 direction channels + kappa.
def norm_normalize(norm_out):
    """Given a (B, 4, H, W) tensor of raw (nx, ny, nz, kappa) predictions,
    return the unit-normalized direction plus a positive concentration
    ``kappa`` (floored at ``1 + min_kappa`` via ELU)."""
    min_kappa = 0.01
    norm_x, norm_y, norm_z, kappa = torch.split(norm_out, 1, dim=1)
    # Euclidean magnitude of the direction; epsilon guards against /0.
    magnitude = torch.sqrt(norm_x ** 2.0 + norm_y ** 2.0 + norm_z ** 2.0) + 1e-10
    kappa = F.elu(kappa) + 1.0 + min_kappa
    return torch.cat([norm_x / magnitude, norm_y / magnitude, norm_z / magnitude, kappa], dim=1)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
# uncertainty-guided sampling (only used during training)
|
| 74 |
+
@torch.no_grad()
|
| 75 |
+
def sample_points(init_normal, gt_norm_mask, sampling_ratio, beta):
|
| 76 |
+
device = init_normal.device
|
| 77 |
+
B, _, H, W = init_normal.shape
|
| 78 |
+
N = int(sampling_ratio * H * W)
|
| 79 |
+
beta = beta
|
| 80 |
+
|
| 81 |
+
# uncertainty map
|
| 82 |
+
uncertainty_map = -1 * init_normal[:, 3, :, :] # B, H, W
|
| 83 |
+
|
| 84 |
+
# gt_invalid_mask (B, H, W)
|
| 85 |
+
if gt_norm_mask is not None:
|
| 86 |
+
gt_invalid_mask = F.interpolate(gt_norm_mask.float(), size=[H, W], mode='nearest')
|
| 87 |
+
gt_invalid_mask = gt_invalid_mask[:, 0, :, :] < 0.5
|
| 88 |
+
uncertainty_map[gt_invalid_mask] = -1e4
|
| 89 |
+
|
| 90 |
+
# (B, H*W)
|
| 91 |
+
_, idx = uncertainty_map.view(B, -1).sort(1, descending=True)
|
| 92 |
+
|
| 93 |
+
# importance sampling
|
| 94 |
+
if int(beta * N) > 0:
|
| 95 |
+
importance = idx[:, :int(beta * N)] # B, beta*N
|
| 96 |
+
|
| 97 |
+
# remaining
|
| 98 |
+
remaining = idx[:, int(beta * N):] # B, H*W - beta*N
|
| 99 |
+
|
| 100 |
+
# coverage
|
| 101 |
+
num_coverage = N - int(beta * N)
|
| 102 |
+
|
| 103 |
+
if num_coverage <= 0:
|
| 104 |
+
samples = importance
|
| 105 |
+
else:
|
| 106 |
+
coverage_list = []
|
| 107 |
+
for i in range(B):
|
| 108 |
+
idx_c = torch.randperm(remaining.size()[1]) # shuffles "H*W - beta*N"
|
| 109 |
+
coverage_list.append(remaining[i, :][idx_c[:num_coverage]].view(1, -1)) # 1, N-beta*N
|
| 110 |
+
coverage = torch.cat(coverage_list, dim=0) # B, N-beta*N
|
| 111 |
+
samples = torch.cat((importance, coverage), dim=1) # B, N
|
| 112 |
+
|
| 113 |
+
else:
|
| 114 |
+
# remaining
|
| 115 |
+
remaining = idx[:, :] # B, H*W
|
| 116 |
+
|
| 117 |
+
# coverage
|
| 118 |
+
num_coverage = N
|
| 119 |
+
|
| 120 |
+
coverage_list = []
|
| 121 |
+
for i in range(B):
|
| 122 |
+
idx_c = torch.randperm(remaining.size()[1]) # shuffles "H*W - beta*N"
|
| 123 |
+
coverage_list.append(remaining[i, :][idx_c[:num_coverage]].view(1, -1)) # 1, N-beta*N
|
| 124 |
+
coverage = torch.cat(coverage_list, dim=0) # B, N-beta*N
|
| 125 |
+
samples = coverage
|
| 126 |
+
|
| 127 |
+
# point coordinates
|
| 128 |
+
rows_int = samples // W # 0 for first row, H-1 for last row
|
| 129 |
+
rows_float = rows_int / float(H-1) # 0 to 1.0
|
| 130 |
+
rows_float = (rows_float * 2.0) - 1.0 # -1.0 to 1.0
|
| 131 |
+
|
| 132 |
+
cols_int = samples % W # 0 for first column, W-1 for last column
|
| 133 |
+
cols_float = cols_int / float(W-1) # 0 to 1.0
|
| 134 |
+
cols_float = (cols_float * 2.0) - 1.0 # -1.0 to 1.0
|
| 135 |
+
|
| 136 |
+
point_coords = torch.zeros(B, 1, N, 2)
|
| 137 |
+
point_coords[:, 0, :, 0] = cols_float # x coord
|
| 138 |
+
point_coords[:, 0, :, 1] = rows_float # y coord
|
| 139 |
+
point_coords = point_coords.to(device)
|
| 140 |
+
return point_coords, rows_int, cols_int
|
RAVE-main/annotator/oneformer/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2022 Caroline Chan
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
RAVE-main/annotator/oneformer/__init__.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from modules import devices
|
| 3 |
+
from annotator.annotator_path import models_path
|
| 4 |
+
from .api import make_detectron2_model, semantic_run
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class OneformerDetector:
|
| 8 |
+
model_dir = os.path.join(models_path, "oneformer")
|
| 9 |
+
configs = {
|
| 10 |
+
"coco": {
|
| 11 |
+
"name": "150_16_swin_l_oneformer_coco_100ep.pth",
|
| 12 |
+
"config": 'configs/coco/oneformer_swin_large_IN21k_384_bs16_100ep.yaml'
|
| 13 |
+
},
|
| 14 |
+
"ade20k": {
|
| 15 |
+
"name": "250_16_swin_l_oneformer_ade20k_160k.pth",
|
| 16 |
+
"config": 'configs/ade20k/oneformer_swin_large_IN21k_384_bs16_160k.yaml'
|
| 17 |
+
}
|
| 18 |
+
}
|
| 19 |
+
|
| 20 |
+
def __init__(self, config):
|
| 21 |
+
self.model = None
|
| 22 |
+
self.metadata = None
|
| 23 |
+
self.config = config
|
| 24 |
+
self.device = devices.get_device_for("controlnet")
|
| 25 |
+
|
| 26 |
+
def load_model(self):
|
| 27 |
+
remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/" + self.config["name"]
|
| 28 |
+
modelpath = os.path.join(self.model_dir, self.config["name"])
|
| 29 |
+
if not os.path.exists(modelpath):
|
| 30 |
+
from basicsr.utils.download_util import load_file_from_url
|
| 31 |
+
load_file_from_url(remote_model_path, model_dir=self.model_dir)
|
| 32 |
+
config = os.path.join(os.path.dirname(__file__), self.config["config"])
|
| 33 |
+
model, self.metadata = make_detectron2_model(config, modelpath)
|
| 34 |
+
self.model = model
|
| 35 |
+
|
| 36 |
+
def unload_model(self):
|
| 37 |
+
if self.model is not None:
|
| 38 |
+
self.model.model.cpu()
|
| 39 |
+
|
| 40 |
+
def __call__(self, img):
|
| 41 |
+
if self.model is None:
|
| 42 |
+
self.load_model()
|
| 43 |
+
|
| 44 |
+
self.model.model.to(self.device)
|
| 45 |
+
return semantic_run(img, self.model, self.metadata)
|
RAVE-main/annotator/oneformer/api.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
|
| 3 |
+
|
| 4 |
+
import torch
|
| 5 |
+
|
| 6 |
+
from annotator.oneformer.detectron2.config import get_cfg
|
| 7 |
+
from annotator.oneformer.detectron2.projects.deeplab import add_deeplab_config
|
| 8 |
+
from annotator.oneformer.detectron2.data import MetadataCatalog
|
| 9 |
+
|
| 10 |
+
from annotator.oneformer.oneformer import (
|
| 11 |
+
add_oneformer_config,
|
| 12 |
+
add_common_config,
|
| 13 |
+
add_swin_config,
|
| 14 |
+
add_dinat_config,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
from annotator.oneformer.oneformer.demo.defaults import DefaultPredictor
|
| 18 |
+
from annotator.oneformer.oneformer.demo.visualizer import Visualizer, ColorMode
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def make_detectron2_model(config_path, ckpt_path):
|
| 22 |
+
cfg = get_cfg()
|
| 23 |
+
add_deeplab_config(cfg)
|
| 24 |
+
add_common_config(cfg)
|
| 25 |
+
add_swin_config(cfg)
|
| 26 |
+
add_oneformer_config(cfg)
|
| 27 |
+
add_dinat_config(cfg)
|
| 28 |
+
cfg.merge_from_file(config_path)
|
| 29 |
+
cfg.MODEL.WEIGHTS = ckpt_path
|
| 30 |
+
cfg.freeze()
|
| 31 |
+
metadata = MetadataCatalog.get(cfg.DATASETS.TEST_PANOPTIC[0] if len(cfg.DATASETS.TEST_PANOPTIC) else "__unused")
|
| 32 |
+
return DefaultPredictor(cfg), metadata
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def semantic_run(img, predictor, metadata):
|
| 36 |
+
predictions = predictor(img[:, :, ::-1], "semantic") # Predictor of OneFormer must use BGR image !!!
|
| 37 |
+
visualizer_map = Visualizer(img, is_img=False, metadata=metadata, instance_mode=ColorMode.IMAGE)
|
| 38 |
+
out_map = visualizer_map.draw_sem_seg(predictions["sem_seg"].argmax(dim=0).cpu(), alpha=1, is_text=False).get_image()
|
| 39 |
+
return out_map
|
RAVE-main/annotator/oneformer/configs/ade20k/Base-ADE20K-UnifiedSegmentation.yaml
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MODEL:
|
| 2 |
+
BACKBONE:
|
| 3 |
+
FREEZE_AT: 0
|
| 4 |
+
NAME: "build_resnet_backbone"
|
| 5 |
+
WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl"
|
| 6 |
+
PIXEL_MEAN: [123.675, 116.280, 103.530]
|
| 7 |
+
PIXEL_STD: [58.395, 57.120, 57.375]
|
| 8 |
+
RESNETS:
|
| 9 |
+
DEPTH: 50
|
| 10 |
+
STEM_TYPE: "basic" # not used
|
| 11 |
+
STEM_OUT_CHANNELS: 64
|
| 12 |
+
STRIDE_IN_1X1: False
|
| 13 |
+
OUT_FEATURES: ["res2", "res3", "res4", "res5"]
|
| 14 |
+
# NORM: "SyncBN"
|
| 15 |
+
RES5_MULTI_GRID: [1, 1, 1] # not used
|
| 16 |
+
DATASETS:
|
| 17 |
+
TRAIN: ("ade20k_panoptic_train",)
|
| 18 |
+
TEST_PANOPTIC: ("ade20k_panoptic_val",)
|
| 19 |
+
TEST_INSTANCE: ("ade20k_instance_val",)
|
| 20 |
+
TEST_SEMANTIC: ("ade20k_sem_seg_val",)
|
| 21 |
+
SOLVER:
|
| 22 |
+
IMS_PER_BATCH: 16
|
| 23 |
+
BASE_LR: 0.0001
|
| 24 |
+
MAX_ITER: 160000
|
| 25 |
+
WARMUP_FACTOR: 1.0
|
| 26 |
+
WARMUP_ITERS: 0
|
| 27 |
+
WEIGHT_DECAY: 0.05
|
| 28 |
+
OPTIMIZER: "ADAMW"
|
| 29 |
+
LR_SCHEDULER_NAME: "WarmupPolyLR"
|
| 30 |
+
BACKBONE_MULTIPLIER: 0.1
|
| 31 |
+
CLIP_GRADIENTS:
|
| 32 |
+
ENABLED: True
|
| 33 |
+
CLIP_TYPE: "full_model"
|
| 34 |
+
CLIP_VALUE: 0.01
|
| 35 |
+
NORM_TYPE: 2.0
|
| 36 |
+
AMP:
|
| 37 |
+
ENABLED: True
|
| 38 |
+
INPUT:
|
| 39 |
+
MIN_SIZE_TRAIN: !!python/object/apply:eval ["[int(x * 0.1 * 512) for x in range(5, 21)]"]
|
| 40 |
+
MIN_SIZE_TRAIN_SAMPLING: "choice"
|
| 41 |
+
MIN_SIZE_TEST: 512
|
| 42 |
+
MAX_SIZE_TRAIN: 2048
|
| 43 |
+
MAX_SIZE_TEST: 2048
|
| 44 |
+
CROP:
|
| 45 |
+
ENABLED: True
|
| 46 |
+
TYPE: "absolute"
|
| 47 |
+
SIZE: (512, 512)
|
| 48 |
+
SINGLE_CATEGORY_MAX_AREA: 1.0
|
| 49 |
+
COLOR_AUG_SSD: True
|
| 50 |
+
SIZE_DIVISIBILITY: 512 # used in dataset mapper
|
| 51 |
+
FORMAT: "RGB"
|
| 52 |
+
DATASET_MAPPER_NAME: "oneformer_unified"
|
| 53 |
+
MAX_SEQ_LEN: 77
|
| 54 |
+
TASK_SEQ_LEN: 77
|
| 55 |
+
TASK_PROB:
|
| 56 |
+
SEMANTIC: 0.33
|
| 57 |
+
INSTANCE: 0.66
|
| 58 |
+
TEST:
|
| 59 |
+
EVAL_PERIOD: 5000
|
| 60 |
+
AUG:
|
| 61 |
+
ENABLED: False
|
| 62 |
+
MIN_SIZES: [256, 384, 512, 640, 768, 896]
|
| 63 |
+
MAX_SIZE: 3584
|
| 64 |
+
FLIP: True
|
| 65 |
+
DATALOADER:
|
| 66 |
+
FILTER_EMPTY_ANNOTATIONS: True
|
| 67 |
+
NUM_WORKERS: 4
|
| 68 |
+
VERSION: 2
|
RAVE-main/annotator/oneformer/configs/ade20k/oneformer_R50_bs16_160k.yaml
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_BASE_: Base-ADE20K-UnifiedSegmentation.yaml
|
| 2 |
+
MODEL:
|
| 3 |
+
META_ARCHITECTURE: "OneFormer"
|
| 4 |
+
SEM_SEG_HEAD:
|
| 5 |
+
NAME: "OneFormerHead"
|
| 6 |
+
IGNORE_VALUE: 255
|
| 7 |
+
NUM_CLASSES: 150
|
| 8 |
+
LOSS_WEIGHT: 1.0
|
| 9 |
+
CONVS_DIM: 256
|
| 10 |
+
MASK_DIM: 256
|
| 11 |
+
NORM: "GN"
|
| 12 |
+
# pixel decoder
|
| 13 |
+
PIXEL_DECODER_NAME: "MSDeformAttnPixelDecoder"
|
| 14 |
+
IN_FEATURES: ["res2", "res3", "res4", "res5"]
|
| 15 |
+
DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES: ["res3", "res4", "res5"]
|
| 16 |
+
COMMON_STRIDE: 4
|
| 17 |
+
TRANSFORMER_ENC_LAYERS: 6
|
| 18 |
+
ONE_FORMER:
|
| 19 |
+
TRANSFORMER_DECODER_NAME: "ContrastiveMultiScaleMaskedTransformerDecoder"
|
| 20 |
+
TRANSFORMER_IN_FEATURE: "multi_scale_pixel_decoder"
|
| 21 |
+
DEEP_SUPERVISION: True
|
| 22 |
+
NO_OBJECT_WEIGHT: 0.1
|
| 23 |
+
CLASS_WEIGHT: 2.0
|
| 24 |
+
MASK_WEIGHT: 5.0
|
| 25 |
+
DICE_WEIGHT: 5.0
|
| 26 |
+
CONTRASTIVE_WEIGHT: 0.5
|
| 27 |
+
CONTRASTIVE_TEMPERATURE: 0.07
|
| 28 |
+
HIDDEN_DIM: 256
|
| 29 |
+
NUM_OBJECT_QUERIES: 150
|
| 30 |
+
USE_TASK_NORM: True
|
| 31 |
+
NHEADS: 8
|
| 32 |
+
DROPOUT: 0.1
|
| 33 |
+
DIM_FEEDFORWARD: 2048
|
| 34 |
+
ENC_LAYERS: 0
|
| 35 |
+
PRE_NORM: False
|
| 36 |
+
ENFORCE_INPUT_PROJ: False
|
| 37 |
+
SIZE_DIVISIBILITY: 32
|
| 38 |
+
CLASS_DEC_LAYERS: 2
|
| 39 |
+
DEC_LAYERS: 10 # 9 decoder layers, add one for the loss on learnable query
|
| 40 |
+
TRAIN_NUM_POINTS: 12544
|
| 41 |
+
OVERSAMPLE_RATIO: 3.0
|
| 42 |
+
IMPORTANCE_SAMPLE_RATIO: 0.75
|
| 43 |
+
TEXT_ENCODER:
|
| 44 |
+
WIDTH: 256
|
| 45 |
+
CONTEXT_LENGTH: 77
|
| 46 |
+
NUM_LAYERS: 6
|
| 47 |
+
VOCAB_SIZE: 49408
|
| 48 |
+
PROJ_NUM_LAYERS: 2
|
| 49 |
+
N_CTX: 16
|
| 50 |
+
TEST:
|
| 51 |
+
SEMANTIC_ON: True
|
| 52 |
+
INSTANCE_ON: True
|
| 53 |
+
PANOPTIC_ON: True
|
| 54 |
+
OVERLAP_THRESHOLD: 0.8
|
| 55 |
+
OBJECT_MASK_THRESHOLD: 0.8
|
| 56 |
+
TASK: "panoptic"
|
| 57 |
+
TEST:
|
| 58 |
+
DETECTIONS_PER_IMAGE: 150
|
RAVE-main/annotator/oneformer/configs/ade20k/oneformer_swin_large_IN21k_384_bs16_160k.yaml
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_BASE_: oneformer_R50_bs16_160k.yaml
|
| 2 |
+
MODEL:
|
| 3 |
+
BACKBONE:
|
| 4 |
+
NAME: "D2SwinTransformer"
|
| 5 |
+
SWIN:
|
| 6 |
+
EMBED_DIM: 192
|
| 7 |
+
DEPTHS: [2, 2, 18, 2]
|
| 8 |
+
NUM_HEADS: [6, 12, 24, 48]
|
| 9 |
+
WINDOW_SIZE: 12
|
| 10 |
+
APE: False
|
| 11 |
+
DROP_PATH_RATE: 0.3
|
| 12 |
+
PATCH_NORM: True
|
| 13 |
+
PRETRAIN_IMG_SIZE: 384
|
| 14 |
+
WEIGHTS: "swin_large_patch4_window12_384_22k.pkl"
|
| 15 |
+
PIXEL_MEAN: [123.675, 116.280, 103.530]
|
| 16 |
+
PIXEL_STD: [58.395, 57.120, 57.375]
|
| 17 |
+
ONE_FORMER:
|
| 18 |
+
NUM_OBJECT_QUERIES: 250
|
| 19 |
+
INPUT:
|
| 20 |
+
MIN_SIZE_TRAIN: !!python/object/apply:eval ["[int(x * 0.1 * 640) for x in range(5, 21)]"]
|
| 21 |
+
MIN_SIZE_TRAIN_SAMPLING: "choice"
|
| 22 |
+
MIN_SIZE_TEST: 640
|
| 23 |
+
MAX_SIZE_TRAIN: 2560
|
| 24 |
+
MAX_SIZE_TEST: 2560
|
| 25 |
+
CROP:
|
| 26 |
+
ENABLED: True
|
| 27 |
+
TYPE: "absolute"
|
| 28 |
+
SIZE: (640, 640)
|
| 29 |
+
SINGLE_CATEGORY_MAX_AREA: 1.0
|
| 30 |
+
COLOR_AUG_SSD: True
|
| 31 |
+
SIZE_DIVISIBILITY: 640 # used in dataset mapper
|
| 32 |
+
FORMAT: "RGB"
|
| 33 |
+
TEST:
|
| 34 |
+
DETECTIONS_PER_IMAGE: 250
|
| 35 |
+
EVAL_PERIOD: 5000
|
| 36 |
+
AUG:
|
| 37 |
+
ENABLED: False
|
| 38 |
+
MIN_SIZES: [320, 480, 640, 800, 960, 1120]
|
| 39 |
+
MAX_SIZE: 4480
|
| 40 |
+
FLIP: True
|
RAVE-main/annotator/oneformer/configs/coco/Base-COCO-UnifiedSegmentation.yaml
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MODEL:
|
| 2 |
+
BACKBONE:
|
| 3 |
+
FREEZE_AT: 0
|
| 4 |
+
NAME: "build_resnet_backbone"
|
| 5 |
+
WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl"
|
| 6 |
+
PIXEL_MEAN: [123.675, 116.280, 103.530]
|
| 7 |
+
PIXEL_STD: [58.395, 57.120, 57.375]
|
| 8 |
+
RESNETS:
|
| 9 |
+
DEPTH: 50
|
| 10 |
+
STEM_TYPE: "basic" # not used
|
| 11 |
+
STEM_OUT_CHANNELS: 64
|
| 12 |
+
STRIDE_IN_1X1: False
|
| 13 |
+
OUT_FEATURES: ["res2", "res3", "res4", "res5"]
|
| 14 |
+
# NORM: "SyncBN"
|
| 15 |
+
RES5_MULTI_GRID: [1, 1, 1] # not used
|
| 16 |
+
DATASETS:
|
| 17 |
+
TRAIN: ("coco_2017_train_panoptic_with_sem_seg",)
|
| 18 |
+
TEST_PANOPTIC: ("coco_2017_val_panoptic_with_sem_seg",) # to evaluate instance and semantic performance as well
|
| 19 |
+
TEST_INSTANCE: ("coco_2017_val",)
|
| 20 |
+
TEST_SEMANTIC: ("coco_2017_val_panoptic_with_sem_seg",)
|
| 21 |
+
SOLVER:
|
| 22 |
+
IMS_PER_BATCH: 16
|
| 23 |
+
BASE_LR: 0.0001
|
| 24 |
+
STEPS: (327778, 355092)
|
| 25 |
+
MAX_ITER: 368750
|
| 26 |
+
WARMUP_FACTOR: 1.0
|
| 27 |
+
WARMUP_ITERS: 10
|
| 28 |
+
WEIGHT_DECAY: 0.05
|
| 29 |
+
OPTIMIZER: "ADAMW"
|
| 30 |
+
BACKBONE_MULTIPLIER: 0.1
|
| 31 |
+
CLIP_GRADIENTS:
|
| 32 |
+
ENABLED: True
|
| 33 |
+
CLIP_TYPE: "full_model"
|
| 34 |
+
CLIP_VALUE: 0.01
|
| 35 |
+
NORM_TYPE: 2.0
|
| 36 |
+
AMP:
|
| 37 |
+
ENABLED: True
|
| 38 |
+
INPUT:
|
| 39 |
+
IMAGE_SIZE: 1024
|
| 40 |
+
MIN_SCALE: 0.1
|
| 41 |
+
MAX_SCALE: 2.0
|
| 42 |
+
FORMAT: "RGB"
|
| 43 |
+
DATASET_MAPPER_NAME: "coco_unified_lsj"
|
| 44 |
+
MAX_SEQ_LEN: 77
|
| 45 |
+
TASK_SEQ_LEN: 77
|
| 46 |
+
TASK_PROB:
|
| 47 |
+
SEMANTIC: 0.33
|
| 48 |
+
INSTANCE: 0.66
|
| 49 |
+
TEST:
|
| 50 |
+
EVAL_PERIOD: 5000
|
| 51 |
+
DATALOADER:
|
| 52 |
+
FILTER_EMPTY_ANNOTATIONS: True
|
| 53 |
+
NUM_WORKERS: 4
|
| 54 |
+
VERSION: 2
|
RAVE-main/annotator/oneformer/configs/coco/oneformer_R50_bs16_50ep.yaml
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_BASE_: Base-COCO-UnifiedSegmentation.yaml
|
| 2 |
+
MODEL:
|
| 3 |
+
META_ARCHITECTURE: "OneFormer"
|
| 4 |
+
SEM_SEG_HEAD:
|
| 5 |
+
NAME: "OneFormerHead"
|
| 6 |
+
IGNORE_VALUE: 255
|
| 7 |
+
NUM_CLASSES: 133
|
| 8 |
+
LOSS_WEIGHT: 1.0
|
| 9 |
+
CONVS_DIM: 256
|
| 10 |
+
MASK_DIM: 256
|
| 11 |
+
NORM: "GN"
|
| 12 |
+
# pixel decoder
|
| 13 |
+
PIXEL_DECODER_NAME: "MSDeformAttnPixelDecoder"
|
| 14 |
+
IN_FEATURES: ["res2", "res3", "res4", "res5"]
|
| 15 |
+
DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES: ["res3", "res4", "res5"]
|
| 16 |
+
COMMON_STRIDE: 4
|
| 17 |
+
TRANSFORMER_ENC_LAYERS: 6
|
| 18 |
+
ONE_FORMER:
|
| 19 |
+
TRANSFORMER_DECODER_NAME: "ContrastiveMultiScaleMaskedTransformerDecoder"
|
| 20 |
+
TRANSFORMER_IN_FEATURE: "multi_scale_pixel_decoder"
|
| 21 |
+
DEEP_SUPERVISION: True
|
| 22 |
+
NO_OBJECT_WEIGHT: 0.1
|
| 23 |
+
CLASS_WEIGHT: 2.0
|
| 24 |
+
MASK_WEIGHT: 5.0
|
| 25 |
+
DICE_WEIGHT: 5.0
|
| 26 |
+
CONTRASTIVE_WEIGHT: 0.5
|
| 27 |
+
CONTRASTIVE_TEMPERATURE: 0.07
|
| 28 |
+
HIDDEN_DIM: 256
|
| 29 |
+
NUM_OBJECT_QUERIES: 150
|
| 30 |
+
USE_TASK_NORM: True
|
| 31 |
+
NHEADS: 8
|
| 32 |
+
DROPOUT: 0.1
|
| 33 |
+
DIM_FEEDFORWARD: 2048
|
| 34 |
+
ENC_LAYERS: 0
|
| 35 |
+
PRE_NORM: False
|
| 36 |
+
ENFORCE_INPUT_PROJ: False
|
| 37 |
+
SIZE_DIVISIBILITY: 32
|
| 38 |
+
CLASS_DEC_LAYERS: 2
|
| 39 |
+
DEC_LAYERS: 10 # 9 decoder layers, add one for the loss on learnable query
|
| 40 |
+
TRAIN_NUM_POINTS: 12544
|
| 41 |
+
OVERSAMPLE_RATIO: 3.0
|
| 42 |
+
IMPORTANCE_SAMPLE_RATIO: 0.75
|
| 43 |
+
TEXT_ENCODER:
|
| 44 |
+
WIDTH: 256
|
| 45 |
+
CONTEXT_LENGTH: 77
|
| 46 |
+
NUM_LAYERS: 6
|
| 47 |
+
VOCAB_SIZE: 49408
|
| 48 |
+
PROJ_NUM_LAYERS: 2
|
| 49 |
+
N_CTX: 16
|
| 50 |
+
TEST:
|
| 51 |
+
SEMANTIC_ON: True
|
| 52 |
+
INSTANCE_ON: True
|
| 53 |
+
PANOPTIC_ON: True
|
| 54 |
+
DETECTION_ON: False
|
| 55 |
+
OVERLAP_THRESHOLD: 0.8
|
| 56 |
+
OBJECT_MASK_THRESHOLD: 0.8
|
| 57 |
+
TASK: "panoptic"
|
| 58 |
+
TEST:
|
| 59 |
+
DETECTIONS_PER_IMAGE: 150
|
RAVE-main/annotator/oneformer/configs/coco/oneformer_swin_large_IN21k_384_bs16_100ep.yaml
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
_BASE_: oneformer_R50_bs16_50ep.yaml
|
| 2 |
+
MODEL:
|
| 3 |
+
BACKBONE:
|
| 4 |
+
NAME: "D2SwinTransformer"
|
| 5 |
+
SWIN:
|
| 6 |
+
EMBED_DIM: 192
|
| 7 |
+
DEPTHS: [2, 2, 18, 2]
|
| 8 |
+
NUM_HEADS: [6, 12, 24, 48]
|
| 9 |
+
WINDOW_SIZE: 12
|
| 10 |
+
APE: False
|
| 11 |
+
DROP_PATH_RATE: 0.3
|
| 12 |
+
PATCH_NORM: True
|
| 13 |
+
PRETRAIN_IMG_SIZE: 384
|
| 14 |
+
WEIGHTS: "swin_large_patch4_window12_384_22k.pkl"
|
| 15 |
+
PIXEL_MEAN: [123.675, 116.280, 103.530]
|
| 16 |
+
PIXEL_STD: [58.395, 57.120, 57.375]
|
| 17 |
+
ONE_FORMER:
|
| 18 |
+
NUM_OBJECT_QUERIES: 150
|
| 19 |
+
SOLVER:
|
| 20 |
+
STEPS: (655556, 735184)
|
| 21 |
+
MAX_ITER: 737500
|
| 22 |
+
AMP:
|
| 23 |
+
ENABLED: False
|
| 24 |
+
TEST:
|
| 25 |
+
DETECTIONS_PER_IMAGE: 150
|
RAVE-main/annotator/oneformer/oneformer/__init__.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
from . import data # register all new datasets
|
| 3 |
+
from . import modeling
|
| 4 |
+
|
| 5 |
+
# config
|
| 6 |
+
from .config import *
|
| 7 |
+
|
| 8 |
+
# models
|
| 9 |
+
from .oneformer_model import OneFormer
|
RAVE-main/annotator/oneformer/oneformer/config.py
ADDED
|
@@ -0,0 +1,239 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
|
| 2 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 3 |
+
from annotator.oneformer.detectron2.config import CfgNode as CN
|
| 4 |
+
|
| 5 |
+
__all__ = ["add_common_config", "add_oneformer_config", "add_swin_config",
|
| 6 |
+
"add_dinat_config", "add_beit_adapter_config", "add_convnext_config"]
|
| 7 |
+
|
| 8 |
+
def add_common_config(cfg):
|
| 9 |
+
"""
|
| 10 |
+
Add config for common configuration
|
| 11 |
+
"""
|
| 12 |
+
# data config
|
| 13 |
+
# select the dataset mapper
|
| 14 |
+
cfg.INPUT.DATASET_MAPPER_NAME = "oneformer_unified"
|
| 15 |
+
# Color augmentation
|
| 16 |
+
cfg.INPUT.COLOR_AUG_SSD = False
|
| 17 |
+
# We retry random cropping until no single category in semantic segmentation GT occupies more
|
| 18 |
+
# than `SINGLE_CATEGORY_MAX_AREA` part of the crop.
|
| 19 |
+
cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0
|
| 20 |
+
# Pad image and segmentation GT in dataset mapper.
|
| 21 |
+
cfg.INPUT.SIZE_DIVISIBILITY = -1
|
| 22 |
+
|
| 23 |
+
cfg.INPUT.TASK_SEQ_LEN = 77
|
| 24 |
+
cfg.INPUT.MAX_SEQ_LEN = 77
|
| 25 |
+
|
| 26 |
+
cfg.INPUT.TASK_PROB = CN()
|
| 27 |
+
cfg.INPUT.TASK_PROB.SEMANTIC = 0.33
|
| 28 |
+
cfg.INPUT.TASK_PROB.INSTANCE = 0.66
|
| 29 |
+
|
| 30 |
+
# test dataset
|
| 31 |
+
cfg.DATASETS.TEST_PANOPTIC = ("",)
|
| 32 |
+
cfg.DATASETS.TEST_INSTANCE = ("",)
|
| 33 |
+
cfg.DATASETS.TEST_SEMANTIC = ("",)
|
| 34 |
+
|
| 35 |
+
# solver config
|
| 36 |
+
# weight decay on embedding
|
| 37 |
+
cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0
|
| 38 |
+
# optimizer
|
| 39 |
+
cfg.SOLVER.OPTIMIZER = "ADAMW"
|
| 40 |
+
cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1
|
| 41 |
+
|
| 42 |
+
# wandb
|
| 43 |
+
cfg.WANDB = CN()
|
| 44 |
+
cfg.WANDB.PROJECT = "unified_dense_recognition"
|
| 45 |
+
cfg.WANDB.NAME = None
|
| 46 |
+
|
| 47 |
+
cfg.MODEL.IS_TRAIN = False
|
| 48 |
+
cfg.MODEL.IS_DEMO = True
|
| 49 |
+
|
| 50 |
+
# text encoder config
|
| 51 |
+
cfg.MODEL.TEXT_ENCODER = CN()
|
| 52 |
+
|
| 53 |
+
cfg.MODEL.TEXT_ENCODER.WIDTH = 256
|
| 54 |
+
cfg.MODEL.TEXT_ENCODER.CONTEXT_LENGTH = 77
|
| 55 |
+
cfg.MODEL.TEXT_ENCODER.NUM_LAYERS = 12
|
| 56 |
+
cfg.MODEL.TEXT_ENCODER.VOCAB_SIZE = 49408
|
| 57 |
+
cfg.MODEL.TEXT_ENCODER.PROJ_NUM_LAYERS = 2
|
| 58 |
+
cfg.MODEL.TEXT_ENCODER.N_CTX = 16
|
| 59 |
+
|
| 60 |
+
# mask_former inference config
|
| 61 |
+
cfg.MODEL.TEST = CN()
|
| 62 |
+
cfg.MODEL.TEST.SEMANTIC_ON = True
|
| 63 |
+
cfg.MODEL.TEST.INSTANCE_ON = False
|
| 64 |
+
cfg.MODEL.TEST.PANOPTIC_ON = False
|
| 65 |
+
cfg.MODEL.TEST.DETECTION_ON = False
|
| 66 |
+
cfg.MODEL.TEST.OBJECT_MASK_THRESHOLD = 0.0
|
| 67 |
+
cfg.MODEL.TEST.OVERLAP_THRESHOLD = 0.0
|
| 68 |
+
cfg.MODEL.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False
|
| 69 |
+
cfg.MODEL.TEST.TASK = "panoptic"
|
| 70 |
+
|
| 71 |
+
# TEST AUG Slide
|
| 72 |
+
cfg.TEST.AUG.IS_SLIDE = False
|
| 73 |
+
cfg.TEST.AUG.CROP_SIZE = (640, 640)
|
| 74 |
+
cfg.TEST.AUG.STRIDE = (426, 426)
|
| 75 |
+
cfg.TEST.AUG.SCALE = (2048, 640)
|
| 76 |
+
cfg.TEST.AUG.SETR_MULTI_SCALE = True
|
| 77 |
+
cfg.TEST.AUG.KEEP_RATIO = True
|
| 78 |
+
cfg.TEST.AUG.SIZE_DIVISOR = 32
|
| 79 |
+
|
| 80 |
+
# pixel decoder config
|
| 81 |
+
cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256
|
| 82 |
+
# adding transformer in pixel decoder
|
| 83 |
+
cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0
|
| 84 |
+
# pixel decoder
|
| 85 |
+
cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = "BasePixelDecoder"
|
| 86 |
+
cfg.MODEL.SEM_SEG_HEAD.SEM_EMBED_DIM = 256
|
| 87 |
+
cfg.MODEL.SEM_SEG_HEAD.INST_EMBED_DIM = 256
|
| 88 |
+
|
| 89 |
+
# LSJ aug
|
| 90 |
+
cfg.INPUT.IMAGE_SIZE = 1024
|
| 91 |
+
cfg.INPUT.MIN_SCALE = 0.1
|
| 92 |
+
cfg.INPUT.MAX_SCALE = 2.0
|
| 93 |
+
|
| 94 |
+
# MSDeformAttn encoder configs
|
| 95 |
+
cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = ["res3", "res4", "res5"]
|
| 96 |
+
cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4
|
| 97 |
+
cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8
|
| 98 |
+
|
| 99 |
+
def add_oneformer_config(cfg):
|
| 100 |
+
"""
|
| 101 |
+
Add config for ONE_FORMER.
|
| 102 |
+
"""
|
| 103 |
+
|
| 104 |
+
# mask_former model config
|
| 105 |
+
cfg.MODEL.ONE_FORMER = CN()
|
| 106 |
+
|
| 107 |
+
# loss
|
| 108 |
+
cfg.MODEL.ONE_FORMER.DEEP_SUPERVISION = True
|
| 109 |
+
cfg.MODEL.ONE_FORMER.NO_OBJECT_WEIGHT = 0.1
|
| 110 |
+
cfg.MODEL.ONE_FORMER.CLASS_WEIGHT = 1.0
|
| 111 |
+
cfg.MODEL.ONE_FORMER.DICE_WEIGHT = 1.0
|
| 112 |
+
cfg.MODEL.ONE_FORMER.MASK_WEIGHT = 20.0
|
| 113 |
+
cfg.MODEL.ONE_FORMER.CONTRASTIVE_WEIGHT = 0.5
|
| 114 |
+
cfg.MODEL.ONE_FORMER.CONTRASTIVE_TEMPERATURE = 0.07
|
| 115 |
+
|
| 116 |
+
# transformer config
|
| 117 |
+
cfg.MODEL.ONE_FORMER.NHEADS = 8
|
| 118 |
+
cfg.MODEL.ONE_FORMER.DROPOUT = 0.1
|
| 119 |
+
cfg.MODEL.ONE_FORMER.DIM_FEEDFORWARD = 2048
|
| 120 |
+
cfg.MODEL.ONE_FORMER.ENC_LAYERS = 0
|
| 121 |
+
cfg.MODEL.ONE_FORMER.CLASS_DEC_LAYERS = 2
|
| 122 |
+
cfg.MODEL.ONE_FORMER.DEC_LAYERS = 6
|
| 123 |
+
cfg.MODEL.ONE_FORMER.PRE_NORM = False
|
| 124 |
+
|
| 125 |
+
cfg.MODEL.ONE_FORMER.HIDDEN_DIM = 256
|
| 126 |
+
cfg.MODEL.ONE_FORMER.NUM_OBJECT_QUERIES = 120
|
| 127 |
+
cfg.MODEL.ONE_FORMER.NUM_OBJECT_CTX = 16
|
| 128 |
+
cfg.MODEL.ONE_FORMER.USE_TASK_NORM = True
|
| 129 |
+
|
| 130 |
+
cfg.MODEL.ONE_FORMER.TRANSFORMER_IN_FEATURE = "res5"
|
| 131 |
+
cfg.MODEL.ONE_FORMER.ENFORCE_INPUT_PROJ = False
|
| 132 |
+
|
| 133 |
+
# Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. ResNet)
|
| 134 |
+
# you can use this config to override
|
| 135 |
+
cfg.MODEL.ONE_FORMER.SIZE_DIVISIBILITY = 32
|
| 136 |
+
|
| 137 |
+
# transformer module
|
| 138 |
+
cfg.MODEL.ONE_FORMER.TRANSFORMER_DECODER_NAME = "ContrastiveMultiScaleMaskedTransformerDecoder"
|
| 139 |
+
|
| 140 |
+
# point loss configs
|
| 141 |
+
# Number of points sampled during training for a mask point head.
|
| 142 |
+
cfg.MODEL.ONE_FORMER.TRAIN_NUM_POINTS = 112 * 112
|
| 143 |
+
# Oversampling parameter for PointRend point sampling during training. Parameter `k` in the
|
| 144 |
+
# original paper.
|
| 145 |
+
cfg.MODEL.ONE_FORMER.OVERSAMPLE_RATIO = 3.0
|
| 146 |
+
# Importance sampling parameter for PointRend point sampling during training. Parametr `beta` in
|
| 147 |
+
# the original paper.
|
| 148 |
+
cfg.MODEL.ONE_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75
|
| 149 |
+
|
| 150 |
+
def add_swin_config(cfg):
|
| 151 |
+
"""
|
| 152 |
+
Add config forSWIN Backbone.
|
| 153 |
+
"""
|
| 154 |
+
|
| 155 |
+
# swin transformer backbone
|
| 156 |
+
cfg.MODEL.SWIN = CN()
|
| 157 |
+
cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224
|
| 158 |
+
cfg.MODEL.SWIN.PATCH_SIZE = 4
|
| 159 |
+
cfg.MODEL.SWIN.EMBED_DIM = 96
|
| 160 |
+
cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]
|
| 161 |
+
cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]
|
| 162 |
+
cfg.MODEL.SWIN.WINDOW_SIZE = 7
|
| 163 |
+
cfg.MODEL.SWIN.MLP_RATIO = 4.0
|
| 164 |
+
cfg.MODEL.SWIN.QKV_BIAS = True
|
| 165 |
+
cfg.MODEL.SWIN.QK_SCALE = None
|
| 166 |
+
cfg.MODEL.SWIN.DROP_RATE = 0.0
|
| 167 |
+
cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0
|
| 168 |
+
cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3
|
| 169 |
+
cfg.MODEL.SWIN.APE = False
|
| 170 |
+
cfg.MODEL.SWIN.PATCH_NORM = True
|
| 171 |
+
cfg.MODEL.SWIN.OUT_FEATURES = ["res2", "res3", "res4", "res5"]
|
| 172 |
+
cfg.MODEL.SWIN.USE_CHECKPOINT = False
|
| 173 |
+
## Semask additions
|
| 174 |
+
cfg.MODEL.SWIN.SEM_WINDOW_SIZE = 7
|
| 175 |
+
cfg.MODEL.SWIN.NUM_SEM_BLOCKS = 1
|
| 176 |
+
|
| 177 |
+
def add_dinat_config(cfg):
|
| 178 |
+
"""
|
| 179 |
+
Add config for NAT Backbone.
|
| 180 |
+
"""
|
| 181 |
+
|
| 182 |
+
# DINAT transformer backbone
|
| 183 |
+
cfg.MODEL.DiNAT = CN()
|
| 184 |
+
cfg.MODEL.DiNAT.DEPTHS = [3, 4, 18, 5]
|
| 185 |
+
cfg.MODEL.DiNAT.OUT_FEATURES = ["res2", "res3", "res4", "res5"]
|
| 186 |
+
cfg.MODEL.DiNAT.EMBED_DIM = 64
|
| 187 |
+
cfg.MODEL.DiNAT.MLP_RATIO = 3.0
|
| 188 |
+
cfg.MODEL.DiNAT.NUM_HEADS = [2, 4, 8, 16]
|
| 189 |
+
cfg.MODEL.DiNAT.DROP_PATH_RATE = 0.2
|
| 190 |
+
cfg.MODEL.DiNAT.KERNEL_SIZE = 7
|
| 191 |
+
cfg.MODEL.DiNAT.DILATIONS = [[1, 16, 1], [1, 4, 1, 8], [1, 2, 1, 3, 1, 4], [1, 2, 1, 2, 1]]
|
| 192 |
+
cfg.MODEL.DiNAT.OUT_INDICES = (0, 1, 2, 3)
|
| 193 |
+
cfg.MODEL.DiNAT.QKV_BIAS = True
|
| 194 |
+
cfg.MODEL.DiNAT.QK_SCALE = None
|
| 195 |
+
cfg.MODEL.DiNAT.DROP_RATE = 0
|
| 196 |
+
cfg.MODEL.DiNAT.ATTN_DROP_RATE = 0.
|
| 197 |
+
cfg.MODEL.DiNAT.IN_PATCH_SIZE = 4
|
| 198 |
+
|
| 199 |
+
def add_convnext_config(cfg):
|
| 200 |
+
"""
|
| 201 |
+
Add config for ConvNeXt Backbone.
|
| 202 |
+
"""
|
| 203 |
+
|
| 204 |
+
# swin transformer backbone
|
| 205 |
+
cfg.MODEL.CONVNEXT = CN()
|
| 206 |
+
cfg.MODEL.CONVNEXT.IN_CHANNELS = 3
|
| 207 |
+
cfg.MODEL.CONVNEXT.DEPTHS = [3, 3, 27, 3]
|
| 208 |
+
cfg.MODEL.CONVNEXT.DIMS = [192, 384, 768, 1536]
|
| 209 |
+
cfg.MODEL.CONVNEXT.DROP_PATH_RATE = 0.4
|
| 210 |
+
cfg.MODEL.CONVNEXT.LSIT = 1.0
|
| 211 |
+
cfg.MODEL.CONVNEXT.OUT_INDICES = [0, 1, 2, 3]
|
| 212 |
+
cfg.MODEL.CONVNEXT.OUT_FEATURES = ["res2", "res3", "res4", "res5"]
|
| 213 |
+
|
| 214 |
+
def add_beit_adapter_config(cfg):
|
| 215 |
+
"""
|
| 216 |
+
Add config for BEiT Adapter Backbone.
|
| 217 |
+
"""
|
| 218 |
+
|
| 219 |
+
# beit adapter backbone
|
| 220 |
+
cfg.MODEL.BEiTAdapter = CN()
|
| 221 |
+
cfg.MODEL.BEiTAdapter.IMG_SIZE = 640
|
| 222 |
+
cfg.MODEL.BEiTAdapter.PATCH_SIZE = 16
|
| 223 |
+
cfg.MODEL.BEiTAdapter.EMBED_DIM = 1024
|
| 224 |
+
cfg.MODEL.BEiTAdapter.DEPTH = 24
|
| 225 |
+
cfg.MODEL.BEiTAdapter.NUM_HEADS = 16
|
| 226 |
+
cfg.MODEL.BEiTAdapter.MLP_RATIO = 4
|
| 227 |
+
cfg.MODEL.BEiTAdapter.QKV_BIAS = True
|
| 228 |
+
cfg.MODEL.BEiTAdapter.USE_ABS_POS_EMB = False
|
| 229 |
+
cfg.MODEL.BEiTAdapter.USE_REL_POS_BIAS = True
|
| 230 |
+
cfg.MODEL.BEiTAdapter.INIT_VALUES = 1e-6
|
| 231 |
+
cfg.MODEL.BEiTAdapter.DROP_PATH_RATE = 0.3
|
| 232 |
+
cfg.MODEL.BEiTAdapter.CONV_INPLANE = 64
|
| 233 |
+
cfg.MODEL.BEiTAdapter.N_POINTS = 4
|
| 234 |
+
cfg.MODEL.BEiTAdapter.DEFORM_NUM_HEADS = 16
|
| 235 |
+
cfg.MODEL.BEiTAdapter.CFFN_RATIO = 0.25
|
| 236 |
+
cfg.MODEL.BEiTAdapter.DEFORM_RATIO = 0.5
|
| 237 |
+
cfg.MODEL.BEiTAdapter.WITH_CP = True
|
| 238 |
+
cfg.MODEL.BEiTAdapter.INTERACTION_INDEXES=[[0, 5], [6, 11], [12, 17], [18, 23]]
|
| 239 |
+
cfg.MODEL.BEiTAdapter.OUT_FEATURES = ["res2", "res3", "res4", "res5"]
|
RAVE-main/annotator/oneformer/oneformer/data/__init__.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
from . import datasets
|
RAVE-main/annotator/oneformer/oneformer/data/build.py
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
from typing import Any, Callable, Dict, List, Optional, Union
|
| 3 |
+
import torch.utils.data as torchdata
|
| 4 |
+
|
| 5 |
+
from annotator.oneformer.detectron2.config import configurable
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
from annotator.oneformer.detectron2.data.common import DatasetFromList, MapDataset
|
| 9 |
+
from annotator.oneformer.detectron2.data.dataset_mapper import DatasetMapper
|
| 10 |
+
from annotator.oneformer.detectron2.data.samplers import (
|
| 11 |
+
InferenceSampler,
|
| 12 |
+
)
|
| 13 |
+
from annotator.oneformer.detectron2.data.build import (
|
| 14 |
+
get_detection_dataset_dicts,
|
| 15 |
+
trivial_batch_collator
|
| 16 |
+
)
|
| 17 |
+
"""
|
| 18 |
+
This file contains the default logic to build a dataloader for training or testing.
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
__all__ = [
|
| 22 |
+
"build_detection_test_loader",
|
| 23 |
+
]
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def _test_loader_from_config(cfg, dataset_name, mapper=None):
|
| 27 |
+
"""
|
| 28 |
+
Uses the given `dataset_name` argument (instead of the names in cfg), because the
|
| 29 |
+
standard practice is to evaluate each test set individually (not combining them).
|
| 30 |
+
"""
|
| 31 |
+
if isinstance(dataset_name, str):
|
| 32 |
+
dataset_name = [dataset_name]
|
| 33 |
+
|
| 34 |
+
dataset = get_detection_dataset_dicts(
|
| 35 |
+
dataset_name,
|
| 36 |
+
filter_empty=False,
|
| 37 |
+
proposal_files=[
|
| 38 |
+
cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name
|
| 39 |
+
]
|
| 40 |
+
if cfg.MODEL.LOAD_PROPOSALS
|
| 41 |
+
else None,
|
| 42 |
+
)
|
| 43 |
+
if mapper is None:
|
| 44 |
+
mapper = DatasetMapper(cfg, False)
|
| 45 |
+
return {
|
| 46 |
+
"dataset": dataset,
|
| 47 |
+
"mapper": mapper,
|
| 48 |
+
"num_workers": cfg.DATALOADER.NUM_WORKERS,
|
| 49 |
+
"sampler": InferenceSampler(len(dataset))
|
| 50 |
+
if not isinstance(dataset, torchdata.IterableDataset)
|
| 51 |
+
else None,
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
@configurable(from_config=_test_loader_from_config)
|
| 56 |
+
def build_detection_test_loader(
|
| 57 |
+
dataset: Union[List[Any], torchdata.Dataset],
|
| 58 |
+
*,
|
| 59 |
+
mapper: Callable[[Dict[str, Any]], Any],
|
| 60 |
+
sampler: Optional[torchdata.Sampler] = None,
|
| 61 |
+
batch_size: int = 1,
|
| 62 |
+
num_workers: int = 0,
|
| 63 |
+
collate_fn: Optional[Callable[[List[Any]], Any]] = None,
|
| 64 |
+
) -> torchdata.DataLoader:
|
| 65 |
+
"""
|
| 66 |
+
Similar to `build_detection_train_loader`, with default batch size = 1,
|
| 67 |
+
and sampler = :class:`InferenceSampler`. This sampler coordinates all workers
|
| 68 |
+
to produce the exact set of all samples.
|
| 69 |
+
|
| 70 |
+
Args:
|
| 71 |
+
dataset: a list of dataset dicts,
|
| 72 |
+
or a pytorch dataset (either map-style or iterable). They can be obtained
|
| 73 |
+
by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
|
| 74 |
+
mapper: a callable which takes a sample (dict) from dataset
|
| 75 |
+
and returns the format to be consumed by the model.
|
| 76 |
+
When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``.
|
| 77 |
+
sampler: a sampler that produces
|
| 78 |
+
indices to be applied on ``dataset``. Default to :class:`InferenceSampler`,
|
| 79 |
+
which splits the dataset across all workers. Sampler must be None
|
| 80 |
+
if `dataset` is iterable.
|
| 81 |
+
batch_size: the batch size of the data loader to be created.
|
| 82 |
+
Default to 1 image per worker since this is the standard when reporting
|
| 83 |
+
inference time in papers.
|
| 84 |
+
num_workers: number of parallel data loading workers
|
| 85 |
+
collate_fn: same as the argument of `torch.utils.data.DataLoader`.
|
| 86 |
+
Defaults to do no collation and return a list of data.
|
| 87 |
+
|
| 88 |
+
Returns:
|
| 89 |
+
DataLoader: a torch DataLoader, that loads the given detection
|
| 90 |
+
dataset, with test-time transformation and batching.
|
| 91 |
+
|
| 92 |
+
Examples:
|
| 93 |
+
::
|
| 94 |
+
data_loader = build_detection_test_loader(
|
| 95 |
+
DatasetRegistry.get("my_test"),
|
| 96 |
+
mapper=DatasetMapper(...))
|
| 97 |
+
|
| 98 |
+
# or, instantiate with a CfgNode:
|
| 99 |
+
data_loader = build_detection_test_loader(cfg, "my_test")
|
| 100 |
+
"""
|
| 101 |
+
if isinstance(dataset, list):
|
| 102 |
+
dataset = DatasetFromList(dataset, copy=False)
|
| 103 |
+
if mapper is not None:
|
| 104 |
+
dataset = MapDataset(dataset, mapper)
|
| 105 |
+
if isinstance(dataset, torchdata.IterableDataset):
|
| 106 |
+
assert sampler is None, "sampler must be None if dataset is IterableDataset"
|
| 107 |
+
else:
|
| 108 |
+
if sampler is None:
|
| 109 |
+
sampler = InferenceSampler(len(dataset))
|
| 110 |
+
return torchdata.DataLoader(
|
| 111 |
+
dataset,
|
| 112 |
+
batch_size=batch_size,
|
| 113 |
+
sampler=sampler,
|
| 114 |
+
drop_last=False,
|
| 115 |
+
num_workers=num_workers,
|
| 116 |
+
collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
|
| 117 |
+
)
|
RAVE-main/annotator/oneformer/oneformer/data/dataset_mappers/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
|
RAVE-main/annotator/oneformer/oneformer/data/dataset_mappers/coco_unified_new_baseline_dataset_mapper.py
ADDED
|
@@ -0,0 +1,341 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ------------------------------------------------------------------------------
|
| 2 |
+
# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py
|
| 3 |
+
# Modified by Jitesh Jain (https://github.com/praeclarumjj3)
|
| 4 |
+
# ------------------------------------------------------------------------------
|
| 5 |
+
|
| 6 |
+
import copy
|
| 7 |
+
import logging
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
import torch
|
| 11 |
+
|
| 12 |
+
from annotator.oneformer.detectron2.data import MetadataCatalog
|
| 13 |
+
from annotator.oneformer.detectron2.config import configurable
|
| 14 |
+
from annotator.oneformer.detectron2.data import detection_utils as utils
|
| 15 |
+
from annotator.oneformer.detectron2.data import transforms as T
|
| 16 |
+
from annotator.oneformer.detectron2.structures import BitMasks, Instances
|
| 17 |
+
from annotator.oneformer.oneformer.utils.box_ops import masks_to_boxes
|
| 18 |
+
from annotator.oneformer.oneformer.data.tokenizer import SimpleTokenizer, Tokenize
|
| 19 |
+
|
| 20 |
+
__all__ = ["COCOUnifiedNewBaselineDatasetMapper"]
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def build_transform_gen(cfg, is_train):
|
| 24 |
+
"""
|
| 25 |
+
Create a list of default :class:`Augmentation` from config.
|
| 26 |
+
Now it includes resizing and flipping.
|
| 27 |
+
Returns:
|
| 28 |
+
list[Augmentation]
|
| 29 |
+
"""
|
| 30 |
+
assert is_train, "Only support training augmentation"
|
| 31 |
+
image_size = cfg.INPUT.IMAGE_SIZE
|
| 32 |
+
min_scale = cfg.INPUT.MIN_SCALE
|
| 33 |
+
max_scale = cfg.INPUT.MAX_SCALE
|
| 34 |
+
|
| 35 |
+
augmentation = []
|
| 36 |
+
|
| 37 |
+
if cfg.INPUT.RANDOM_FLIP != "none":
|
| 38 |
+
augmentation.append(
|
| 39 |
+
T.RandomFlip(
|
| 40 |
+
horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal",
|
| 41 |
+
vertical=cfg.INPUT.RANDOM_FLIP == "vertical",
|
| 42 |
+
)
|
| 43 |
+
)
|
| 44 |
+
|
| 45 |
+
augmentation.extend([
|
| 46 |
+
T.ResizeScale(
|
| 47 |
+
min_scale=min_scale, max_scale=max_scale, target_height=image_size, target_width=image_size
|
| 48 |
+
),
|
| 49 |
+
T.FixedSizeCrop(crop_size=(image_size, image_size)),
|
| 50 |
+
])
|
| 51 |
+
|
| 52 |
+
return augmentation
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
# This is specifically designed for the COCO dataset.
|
| 56 |
+
class COCOUnifiedNewBaselineDatasetMapper:
|
| 57 |
+
"""
|
| 58 |
+
A callable which takes a dataset dict in Detectron2 Dataset format,
|
| 59 |
+
and map it into a format used by OneFormer.
|
| 60 |
+
|
| 61 |
+
This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation.
|
| 62 |
+
|
| 63 |
+
The callable currently does the following:
|
| 64 |
+
|
| 65 |
+
1. Read the image from "file_name"
|
| 66 |
+
2. Applies geometric transforms to the image and annotation
|
| 67 |
+
3. Find and applies suitable cropping to the image and annotation
|
| 68 |
+
4. Prepare image and annotation to Tensors
|
| 69 |
+
"""
|
| 70 |
+
|
| 71 |
+
@configurable
|
| 72 |
+
def __init__(
|
| 73 |
+
self,
|
| 74 |
+
is_train=True,
|
| 75 |
+
*,
|
| 76 |
+
num_queries,
|
| 77 |
+
tfm_gens,
|
| 78 |
+
meta,
|
| 79 |
+
image_format,
|
| 80 |
+
max_seq_len,
|
| 81 |
+
task_seq_len,
|
| 82 |
+
semantic_prob,
|
| 83 |
+
instance_prob,
|
| 84 |
+
):
|
| 85 |
+
"""
|
| 86 |
+
NOTE: this interface is experimental.
|
| 87 |
+
Args:
|
| 88 |
+
is_train: for training or inference
|
| 89 |
+
augmentations: a list of augmentations or deterministic transforms to apply
|
| 90 |
+
crop_gen: crop augmentation
|
| 91 |
+
tfm_gens: data augmentation
|
| 92 |
+
image_format: an image format supported by :func:`detection_utils.read_image`.
|
| 93 |
+
"""
|
| 94 |
+
self.tfm_gens = tfm_gens
|
| 95 |
+
logging.getLogger(__name__).info(
|
| 96 |
+
"[COCOUnifiedNewBaselineDatasetMapper] Full TransformGens used in training: {}".format(
|
| 97 |
+
str(self.tfm_gens)
|
| 98 |
+
)
|
| 99 |
+
)
|
| 100 |
+
|
| 101 |
+
self.img_format = image_format
|
| 102 |
+
self.is_train = is_train
|
| 103 |
+
self.meta = meta
|
| 104 |
+
self.ignore_label = self.meta.ignore_label
|
| 105 |
+
self.num_queries = num_queries
|
| 106 |
+
|
| 107 |
+
self.things = []
|
| 108 |
+
for k,v in self.meta.thing_dataset_id_to_contiguous_id.items():
|
| 109 |
+
self.things.append(v)
|
| 110 |
+
self.class_names = self.meta.stuff_classes
|
| 111 |
+
self.text_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=max_seq_len)
|
| 112 |
+
self.task_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=task_seq_len)
|
| 113 |
+
self.semantic_prob = semantic_prob
|
| 114 |
+
self.instance_prob = instance_prob
|
| 115 |
+
|
| 116 |
+
@classmethod
|
| 117 |
+
def from_config(cls, cfg, is_train=True):
|
| 118 |
+
# Build augmentation
|
| 119 |
+
tfm_gens = build_transform_gen(cfg, is_train)
|
| 120 |
+
dataset_names = cfg.DATASETS.TRAIN
|
| 121 |
+
meta = MetadataCatalog.get(dataset_names[0])
|
| 122 |
+
|
| 123 |
+
ret = {
|
| 124 |
+
"is_train": is_train,
|
| 125 |
+
"meta": meta,
|
| 126 |
+
"tfm_gens": tfm_gens,
|
| 127 |
+
"image_format": cfg.INPUT.FORMAT,
|
| 128 |
+
"num_queries": cfg.MODEL.ONE_FORMER.NUM_OBJECT_QUERIES - cfg.MODEL.TEXT_ENCODER.N_CTX,
|
| 129 |
+
"task_seq_len": cfg.INPUT.TASK_SEQ_LEN,
|
| 130 |
+
"max_seq_len": cfg.INPUT.MAX_SEQ_LEN,
|
| 131 |
+
"semantic_prob": cfg.INPUT.TASK_PROB.SEMANTIC,
|
| 132 |
+
"instance_prob": cfg.INPUT.TASK_PROB.INSTANCE,
|
| 133 |
+
}
|
| 134 |
+
return ret
|
| 135 |
+
|
| 136 |
+
def _get_semantic_dict(self, pan_seg_gt, image_shape, segments_info, num_class_obj):
|
| 137 |
+
instances = Instances(image_shape)
|
| 138 |
+
|
| 139 |
+
classes = []
|
| 140 |
+
texts = ["a semantic photo"] * self.num_queries
|
| 141 |
+
masks = []
|
| 142 |
+
label = np.ones_like(pan_seg_gt) * self.ignore_label
|
| 143 |
+
|
| 144 |
+
for segment_info in segments_info:
|
| 145 |
+
class_id = segment_info["category_id"]
|
| 146 |
+
if not segment_info["iscrowd"]:
|
| 147 |
+
mask = pan_seg_gt == segment_info["id"]
|
| 148 |
+
if not np.all(mask == False):
|
| 149 |
+
if class_id not in classes:
|
| 150 |
+
cls_name = self.class_names[class_id]
|
| 151 |
+
classes.append(class_id)
|
| 152 |
+
masks.append(mask)
|
| 153 |
+
num_class_obj[cls_name] += 1
|
| 154 |
+
else:
|
| 155 |
+
idx = classes.index(class_id)
|
| 156 |
+
masks[idx] += mask
|
| 157 |
+
masks[idx] = np.clip(masks[idx], 0, 1).astype(np.bool)
|
| 158 |
+
label[mask] = class_id
|
| 159 |
+
|
| 160 |
+
num = 0
|
| 161 |
+
for i, cls_name in enumerate(self.class_names):
|
| 162 |
+
if num_class_obj[cls_name] > 0:
|
| 163 |
+
for _ in range(num_class_obj[cls_name]):
|
| 164 |
+
if num >= len(texts):
|
| 165 |
+
break
|
| 166 |
+
texts[num] = f"a photo with a {cls_name}"
|
| 167 |
+
num += 1
|
| 168 |
+
|
| 169 |
+
classes = np.array(classes)
|
| 170 |
+
instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
|
| 171 |
+
if len(masks) == 0:
|
| 172 |
+
# Some image does not have annotation (all ignored)
|
| 173 |
+
instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))
|
| 174 |
+
instances.gt_bboxes = torch.zeros((0, 4))
|
| 175 |
+
else:
|
| 176 |
+
masks = BitMasks(
|
| 177 |
+
torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])
|
| 178 |
+
)
|
| 179 |
+
instances.gt_masks = masks.tensor
|
| 180 |
+
# Placeholder bounding boxes for stuff regions. Note that these are not used during training.
|
| 181 |
+
instances.gt_bboxes = torch.stack([torch.tensor([0., 0., 1., 1.])] * instances.gt_masks.shape[0])
|
| 182 |
+
return instances, texts, label
|
| 183 |
+
|
| 184 |
+
def _get_instance_dict(self, pan_seg_gt, image_shape, segments_info, num_class_obj):
|
| 185 |
+
instances = Instances(image_shape)
|
| 186 |
+
|
| 187 |
+
classes = []
|
| 188 |
+
texts = ["an instance photo"] * self.num_queries
|
| 189 |
+
masks = []
|
| 190 |
+
label = np.ones_like(pan_seg_gt) * self.ignore_label
|
| 191 |
+
|
| 192 |
+
for segment_info in segments_info:
|
| 193 |
+
class_id = segment_info["category_id"]
|
| 194 |
+
if class_id in self.things:
|
| 195 |
+
if not segment_info["iscrowd"]:
|
| 196 |
+
mask = pan_seg_gt == segment_info["id"]
|
| 197 |
+
if not np.all(mask == False):
|
| 198 |
+
cls_name = self.class_names[class_id]
|
| 199 |
+
classes.append(class_id)
|
| 200 |
+
masks.append(mask)
|
| 201 |
+
num_class_obj[cls_name] += 1
|
| 202 |
+
label[mask] = class_id
|
| 203 |
+
|
| 204 |
+
num = 0
|
| 205 |
+
for i, cls_name in enumerate(self.class_names):
|
| 206 |
+
if num_class_obj[cls_name] > 0:
|
| 207 |
+
for _ in range(num_class_obj[cls_name]):
|
| 208 |
+
if num >= len(texts):
|
| 209 |
+
break
|
| 210 |
+
texts[num] = f"a photo with a {cls_name}"
|
| 211 |
+
num += 1
|
| 212 |
+
|
| 213 |
+
classes = np.array(classes)
|
| 214 |
+
instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
|
| 215 |
+
if len(masks) == 0:
|
| 216 |
+
# Some image does not have annotation (all ignored)
|
| 217 |
+
instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))
|
| 218 |
+
instances.gt_bboxes = torch.zeros((0, 4))
|
| 219 |
+
else:
|
| 220 |
+
masks = BitMasks(
|
| 221 |
+
torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])
|
| 222 |
+
)
|
| 223 |
+
instances.gt_masks = masks.tensor
|
| 224 |
+
instances.gt_bboxes = masks_to_boxes(instances.gt_masks)
|
| 225 |
+
return instances, texts, label
|
| 226 |
+
|
| 227 |
+
def _get_panoptic_dict(self, pan_seg_gt, image_shape, segments_info, num_class_obj):
|
| 228 |
+
instances = Instances(image_shape)
|
| 229 |
+
|
| 230 |
+
classes = []
|
| 231 |
+
texts = ["a panoptic photo"] * self.num_queries
|
| 232 |
+
masks = []
|
| 233 |
+
label = np.ones_like(pan_seg_gt) * self.ignore_label
|
| 234 |
+
|
| 235 |
+
for segment_info in segments_info:
|
| 236 |
+
class_id = segment_info["category_id"]
|
| 237 |
+
if not segment_info["iscrowd"]:
|
| 238 |
+
mask = pan_seg_gt == segment_info["id"]
|
| 239 |
+
if not np.all(mask == False):
|
| 240 |
+
cls_name = self.class_names[class_id]
|
| 241 |
+
classes.append(class_id)
|
| 242 |
+
masks.append(mask)
|
| 243 |
+
num_class_obj[cls_name] += 1
|
| 244 |
+
label[mask] = class_id
|
| 245 |
+
|
| 246 |
+
num = 0
|
| 247 |
+
for i, cls_name in enumerate(self.class_names):
|
| 248 |
+
if num_class_obj[cls_name] > 0:
|
| 249 |
+
for _ in range(num_class_obj[cls_name]):
|
| 250 |
+
if num >= len(texts):
|
| 251 |
+
break
|
| 252 |
+
texts[num] = f"a photo with a {cls_name}"
|
| 253 |
+
num += 1
|
| 254 |
+
|
| 255 |
+
classes = np.array(classes)
|
| 256 |
+
instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
|
| 257 |
+
if len(masks) == 0:
|
| 258 |
+
# Some image does not have annotation (all ignored)
|
| 259 |
+
instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))
|
| 260 |
+
instances.gt_bboxes = torch.zeros((0, 4))
|
| 261 |
+
else:
|
| 262 |
+
masks = BitMasks(
|
| 263 |
+
torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])
|
| 264 |
+
)
|
| 265 |
+
instances.gt_masks = masks.tensor
|
| 266 |
+
instances.gt_bboxes = masks_to_boxes(instances.gt_masks)
|
| 267 |
+
for i in range(instances.gt_classes.shape[0]):
|
| 268 |
+
# Placeholder bounding boxes for stuff regions. Note that these are not used during training.
|
| 269 |
+
if instances.gt_classes[i].item() not in self.things:
|
| 270 |
+
instances.gt_bboxes[i] = torch.tensor([0., 0., 1., 1.])
|
| 271 |
+
return instances, texts, label
|
| 272 |
+
|
| 273 |
+
def __call__(self, dataset_dict):
|
| 274 |
+
"""
|
| 275 |
+
Args:
|
| 276 |
+
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
|
| 277 |
+
|
| 278 |
+
Returns:
|
| 279 |
+
dict: a format that builtin models in detectron2 accept
|
| 280 |
+
"""
|
| 281 |
+
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
|
| 282 |
+
image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
|
| 283 |
+
utils.check_image_size(dataset_dict, image)
|
| 284 |
+
|
| 285 |
+
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
|
| 286 |
+
image_shape = image.shape[:2] # h, w
|
| 287 |
+
|
| 288 |
+
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
|
| 289 |
+
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
|
| 290 |
+
# Therefore it's important to use torch.Tensor.
|
| 291 |
+
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
|
| 292 |
+
|
| 293 |
+
if not self.is_train:
|
| 294 |
+
# USER: Modify this if you want to keep them for some reason.
|
| 295 |
+
dataset_dict.pop("annotations", None)
|
| 296 |
+
return dataset_dict
|
| 297 |
+
|
| 298 |
+
# semantic segmentation
|
| 299 |
+
if "sem_seg_file_name" in dataset_dict:
|
| 300 |
+
# PyTorch transformation not implemented for uint16, so converting it to double first
|
| 301 |
+
sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name")).astype("double")
|
| 302 |
+
sem_seg_gt = transforms.apply_segmentation(sem_seg_gt)
|
| 303 |
+
else:
|
| 304 |
+
sem_seg_gt = None
|
| 305 |
+
|
| 306 |
+
if "pan_seg_file_name" in dataset_dict:
|
| 307 |
+
pan_seg_gt = utils.read_image(dataset_dict.pop("pan_seg_file_name"), "RGB")
|
| 308 |
+
segments_info = dataset_dict["segments_info"]
|
| 309 |
+
|
| 310 |
+
# apply the same transformation to panoptic segmentation
|
| 311 |
+
pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)
|
| 312 |
+
|
| 313 |
+
from panopticapi.utils import rgb2id
|
| 314 |
+
pan_seg_gt = rgb2id(pan_seg_gt)
|
| 315 |
+
|
| 316 |
+
prob_task = np.random.uniform(0,1.)
|
| 317 |
+
|
| 318 |
+
num_class_obj = {}
|
| 319 |
+
|
| 320 |
+
for name in self.class_names:
|
| 321 |
+
num_class_obj[name] = 0
|
| 322 |
+
|
| 323 |
+
if prob_task < self.semantic_prob:
|
| 324 |
+
task = "The task is semantic"
|
| 325 |
+
instances, text, sem_seg = self._get_semantic_dict(pan_seg_gt, image_shape, segments_info, num_class_obj)
|
| 326 |
+
elif prob_task < self.instance_prob:
|
| 327 |
+
task = "The task is instance"
|
| 328 |
+
instances, text, sem_seg = self._get_instance_dict(pan_seg_gt, image_shape, segments_info, num_class_obj)
|
| 329 |
+
else:
|
| 330 |
+
task = "The task is panoptic"
|
| 331 |
+
instances, text, sem_seg = self._get_panoptic_dict(pan_seg_gt, image_shape, segments_info, num_class_obj)
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
dataset_dict["sem_seg"] = torch.from_numpy(sem_seg).long()
|
| 335 |
+
dataset_dict["instances"] = instances
|
| 336 |
+
dataset_dict["orig_shape"] = image_shape
|
| 337 |
+
dataset_dict["task"] = task
|
| 338 |
+
dataset_dict["text"] = text
|
| 339 |
+
dataset_dict["thing_ids"] = self.things
|
| 340 |
+
|
| 341 |
+
return dataset_dict
|
RAVE-main/annotator/oneformer/oneformer/data/dataset_mappers/dataset_mapper.py
ADDED
|
@@ -0,0 +1,203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ------------------------------------------------------------------------------
|
| 2 |
+
# Reference: https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/dataset_mapper.py
|
| 3 |
+
# Modified by Jitesh Jain (https://github.com/praeclarumjj3)
|
| 4 |
+
# ------------------------------------------------------------------------------
|
| 5 |
+
|
| 6 |
+
import copy
|
| 7 |
+
import logging
|
| 8 |
+
import numpy as np
|
| 9 |
+
from typing import List, Optional, Union
|
| 10 |
+
import torch
|
| 11 |
+
|
| 12 |
+
from annotator.oneformer.detectron2.config import configurable
|
| 13 |
+
|
| 14 |
+
from annotator.oneformer.detectron2.data import detection_utils as utils
|
| 15 |
+
from annotator.oneformer.detectron2.data import transforms as T
|
| 16 |
+
from annotator.oneformer.oneformer.data.tokenizer import SimpleTokenizer, Tokenize
|
| 17 |
+
|
| 18 |
+
__all__ = ["DatasetMapper"]
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class DatasetMapper:
|
| 22 |
+
"""
|
| 23 |
+
A callable which takes a dataset dict in Detectron2 Dataset format,
|
| 24 |
+
and map it into a format used by the model.
|
| 25 |
+
|
| 26 |
+
This is the default callable to be used to map your dataset dict into training data.
|
| 27 |
+
You may need to follow it to implement your own one for customized logic,
|
| 28 |
+
such as a different way to read or transform images.
|
| 29 |
+
See :doc:`/tutorials/data_loading` for details.
|
| 30 |
+
|
| 31 |
+
The callable currently does the following:
|
| 32 |
+
|
| 33 |
+
1. Read the image from "file_name"
|
| 34 |
+
2. Applies cropping/geometric transforms to the image and annotations
|
| 35 |
+
3. Prepare data and annotations to Tensor and :class:`Instances`
|
| 36 |
+
"""
|
| 37 |
+
|
| 38 |
+
@configurable
|
| 39 |
+
def __init__(
|
| 40 |
+
self,
|
| 41 |
+
is_train: bool,
|
| 42 |
+
*,
|
| 43 |
+
augmentations: List[Union[T.Augmentation, T.Transform]],
|
| 44 |
+
image_format: str,
|
| 45 |
+
task_seq_len: int,
|
| 46 |
+
task: str = "panoptic",
|
| 47 |
+
use_instance_mask: bool = False,
|
| 48 |
+
use_keypoint: bool = False,
|
| 49 |
+
instance_mask_format: str = "polygon",
|
| 50 |
+
keypoint_hflip_indices: Optional[np.ndarray] = None,
|
| 51 |
+
precomputed_proposal_topk: Optional[int] = None,
|
| 52 |
+
recompute_boxes: bool = False,
|
| 53 |
+
):
|
| 54 |
+
"""
|
| 55 |
+
NOTE: this interface is experimental.
|
| 56 |
+
|
| 57 |
+
Args:
|
| 58 |
+
is_train: whether it's used in training or inference
|
| 59 |
+
augmentations: a list of augmentations or deterministic transforms to apply
|
| 60 |
+
image_format: an image format supported by :func:`detection_utils.read_image`.
|
| 61 |
+
use_instance_mask: whether to process instance segmentation annotations, if available
|
| 62 |
+
use_keypoint: whether to process keypoint annotations if available
|
| 63 |
+
instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation
|
| 64 |
+
masks into this format.
|
| 65 |
+
keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`
|
| 66 |
+
precomputed_proposal_topk: if given, will load pre-computed
|
| 67 |
+
proposals from dataset_dict and keep the top k proposals for each image.
|
| 68 |
+
recompute_boxes: whether to overwrite bounding box annotations
|
| 69 |
+
by computing tight bounding boxes from instance mask annotations.
|
| 70 |
+
"""
|
| 71 |
+
if recompute_boxes:
|
| 72 |
+
assert use_instance_mask, "recompute_boxes requires instance masks"
|
| 73 |
+
# fmt: off
|
| 74 |
+
self.is_train = is_train
|
| 75 |
+
self.augmentations = T.AugmentationList(augmentations)
|
| 76 |
+
self.image_format = image_format
|
| 77 |
+
self.use_instance_mask = use_instance_mask
|
| 78 |
+
self.instance_mask_format = instance_mask_format
|
| 79 |
+
self.use_keypoint = use_keypoint
|
| 80 |
+
self.keypoint_hflip_indices = keypoint_hflip_indices
|
| 81 |
+
self.proposal_topk = precomputed_proposal_topk
|
| 82 |
+
self.recompute_boxes = recompute_boxes
|
| 83 |
+
self.task_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=task_seq_len)
|
| 84 |
+
self.task = task
|
| 85 |
+
assert self.task in ["panoptic", "semantic", "instance"]
|
| 86 |
+
|
| 87 |
+
# fmt: on
|
| 88 |
+
logger = logging.getLogger(__name__)
|
| 89 |
+
mode = "training" if is_train else "inference"
|
| 90 |
+
logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
|
| 91 |
+
|
| 92 |
+
@classmethod
|
| 93 |
+
def from_config(cls, cfg, is_train: bool = True):
|
| 94 |
+
augs = utils.build_augmentation(cfg, is_train)
|
| 95 |
+
if cfg.INPUT.CROP.ENABLED and is_train:
|
| 96 |
+
augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
|
| 97 |
+
recompute_boxes = cfg.MODEL.MASK_ON
|
| 98 |
+
else:
|
| 99 |
+
recompute_boxes = False
|
| 100 |
+
|
| 101 |
+
ret = {
|
| 102 |
+
"is_train": is_train,
|
| 103 |
+
"augmentations": augs,
|
| 104 |
+
"image_format": cfg.INPUT.FORMAT,
|
| 105 |
+
"use_instance_mask": cfg.MODEL.MASK_ON,
|
| 106 |
+
"instance_mask_format": cfg.INPUT.MASK_FORMAT,
|
| 107 |
+
"use_keypoint": cfg.MODEL.KEYPOINT_ON,
|
| 108 |
+
"task_seq_len": cfg.INPUT.TASK_SEQ_LEN,
|
| 109 |
+
"recompute_boxes": recompute_boxes,
|
| 110 |
+
"task": cfg.MODEL.TEST.TASK,
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
if cfg.MODEL.KEYPOINT_ON:
|
| 114 |
+
ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
|
| 115 |
+
|
| 116 |
+
if cfg.MODEL.LOAD_PROPOSALS:
|
| 117 |
+
ret["precomputed_proposal_topk"] = (
|
| 118 |
+
cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
|
| 119 |
+
if is_train
|
| 120 |
+
else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
|
| 121 |
+
)
|
| 122 |
+
return ret
|
| 123 |
+
|
| 124 |
+
def _transform_annotations(self, dataset_dict, transforms, image_shape):
|
| 125 |
+
# USER: Modify this if you want to keep them for some reason.
|
| 126 |
+
for anno in dataset_dict["annotations"]:
|
| 127 |
+
if not self.use_instance_mask:
|
| 128 |
+
anno.pop("segmentation", None)
|
| 129 |
+
if not self.use_keypoint:
|
| 130 |
+
anno.pop("keypoints", None)
|
| 131 |
+
|
| 132 |
+
# USER: Implement additional transformations if you have other types of data
|
| 133 |
+
annos = [
|
| 134 |
+
utils.transform_instance_annotations(
|
| 135 |
+
obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
|
| 136 |
+
)
|
| 137 |
+
for obj in dataset_dict.pop("annotations")
|
| 138 |
+
if obj.get("iscrowd", 0) == 0
|
| 139 |
+
]
|
| 140 |
+
instances = utils.annotations_to_instances(
|
| 141 |
+
annos, image_shape, mask_format=self.instance_mask_format
|
| 142 |
+
)
|
| 143 |
+
|
| 144 |
+
# After transforms such as cropping are applied, the bounding box may no longer
|
| 145 |
+
# tightly bound the object. As an example, imagine a triangle object
|
| 146 |
+
# [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
|
| 147 |
+
# bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to
|
| 148 |
+
# the intersection of original bounding box and the cropping box.
|
| 149 |
+
if self.recompute_boxes:
|
| 150 |
+
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
|
| 151 |
+
dataset_dict["instances"] = utils.filter_empty_instances(instances)
|
| 152 |
+
|
| 153 |
+
def __call__(self, dataset_dict):
|
| 154 |
+
"""
|
| 155 |
+
Args:
|
| 156 |
+
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
|
| 157 |
+
|
| 158 |
+
Returns:
|
| 159 |
+
dict: a format that builtin models in detectron2 accept
|
| 160 |
+
"""
|
| 161 |
+
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
|
| 162 |
+
# USER: Write your own image loading if it's not from a file
|
| 163 |
+
image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
|
| 164 |
+
utils.check_image_size(dataset_dict, image)
|
| 165 |
+
|
| 166 |
+
task = f"The task is {self.task}"
|
| 167 |
+
dataset_dict["task"] = task
|
| 168 |
+
|
| 169 |
+
# USER: Remove if you don't do semantic/panoptic segmentation.
|
| 170 |
+
if "sem_seg_file_name" in dataset_dict:
|
| 171 |
+
sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2)
|
| 172 |
+
else:
|
| 173 |
+
sem_seg_gt = None
|
| 174 |
+
|
| 175 |
+
aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
|
| 176 |
+
transforms = self.augmentations(aug_input)
|
| 177 |
+
image, sem_seg_gt = aug_input.image, aug_input.sem_seg
|
| 178 |
+
|
| 179 |
+
image_shape = image.shape[:2] # h, w
|
| 180 |
+
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
|
| 181 |
+
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
|
| 182 |
+
# Therefore it's important to use torch.Tensor.
|
| 183 |
+
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
|
| 184 |
+
if sem_seg_gt is not None:
|
| 185 |
+
dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
|
| 186 |
+
|
| 187 |
+
# USER: Remove if you don't use pre-computed proposals.
|
| 188 |
+
# Most users would not need this feature.
|
| 189 |
+
if self.proposal_topk is not None:
|
| 190 |
+
utils.transform_proposals(
|
| 191 |
+
dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk
|
| 192 |
+
)
|
| 193 |
+
|
| 194 |
+
if not self.is_train:
|
| 195 |
+
# USER: Modify this if you want to keep them for some reason.
|
| 196 |
+
dataset_dict.pop("annotations", None)
|
| 197 |
+
dataset_dict.pop("sem_seg_file_name", None)
|
| 198 |
+
return dataset_dict
|
| 199 |
+
|
| 200 |
+
if "annotations" in dataset_dict:
|
| 201 |
+
self._transform_annotations(dataset_dict, transforms, image_shape)
|
| 202 |
+
|
| 203 |
+
return dataset_dict
|
RAVE-main/annotator/oneformer/oneformer/data/dataset_mappers/oneformer_unified_dataset_mapper.py
ADDED
|
@@ -0,0 +1,375 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ------------------------------------------------------------------------------
|
| 2 |
+
# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/data/dataset_mappers/mask_former_panoptic_dataset_mapper.py
|
| 3 |
+
# Modified by Jitesh Jain (https://github.com/praeclarumjj3)
|
| 4 |
+
# ------------------------------------------------------------------------------
|
| 5 |
+
|
| 6 |
+
import copy
|
| 7 |
+
import logging
|
| 8 |
+
import os
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
import torch
|
| 12 |
+
from torch.nn import functional as F
|
| 13 |
+
|
| 14 |
+
from annotator.oneformer.detectron2.config import configurable
|
| 15 |
+
from annotator.oneformer.detectron2.data import detection_utils as utils
|
| 16 |
+
from annotator.oneformer.detectron2.data import transforms as T
|
| 17 |
+
from annotator.oneformer.detectron2.structures import BitMasks, Instances
|
| 18 |
+
from annotator.oneformer.detectron2.data import MetadataCatalog
|
| 19 |
+
from annotator.oneformer.detectron2.projects.point_rend import ColorAugSSDTransform
|
| 20 |
+
from annotator.oneformer.oneformer.utils.box_ops import masks_to_boxes
|
| 21 |
+
from annotator.oneformer.oneformer.data.tokenizer import SimpleTokenizer, Tokenize
|
| 22 |
+
|
| 23 |
+
__all__ = ["OneFormerUnifiedDatasetMapper"]
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class OneFormerUnifiedDatasetMapper:
    """
    A callable which takes a dataset dict in Detectron2 Dataset format,
    and map it into a format used by OneFormer for universal segmentation.

    The callable currently does the following:

    1. Read the image from "file_name"
    2. Applies geometric transforms to the image and annotation
    3. Find and applies suitable cropping to the image and annotation
    4. Prepare image and annotation to Tensors
    """

    @configurable
    def __init__(
        self,
        is_train=True,
        *,
        name,
        num_queries,
        meta,
        augmentations,
        image_format,
        ignore_label,
        size_divisibility,
        task_seq_len,
        max_seq_len,
        semantic_prob,
        instance_prob,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            is_train: for training or inference
            name: name of the (first) training dataset this mapper serves
            num_queries: number of object queries, i.e. length of the per-image text list
            meta: dataset metadata (stuff class names, thing-id mapping, ...)
            augmentations: a list of augmentations or deterministic transforms to apply
            image_format: an image format supported by :func:`detection_utils.read_image`.
            ignore_label: the label that is ignored to evaluation
            size_divisibility: pad image size to be divisible by this value
            task_seq_len: max token length when tokenizing the task string
            max_seq_len: max token length when tokenizing per-query texts
            semantic_prob: sampling threshold for the semantic task
            instance_prob: cumulative sampling threshold for the instance task
        """
        self.is_train = is_train
        self.meta = meta
        self.name = name
        self.tfm_gens = augmentations
        self.img_format = image_format
        self.ignore_label = ignore_label
        self.size_divisibility = size_divisibility
        self.num_queries = num_queries

        logger = logging.getLogger(__name__)
        mode = "training" if is_train else "inference"
        logger.info(f"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations}")

        # Contiguous ids of "thing" categories; all other ids are "stuff".
        self.things = []
        for k, v in self.meta.thing_dataset_id_to_contiguous_id.items():
            self.things.append(v)
        self.class_names = self.meta.stuff_classes
        self.text_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=max_seq_len)
        self.task_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=task_seq_len)
        self.semantic_prob = semantic_prob
        self.instance_prob = instance_prob

    @classmethod
    def from_config(cls, cfg, is_train=True):
        """Derive constructor arguments from a detectron2 config node."""
        # Build augmentation
        augs = [
            T.ResizeShortestEdge(
                cfg.INPUT.MIN_SIZE_TRAIN,
                cfg.INPUT.MAX_SIZE_TRAIN,
                cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,
            )
        ]
        if cfg.INPUT.CROP.ENABLED:
            augs.append(
                T.RandomCrop_CategoryAreaConstraint(
                    cfg.INPUT.CROP.TYPE,
                    cfg.INPUT.CROP.SIZE,
                    cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA,
                    cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,
                )
            )
        if cfg.INPUT.COLOR_AUG_SSD:
            augs.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))
        augs.append(T.RandomFlip())

        # Assume always applies to the training set.
        dataset_names = cfg.DATASETS.TRAIN
        meta = MetadataCatalog.get(dataset_names[0])
        ignore_label = meta.ignore_label

        ret = {
            "is_train": is_train,
            "meta": meta,
            "name": dataset_names[0],
            "num_queries": cfg.MODEL.ONE_FORMER.NUM_OBJECT_QUERIES - cfg.MODEL.TEXT_ENCODER.N_CTX,
            "task_seq_len": cfg.INPUT.TASK_SEQ_LEN,
            "max_seq_len": cfg.INPUT.MAX_SEQ_LEN,
            "augmentations": augs,
            "image_format": cfg.INPUT.FORMAT,
            "ignore_label": ignore_label,
            "size_divisibility": cfg.INPUT.SIZE_DIVISIBILITY,
            "semantic_prob": cfg.INPUT.TASK_PROB.SEMANTIC,
            "instance_prob": cfg.INPUT.TASK_PROB.INSTANCE,
        }
        return ret

    def _fill_class_texts(self, texts, num_class_obj):
        # Overwrite the generic per-query captions in place, one caption per
        # annotated object, iterating classes in order; objects beyond
        # len(texts) (= num_queries) are dropped.
        num = 0
        for cls_name in self.class_names:
            for _ in range(num_class_obj[cls_name]):
                if num >= len(texts):
                    return
                texts[num] = f"a photo with a {cls_name}"
                num += 1

    def _get_semantic_dict(self, pan_seg_gt, image_shape, segments_info, num_class_obj):
        """Build semantic-task ground truth: one merged mask per class."""
        pan_seg_gt = pan_seg_gt.numpy()
        instances = Instances(image_shape)

        classes = []
        texts = ["a semantic photo"] * self.num_queries
        masks = []
        label = np.ones_like(pan_seg_gt) * self.ignore_label

        for segment_info in segments_info:
            class_id = segment_info["category_id"]
            if not segment_info["iscrowd"]:
                mask = pan_seg_gt == segment_info["id"]
                if mask.any():
                    if class_id not in classes:
                        cls_name = self.class_names[class_id]
                        classes.append(class_id)
                        masks.append(mask)
                        num_class_obj[cls_name] += 1
                    else:
                        # Merge segments of a class already seen into one mask.
                        # NOTE: `np.bool` was removed in NumPy 1.24; the builtin
                        # `bool` is the supported spelling of the same dtype.
                        idx = classes.index(class_id)
                        masks[idx] += mask
                        masks[idx] = np.clip(masks[idx], 0, 1).astype(bool)
                    label[mask] = class_id

        self._fill_class_texts(texts, num_class_obj)

        classes = np.array(classes)
        instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
        if len(masks) == 0:
            # Some image does not have annotation (all ignored)
            instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))
            instances.gt_bboxes = torch.zeros((0, 4))
        else:
            masks = BitMasks(
                torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])
            )
            instances.gt_masks = masks.tensor
            # Placeholder bounding boxes for stuff regions. Note that these are not used during training.
            instances.gt_bboxes = torch.stack([torch.tensor([0., 0., 1., 1.])] * instances.gt_masks.shape[0])
        return instances, texts, label

    def _get_instance_dict(self, pan_seg_gt, image_shape, segments_info, num_class_obj):
        """Build instance-task ground truth: one mask per "thing" segment."""
        pan_seg_gt = pan_seg_gt.numpy()
        instances = Instances(image_shape)

        classes = []
        texts = ["an instance photo"] * self.num_queries
        masks = []
        label = np.ones_like(pan_seg_gt) * self.ignore_label

        for segment_info in segments_info:
            class_id = segment_info["category_id"]
            if class_id in self.things:
                if not segment_info["iscrowd"]:
                    mask = pan_seg_gt == segment_info["id"]
                    if mask.any():
                        cls_name = self.class_names[class_id]
                        classes.append(class_id)
                        masks.append(mask)
                        num_class_obj[cls_name] += 1
                        label[mask] = class_id

        self._fill_class_texts(texts, num_class_obj)

        classes = np.array(classes)
        instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
        if len(masks) == 0:
            # Some image does not have annotation (all ignored)
            instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))
            instances.gt_bboxes = torch.zeros((0, 4))
        else:
            masks = BitMasks(
                torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])
            )
            instances.gt_masks = masks.tensor
            instances.gt_bboxes = masks_to_boxes(instances.gt_masks)
        return instances, texts, label

    def _get_panoptic_dict(self, pan_seg_gt, image_shape, segments_info, num_class_obj):
        """Build panoptic-task ground truth: one mask per segment (things and stuff)."""
        pan_seg_gt = pan_seg_gt.numpy()
        instances = Instances(image_shape)

        classes = []
        texts = ["a panoptic photo"] * self.num_queries
        masks = []
        label = np.ones_like(pan_seg_gt) * self.ignore_label

        for segment_info in segments_info:
            class_id = segment_info["category_id"]
            if not segment_info["iscrowd"]:
                mask = pan_seg_gt == segment_info["id"]
                if mask.any():
                    cls_name = self.class_names[class_id]
                    classes.append(class_id)
                    masks.append(mask)
                    num_class_obj[cls_name] += 1
                    label[mask] = class_id

        self._fill_class_texts(texts, num_class_obj)

        classes = np.array(classes)
        instances.gt_classes = torch.tensor(classes, dtype=torch.int64)
        if len(masks) == 0:
            # Some image does not have annotation (all ignored)
            instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1]))
            instances.gt_bboxes = torch.zeros((0, 4))
        else:
            masks = BitMasks(
                torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks])
            )
            instances.gt_masks = masks.tensor
            instances.gt_bboxes = masks_to_boxes(instances.gt_masks)
            for i in range(instances.gt_classes.shape[0]):
                # Placeholder bounding boxes for stuff regions. Note that these are not used during training.
                if instances.gt_classes[i].item() not in self.things:
                    instances.gt_bboxes[i] = torch.tensor([0., 0., 1., 1.])
        return instances, texts, label

    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.

        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        assert self.is_train, "OneFormerUnifiedDatasetMapper should only be used for training!"

        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
        utils.check_image_size(dataset_dict, image)

        # semantic segmentation
        if "sem_seg_file_name" in dataset_dict:
            # PyTorch transformation not implemented for uint16, so converting it to double first
            sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name")).astype("double")
        else:
            sem_seg_gt = None

        # panoptic segmentation
        if "pan_seg_file_name" in dataset_dict:
            pan_seg_gt = utils.read_image(dataset_dict.pop("pan_seg_file_name"), "RGB")
            segments_info = dataset_dict["segments_info"]
        else:
            pan_seg_gt = None
            segments_info = None

        if pan_seg_gt is None:
            raise ValueError(
                "Cannot find 'pan_seg_file_name' for panoptic segmentation dataset {}.".format(
                    dataset_dict["file_name"]
                )
            )

        aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
        aug_input, transforms = T.apply_transform_gens(self.tfm_gens, aug_input)
        image = aug_input.image
        if sem_seg_gt is not None:
            sem_seg_gt = aug_input.sem_seg

        # apply the same transformation to panoptic segmentation
        pan_seg_gt = transforms.apply_segmentation(pan_seg_gt)

        from panopticapi.utils import rgb2id

        pan_seg_gt = rgb2id(pan_seg_gt)

        # Pad image and segmentation label here!
        image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        if sem_seg_gt is not None:
            sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
        pan_seg_gt = torch.as_tensor(pan_seg_gt.astype("long"))

        if self.size_divisibility > 0:
            image_size = (image.shape[-2], image.shape[-1])
            padding_size = [
                0,
                self.size_divisibility - image_size[1],
                0,
                self.size_divisibility - image_size[0],
            ]
            image = F.pad(image, padding_size, value=128).contiguous()
            if sem_seg_gt is not None:
                sem_seg_gt = F.pad(sem_seg_gt, padding_size, value=self.ignore_label).contiguous()
            pan_seg_gt = F.pad(
                pan_seg_gt, padding_size, value=0
            ).contiguous()  # 0 is the VOID panoptic label

        image_shape = (image.shape[-2], image.shape[-1])  # h, w

        # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
        # Therefore it's important to use torch.Tensor.
        dataset_dict["image"] = image

        if "annotations" in dataset_dict:
            raise ValueError("Panoptic segmentation dataset should not have 'annotations'.")

        prob_task = np.random.uniform(0, 1.)

        num_class_obj = {}
        for name in self.class_names:
            num_class_obj[name] = 0

        # Sample the training task. The thresholds are cumulative:
        # [0, semantic_prob) -> semantic, [semantic_prob, instance_prob) ->
        # instance, [instance_prob, 1) -> panoptic.
        if prob_task < self.semantic_prob:
            task = "The task is semantic"
            instances, text, sem_seg = self._get_semantic_dict(pan_seg_gt, image_shape, segments_info, num_class_obj)
        elif prob_task < self.instance_prob:
            task = "The task is instance"
            instances, text, sem_seg = self._get_instance_dict(pan_seg_gt, image_shape, segments_info, num_class_obj)
        else:
            task = "The task is panoptic"
            instances, text, sem_seg = self._get_panoptic_dict(pan_seg_gt, image_shape, segments_info, num_class_obj)

        dataset_dict["sem_seg"] = torch.from_numpy(sem_seg).long()
        dataset_dict["instances"] = instances
        dataset_dict["orig_shape"] = image_shape
        dataset_dict["task"] = task
        dataset_dict["text"] = text
        dataset_dict["thing_ids"] = self.things

        return dataset_dict
|
RAVE-main/annotator/oneformer/oneformer/data/datasets/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from . import (
|
| 2 |
+
register_ade20k_panoptic,
|
| 3 |
+
register_cityscapes_panoptic,
|
| 4 |
+
register_coco_panoptic_annos_semseg,
|
| 5 |
+
register_ade20k_instance,
|
| 6 |
+
register_coco_panoptic2instance,
|
| 7 |
+
)
|
RAVE-main/annotator/oneformer/oneformer/data/datasets/register_ade20k_instance.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ------------------------------------------------------------------------------
|
| 2 |
+
# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/data/datasets/register_ade20k_instance.py
|
| 3 |
+
# ------------------------------------------------------------------------------
|
| 4 |
+
|
| 5 |
+
import json
|
| 6 |
+
import logging
|
| 7 |
+
import numpy as np
|
| 8 |
+
import os
|
| 9 |
+
from PIL import Image
|
| 10 |
+
|
| 11 |
+
from annotator.oneformer.detectron2.data import DatasetCatalog, MetadataCatalog
|
| 12 |
+
from annotator.oneformer.detectron2.data.datasets.coco import load_coco_json, register_coco_instances
|
| 13 |
+
from annotator.oneformer.detectron2.utils.file_io import PathManager
|
| 14 |
+
|
| 15 |
+
ADE_CATEGORIES = [{'id': 7, 'name': 'bed'}, {'id': 8, 'name': 'windowpane'}, {'id': 10, 'name': 'cabinet'}, {'id': 12, 'name': 'person'}, {'id': 14, 'name': 'door'}, {'id': 15, 'name': 'table'}, {'id': 18, 'name': 'curtain'}, {'id': 19, 'name': 'chair'}, {'id': 20, 'name': 'car'}, {'id': 22, 'name': 'painting'}, {'id': 23, 'name': 'sofa'}, {'id': 24, 'name': 'shelf'}, {'id': 27, 'name': 'mirror'}, {'id': 30, 'name': 'armchair'}, {'id': 31, 'name': 'seat'}, {'id': 32, 'name': 'fence'}, {'id': 33, 'name': 'desk'}, {'id': 35, 'name': 'wardrobe'}, {'id': 36, 'name': 'lamp'}, {'id': 37, 'name': 'bathtub'}, {'id': 38, 'name': 'railing'}, {'id': 39, 'name': 'cushion'}, {'id': 41, 'name': 'box'}, {'id': 42, 'name': 'column'}, {'id': 43, 'name': 'signboard'}, {'id': 44, 'name': 'chest of drawers'}, {'id': 45, 'name': 'counter'}, {'id': 47, 'name': 'sink'}, {'id': 49, 'name': 'fireplace'}, {'id': 50, 'name': 'refrigerator'}, {'id': 53, 'name': 'stairs'}, {'id': 55, 'name': 'case'}, {'id': 56, 'name': 'pool table'}, {'id': 57, 'name': 'pillow'}, {'id': 58, 'name': 'screen door'}, {'id': 62, 'name': 'bookcase'}, {'id': 64, 'name': 'coffee table'}, {'id': 65, 'name': 'toilet'}, {'id': 66, 'name': 'flower'}, {'id': 67, 'name': 'book'}, {'id': 69, 'name': 'bench'}, {'id': 70, 'name': 'countertop'}, {'id': 71, 'name': 'stove'}, {'id': 72, 'name': 'palm'}, {'id': 73, 'name': 'kitchen island'}, {'id': 74, 'name': 'computer'}, {'id': 75, 'name': 'swivel chair'}, {'id': 76, 'name': 'boat'}, {'id': 78, 'name': 'arcade machine'}, {'id': 80, 'name': 'bus'}, {'id': 81, 'name': 'towel'}, {'id': 82, 'name': 'light'}, {'id': 83, 'name': 'truck'}, {'id': 85, 'name': 'chandelier'}, {'id': 86, 'name': 'awning'}, {'id': 87, 'name': 'streetlight'}, {'id': 88, 'name': 'booth'}, {'id': 89, 'name': 'television receiver'}, {'id': 90, 'name': 'airplane'}, {'id': 92, 'name': 'apparel'}, {'id': 93, 'name': 'pole'}, {'id': 95, 'name': 'bannister'}, {'id': 97, 'name': 'ottoman'}, {'id': 98, 'name': 
'bottle'}, {'id': 102, 'name': 'van'}, {'id': 103, 'name': 'ship'}, {'id': 104, 'name': 'fountain'}, {'id': 107, 'name': 'washer'}, {'id': 108, 'name': 'plaything'}, {'id': 110, 'name': 'stool'}, {'id': 111, 'name': 'barrel'}, {'id': 112, 'name': 'basket'}, {'id': 115, 'name': 'bag'}, {'id': 116, 'name': 'minibike'}, {'id': 118, 'name': 'oven'}, {'id': 119, 'name': 'ball'}, {'id': 120, 'name': 'food'}, {'id': 121, 'name': 'step'}, {'id': 123, 'name': 'trade name'}, {'id': 124, 'name': 'microwave'}, {'id': 125, 'name': 'pot'}, {'id': 126, 'name': 'animal'}, {'id': 127, 'name': 'bicycle'}, {'id': 129, 'name': 'dishwasher'}, {'id': 130, 'name': 'screen'}, {'id': 132, 'name': 'sculpture'}, {'id': 133, 'name': 'hood'}, {'id': 134, 'name': 'sconce'}, {'id': 135, 'name': 'vase'}, {'id': 136, 'name': 'traffic light'}, {'id': 137, 'name': 'tray'}, {'id': 138, 'name': 'ashcan'}, {'id': 139, 'name': 'fan'}, {'id': 142, 'name': 'plate'}, {'id': 143, 'name': 'monitor'}, {'id': 144, 'name': 'bulletin board'}, {'id': 146, 'name': 'radiator'}, {'id': 147, 'name': 'glass'}, {'id': 148, 'name': 'clock'}, {'id': 149, 'name': 'flag'}]
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# Mapping: split name -> (image directory, COCO-format instance json), both
# paths relative to the DETECTRON2_DATASETS root unless the json is a URI.
_PREDEFINED_SPLITS = {
    # point annotations without masks
    "ade20k_instance_train": (
        "ADEChallengeData2016/images/training",
        "ADEChallengeData2016/ade20k_instance_train.json",
    ),
    "ade20k_instance_val": (
        "ADEChallengeData2016/images/validation",
        "ADEChallengeData2016/ade20k_instance_val.json",
    ),
}
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def _get_ade_instances_meta():
    """Return detectron2 metadata for the ADE20k instance splits: the 100
    "thing" class names and a dense remapping of their sparse dataset ids."""
    ids = [cat["id"] for cat in ADE_CATEGORIES]
    assert len(ids) == 100, len(ids)
    # Mapping from the incontiguous ADE category id to an id in [0, 99]
    id_map = {dataset_id: contiguous_id for contiguous_id, dataset_id in enumerate(ids)}
    names = [cat["name"] for cat in ADE_CATEGORIES]
    return {
        "thing_dataset_id_to_contiguous_id": id_map,
        "thing_classes": names,
    }
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def register_all_ade20k_instance(root):
    """Register every predefined ADE20k instance split under *root*."""
    for key, (image_root, json_file) in _PREDEFINED_SPLITS.items():
        # Assume pre-defined datasets live in `./datasets`; a json given as a
        # URI (contains "://") is passed through untouched.
        json_path = json_file if "://" in json_file else os.path.join(root, json_file)
        register_coco_instances(
            key,
            _get_ade_instances_meta(),
            json_path,
            os.path.join(root, image_root),
        )
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
# Root directory holding the detectron2 datasets; overridable via env var.
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
# Register the ADE20k instance splits as an import-time side effect.
register_all_ade20k_instance(_root)
|
RAVE-main/annotator/oneformer/oneformer/data/datasets/register_ade20k_panoptic.py
ADDED
|
@@ -0,0 +1,394 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ------------------------------------------------------------------------------
|
| 2 |
+
# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/data/datasets/register_ade20k_panoptic.py
|
| 3 |
+
# Modified by Jitesh Jain (https://github.com/praeclarumjj3)
|
| 4 |
+
# ------------------------------------------------------------------------------
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
import os
|
| 8 |
+
|
| 9 |
+
from annotator.oneformer.detectron2.data import DatasetCatalog, MetadataCatalog
|
| 10 |
+
from annotator.oneformer.detectron2.utils.file_io import PathManager
|
| 11 |
+
|
| 12 |
+
ADE20K_150_CATEGORIES = [
|
| 13 |
+
{"color": [120, 120, 120], "id": 0, "isthing": 0, "name": "wall"},
|
| 14 |
+
{"color": [180, 120, 120], "id": 1, "isthing": 0, "name": "building"},
|
| 15 |
+
{"color": [6, 230, 230], "id": 2, "isthing": 0, "name": "sky"},
|
| 16 |
+
{"color": [80, 50, 50], "id": 3, "isthing": 0, "name": "floor"},
|
| 17 |
+
{"color": [4, 200, 3], "id": 4, "isthing": 0, "name": "tree"},
|
| 18 |
+
{"color": [120, 120, 80], "id": 5, "isthing": 0, "name": "ceiling"},
|
| 19 |
+
{"color": [140, 140, 140], "id": 6, "isthing": 0, "name": "road, route"},
|
| 20 |
+
{"color": [204, 5, 255], "id": 7, "isthing": 1, "name": "bed"},
|
| 21 |
+
{"color": [230, 230, 230], "id": 8, "isthing": 1, "name": "window "},
|
| 22 |
+
{"color": [4, 250, 7], "id": 9, "isthing": 0, "name": "grass"},
|
| 23 |
+
{"color": [224, 5, 255], "id": 10, "isthing": 1, "name": "cabinet"},
|
| 24 |
+
{"color": [235, 255, 7], "id": 11, "isthing": 0, "name": "sidewalk, pavement"},
|
| 25 |
+
{"color": [150, 5, 61], "id": 12, "isthing": 1, "name": "person"},
|
| 26 |
+
{"color": [120, 120, 70], "id": 13, "isthing": 0, "name": "earth, ground"},
|
| 27 |
+
{"color": [8, 255, 51], "id": 14, "isthing": 1, "name": "door"},
|
| 28 |
+
{"color": [255, 6, 82], "id": 15, "isthing": 1, "name": "table"},
|
| 29 |
+
{"color": [143, 255, 140], "id": 16, "isthing": 0, "name": "mountain, mount"},
|
| 30 |
+
{"color": [204, 255, 4], "id": 17, "isthing": 0, "name": "plant"},
|
| 31 |
+
{"color": [255, 51, 7], "id": 18, "isthing": 1, "name": "curtain"},
|
| 32 |
+
{"color": [204, 70, 3], "id": 19, "isthing": 1, "name": "chair"},
|
| 33 |
+
{"color": [0, 102, 200], "id": 20, "isthing": 1, "name": "car"},
|
| 34 |
+
{"color": [61, 230, 250], "id": 21, "isthing": 0, "name": "water"},
|
| 35 |
+
{"color": [255, 6, 51], "id": 22, "isthing": 1, "name": "painting, picture"},
|
| 36 |
+
{"color": [11, 102, 255], "id": 23, "isthing": 1, "name": "sofa"},
|
| 37 |
+
{"color": [255, 7, 71], "id": 24, "isthing": 1, "name": "shelf"},
|
| 38 |
+
{"color": [255, 9, 224], "id": 25, "isthing": 0, "name": "house"},
|
| 39 |
+
{"color": [9, 7, 230], "id": 26, "isthing": 0, "name": "sea"},
|
| 40 |
+
{"color": [220, 220, 220], "id": 27, "isthing": 1, "name": "mirror"},
|
| 41 |
+
{"color": [255, 9, 92], "id": 28, "isthing": 0, "name": "rug"},
|
| 42 |
+
{"color": [112, 9, 255], "id": 29, "isthing": 0, "name": "field"},
|
| 43 |
+
{"color": [8, 255, 214], "id": 30, "isthing": 1, "name": "armchair"},
|
| 44 |
+
{"color": [7, 255, 224], "id": 31, "isthing": 1, "name": "seat"},
|
| 45 |
+
{"color": [255, 184, 6], "id": 32, "isthing": 1, "name": "fence"},
|
| 46 |
+
{"color": [10, 255, 71], "id": 33, "isthing": 1, "name": "desk"},
|
| 47 |
+
{"color": [255, 41, 10], "id": 34, "isthing": 0, "name": "rock, stone"},
|
| 48 |
+
{"color": [7, 255, 255], "id": 35, "isthing": 1, "name": "wardrobe, closet, press"},
|
| 49 |
+
{"color": [224, 255, 8], "id": 36, "isthing": 1, "name": "lamp"},
|
| 50 |
+
{"color": [102, 8, 255], "id": 37, "isthing": 1, "name": "tub"},
|
| 51 |
+
{"color": [255, 61, 6], "id": 38, "isthing": 1, "name": "rail"},
|
| 52 |
+
{"color": [255, 194, 7], "id": 39, "isthing": 1, "name": "cushion"},
|
| 53 |
+
{"color": [255, 122, 8], "id": 40, "isthing": 0, "name": "base, pedestal, stand"},
|
| 54 |
+
{"color": [0, 255, 20], "id": 41, "isthing": 1, "name": "box"},
|
| 55 |
+
{"color": [255, 8, 41], "id": 42, "isthing": 1, "name": "column, pillar"},
|
| 56 |
+
{"color": [255, 5, 153], "id": 43, "isthing": 1, "name": "signboard, sign"},
|
| 57 |
+
{
|
| 58 |
+
"color": [6, 51, 255],
|
| 59 |
+
"id": 44,
|
| 60 |
+
"isthing": 1,
|
| 61 |
+
"name": "chest of drawers, chest, bureau, dresser",
|
| 62 |
+
},
|
| 63 |
+
{"color": [235, 12, 255], "id": 45, "isthing": 1, "name": "counter"},
|
| 64 |
+
{"color": [160, 150, 20], "id": 46, "isthing": 0, "name": "sand"},
|
| 65 |
+
{"color": [0, 163, 255], "id": 47, "isthing": 1, "name": "sink"},
|
| 66 |
+
{"color": [140, 140, 140], "id": 48, "isthing": 0, "name": "skyscraper"},
|
| 67 |
+
{"color": [250, 10, 15], "id": 49, "isthing": 1, "name": "fireplace"},
|
| 68 |
+
{"color": [20, 255, 0], "id": 50, "isthing": 1, "name": "refrigerator, icebox"},
|
| 69 |
+
{"color": [31, 255, 0], "id": 51, "isthing": 0, "name": "grandstand, covered stand"},
|
| 70 |
+
{"color": [255, 31, 0], "id": 52, "isthing": 0, "name": "path"},
|
| 71 |
+
{"color": [255, 224, 0], "id": 53, "isthing": 1, "name": "stairs"},
|
| 72 |
+
{"color": [153, 255, 0], "id": 54, "isthing": 0, "name": "runway"},
|
| 73 |
+
{"color": [0, 0, 255], "id": 55, "isthing": 1, "name": "case, display case, showcase, vitrine"},
|
| 74 |
+
{
|
| 75 |
+
"color": [255, 71, 0],
|
| 76 |
+
"id": 56,
|
| 77 |
+
"isthing": 1,
|
| 78 |
+
"name": "pool table, billiard table, snooker table",
|
| 79 |
+
},
|
| 80 |
+
{"color": [0, 235, 255], "id": 57, "isthing": 1, "name": "pillow"},
|
| 81 |
+
{"color": [0, 173, 255], "id": 58, "isthing": 1, "name": "screen door, screen"},
|
| 82 |
+
{"color": [31, 0, 255], "id": 59, "isthing": 0, "name": "stairway, staircase"},
|
| 83 |
+
{"color": [11, 200, 200], "id": 60, "isthing": 0, "name": "river"},
|
| 84 |
+
{"color": [255, 82, 0], "id": 61, "isthing": 0, "name": "bridge, span"},
|
| 85 |
+
{"color": [0, 255, 245], "id": 62, "isthing": 1, "name": "bookcase"},
|
| 86 |
+
{"color": [0, 61, 255], "id": 63, "isthing": 0, "name": "blind, screen"},
|
| 87 |
+
{"color": [0, 255, 112], "id": 64, "isthing": 1, "name": "coffee table"},
|
| 88 |
+
{
|
| 89 |
+
"color": [0, 255, 133],
|
| 90 |
+
"id": 65,
|
| 91 |
+
"isthing": 1,
|
| 92 |
+
"name": "toilet, can, commode, crapper, pot, potty, stool, throne",
|
| 93 |
+
},
|
| 94 |
+
{"color": [255, 0, 0], "id": 66, "isthing": 1, "name": "flower"},
|
| 95 |
+
{"color": [255, 163, 0], "id": 67, "isthing": 1, "name": "book"},
|
| 96 |
+
{"color": [255, 102, 0], "id": 68, "isthing": 0, "name": "hill"},
|
| 97 |
+
{"color": [194, 255, 0], "id": 69, "isthing": 1, "name": "bench"},
|
| 98 |
+
{"color": [0, 143, 255], "id": 70, "isthing": 1, "name": "countertop"},
|
| 99 |
+
{"color": [51, 255, 0], "id": 71, "isthing": 1, "name": "stove"},
|
| 100 |
+
{"color": [0, 82, 255], "id": 72, "isthing": 1, "name": "palm, palm tree"},
|
| 101 |
+
{"color": [0, 255, 41], "id": 73, "isthing": 1, "name": "kitchen island"},
|
| 102 |
+
{"color": [0, 255, 173], "id": 74, "isthing": 1, "name": "computer"},
|
| 103 |
+
{"color": [10, 0, 255], "id": 75, "isthing": 1, "name": "swivel chair"},
|
| 104 |
+
{"color": [173, 255, 0], "id": 76, "isthing": 1, "name": "boat"},
|
| 105 |
+
{"color": [0, 255, 153], "id": 77, "isthing": 0, "name": "bar"},
|
| 106 |
+
{"color": [255, 92, 0], "id": 78, "isthing": 1, "name": "arcade machine"},
|
| 107 |
+
{"color": [255, 0, 255], "id": 79, "isthing": 0, "name": "hovel, hut, hutch, shack, shanty"},
|
| 108 |
+
{"color": [255, 0, 245], "id": 80, "isthing": 1, "name": "bus"},
|
| 109 |
+
{"color": [255, 0, 102], "id": 81, "isthing": 1, "name": "towel"},
|
| 110 |
+
{"color": [255, 173, 0], "id": 82, "isthing": 1, "name": "light"},
|
| 111 |
+
{"color": [255, 0, 20], "id": 83, "isthing": 1, "name": "truck"},
|
| 112 |
+
{"color": [255, 184, 184], "id": 84, "isthing": 0, "name": "tower"},
|
| 113 |
+
{"color": [0, 31, 255], "id": 85, "isthing": 1, "name": "chandelier"},
|
| 114 |
+
{"color": [0, 255, 61], "id": 86, "isthing": 1, "name": "awning, sunshade, sunblind"},
|
| 115 |
+
{"color": [0, 71, 255], "id": 87, "isthing": 1, "name": "street lamp"},
|
| 116 |
+
{"color": [255, 0, 204], "id": 88, "isthing": 1, "name": "booth"},
|
| 117 |
+
{"color": [0, 255, 194], "id": 89, "isthing": 1, "name": "tv"},
|
| 118 |
+
{"color": [0, 255, 82], "id": 90, "isthing": 1, "name": "plane"},
|
| 119 |
+
{"color": [0, 10, 255], "id": 91, "isthing": 0, "name": "dirt track"},
|
| 120 |
+
{"color": [0, 112, 255], "id": 92, "isthing": 1, "name": "clothes"},
|
| 121 |
+
{"color": [51, 0, 255], "id": 93, "isthing": 1, "name": "pole"},
|
| 122 |
+
{"color": [0, 194, 255], "id": 94, "isthing": 0, "name": "land, ground, soil"},
|
| 123 |
+
{
|
| 124 |
+
"color": [0, 122, 255],
|
| 125 |
+
"id": 95,
|
| 126 |
+
"isthing": 1,
|
| 127 |
+
"name": "bannister, banister, balustrade, balusters, handrail",
|
| 128 |
+
},
|
| 129 |
+
{
|
| 130 |
+
"color": [0, 255, 163],
|
| 131 |
+
"id": 96,
|
| 132 |
+
"isthing": 0,
|
| 133 |
+
"name": "escalator, moving staircase, moving stairway",
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"color": [255, 153, 0],
|
| 137 |
+
"id": 97,
|
| 138 |
+
"isthing": 1,
|
| 139 |
+
"name": "ottoman, pouf, pouffe, puff, hassock",
|
| 140 |
+
},
|
| 141 |
+
{"color": [0, 255, 10], "id": 98, "isthing": 1, "name": "bottle"},
|
| 142 |
+
{"color": [255, 112, 0], "id": 99, "isthing": 0, "name": "buffet, counter, sideboard"},
|
| 143 |
+
{
|
| 144 |
+
"color": [143, 255, 0],
|
| 145 |
+
"id": 100,
|
| 146 |
+
"isthing": 0,
|
| 147 |
+
"name": "poster, posting, placard, notice, bill, card",
|
| 148 |
+
},
|
| 149 |
+
{"color": [82, 0, 255], "id": 101, "isthing": 0, "name": "stage"},
|
| 150 |
+
{"color": [163, 255, 0], "id": 102, "isthing": 1, "name": "van"},
|
| 151 |
+
{"color": [255, 235, 0], "id": 103, "isthing": 1, "name": "ship"},
|
| 152 |
+
{"color": [8, 184, 170], "id": 104, "isthing": 1, "name": "fountain"},
|
| 153 |
+
{
|
| 154 |
+
"color": [133, 0, 255],
|
| 155 |
+
"id": 105,
|
| 156 |
+
"isthing": 0,
|
| 157 |
+
"name": "conveyer belt, conveyor belt, conveyer, conveyor, transporter",
|
| 158 |
+
},
|
| 159 |
+
{"color": [0, 255, 92], "id": 106, "isthing": 0, "name": "canopy"},
|
| 160 |
+
{
|
| 161 |
+
"color": [184, 0, 255],
|
| 162 |
+
"id": 107,
|
| 163 |
+
"isthing": 1,
|
| 164 |
+
"name": "washer, automatic washer, washing machine",
|
| 165 |
+
},
|
| 166 |
+
{"color": [255, 0, 31], "id": 108, "isthing": 1, "name": "plaything, toy"},
|
| 167 |
+
{"color": [0, 184, 255], "id": 109, "isthing": 0, "name": "pool"},
|
| 168 |
+
{"color": [0, 214, 255], "id": 110, "isthing": 1, "name": "stool"},
|
| 169 |
+
{"color": [255, 0, 112], "id": 111, "isthing": 1, "name": "barrel, cask"},
|
| 170 |
+
{"color": [92, 255, 0], "id": 112, "isthing": 1, "name": "basket, handbasket"},
|
| 171 |
+
{"color": [0, 224, 255], "id": 113, "isthing": 0, "name": "falls"},
|
| 172 |
+
{"color": [112, 224, 255], "id": 114, "isthing": 0, "name": "tent"},
|
| 173 |
+
{"color": [70, 184, 160], "id": 115, "isthing": 1, "name": "bag"},
|
| 174 |
+
{"color": [163, 0, 255], "id": 116, "isthing": 1, "name": "minibike, motorbike"},
|
| 175 |
+
{"color": [153, 0, 255], "id": 117, "isthing": 0, "name": "cradle"},
|
| 176 |
+
{"color": [71, 255, 0], "id": 118, "isthing": 1, "name": "oven"},
|
| 177 |
+
{"color": [255, 0, 163], "id": 119, "isthing": 1, "name": "ball"},
|
| 178 |
+
{"color": [255, 204, 0], "id": 120, "isthing": 1, "name": "food, solid food"},
|
| 179 |
+
{"color": [255, 0, 143], "id": 121, "isthing": 1, "name": "step, stair"},
|
| 180 |
+
{"color": [0, 255, 235], "id": 122, "isthing": 0, "name": "tank, storage tank"},
|
| 181 |
+
{"color": [133, 255, 0], "id": 123, "isthing": 1, "name": "trade name"},
|
| 182 |
+
{"color": [255, 0, 235], "id": 124, "isthing": 1, "name": "microwave"},
|
| 183 |
+
{"color": [245, 0, 255], "id": 125, "isthing": 1, "name": "pot"},
|
| 184 |
+
{"color": [255, 0, 122], "id": 126, "isthing": 1, "name": "animal"},
|
| 185 |
+
{"color": [255, 245, 0], "id": 127, "isthing": 1, "name": "bicycle"},
|
| 186 |
+
{"color": [10, 190, 212], "id": 128, "isthing": 0, "name": "lake"},
|
| 187 |
+
{"color": [214, 255, 0], "id": 129, "isthing": 1, "name": "dishwasher"},
|
| 188 |
+
{"color": [0, 204, 255], "id": 130, "isthing": 1, "name": "screen"},
|
| 189 |
+
{"color": [20, 0, 255], "id": 131, "isthing": 0, "name": "blanket, cover"},
|
| 190 |
+
{"color": [255, 255, 0], "id": 132, "isthing": 1, "name": "sculpture"},
|
| 191 |
+
{"color": [0, 153, 255], "id": 133, "isthing": 1, "name": "hood, exhaust hood"},
|
| 192 |
+
{"color": [0, 41, 255], "id": 134, "isthing": 1, "name": "sconce"},
|
| 193 |
+
{"color": [0, 255, 204], "id": 135, "isthing": 1, "name": "vase"},
|
| 194 |
+
{"color": [41, 0, 255], "id": 136, "isthing": 1, "name": "traffic light"},
|
| 195 |
+
{"color": [41, 255, 0], "id": 137, "isthing": 1, "name": "tray"},
|
| 196 |
+
{"color": [173, 0, 255], "id": 138, "isthing": 1, "name": "trash can"},
|
| 197 |
+
{"color": [0, 245, 255], "id": 139, "isthing": 1, "name": "fan"},
|
| 198 |
+
{"color": [71, 0, 255], "id": 140, "isthing": 0, "name": "pier"},
|
| 199 |
+
{"color": [122, 0, 255], "id": 141, "isthing": 0, "name": "crt screen"},
|
| 200 |
+
{"color": [0, 255, 184], "id": 142, "isthing": 1, "name": "plate"},
|
| 201 |
+
{"color": [0, 92, 255], "id": 143, "isthing": 1, "name": "monitor"},
|
| 202 |
+
{"color": [184, 255, 0], "id": 144, "isthing": 1, "name": "bulletin board"},
|
| 203 |
+
{"color": [0, 133, 255], "id": 145, "isthing": 0, "name": "shower"},
|
| 204 |
+
{"color": [255, 214, 0], "id": 146, "isthing": 1, "name": "radiator"},
|
| 205 |
+
{"color": [25, 194, 194], "id": 147, "isthing": 1, "name": "glass, drinking glass"},
|
| 206 |
+
{"color": [102, 255, 0], "id": 148, "isthing": 1, "name": "clock"},
|
| 207 |
+
{"color": [92, 0, 255], "id": 149, "isthing": 1, "name": "flag"},
|
| 208 |
+
]
|
| 209 |
+
|
| 210 |
+
ADE20k_COLORS = [k["color"] for k in ADE20K_150_CATEGORIES]
|
| 211 |
+
|
| 212 |
+
# Attach the ADE20k color palette to the plain semantic-segmentation splits
# so visualizations use the same per-class colors as the panoptic datasets.
# A copy of the list is set on each split so later mutation of one split's
# metadata cannot leak into the other.
MetadataCatalog.get("ade20k_sem_seg_train").set(
    stuff_colors=ADE20k_COLORS[:],
)

MetadataCatalog.get("ade20k_sem_seg_val").set(
    stuff_colors=ADE20k_COLORS[:],
)
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
def load_ade20k_panoptic_json(json_file, image_dir, gt_dir, semseg_dir, meta):
    """
    Load ADE20k panoptic annotations from a COCO-style panoptic json.

    Args:
        json_file (str): path to the panoptic json file,
            e.g. "~/coco/annotations/panoptic_train2017.json".
        image_dir (str): directory containing the raw images, e.g. "~/coco/train2017".
        gt_dir (str): directory containing the panoptic annotation PNGs.
        semseg_dir (str): directory containing the semantic annotation PNGs.
        meta (dict): must hold "thing_dataset_id_to_contiguous_id" and
            "stuff_dataset_id_to_contiguous_id" remapping tables.

    Returns:
        list[dict]: a list of dicts in Detectron2 standard format. (See
        `Using Custom Datasets </tutorials/datasets.html>`_ )
    """

    def _remap_category(segment_info, meta):
        # Remap the raw dataset category id to its contiguous training id,
        # tagging each segment as "thing" or "stuff" along the way.
        cat_id = segment_info["category_id"]
        thing_map = meta["thing_dataset_id_to_contiguous_id"]
        if cat_id in thing_map:
            segment_info["category_id"] = thing_map[cat_id]
            segment_info["isthing"] = True
        else:
            segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][cat_id]
            segment_info["isthing"] = False
        return segment_info

    with PathManager.open(json_file) as f:
        json_info = json.load(f)

    ret = []
    for ann in json_info["annotations"]:
        # TODO: currently we assume image and label share a filename stem and
        # images use the ".jpg" extension (as in COCO). Make the extension a
        # user-provided argument if this is ever extended to other COCO-like
        # datasets.
        stem = os.path.splitext(ann["file_name"])[0]
        ret.append(
            {
                "file_name": os.path.join(image_dir, stem + ".jpg"),
                "image_id": ann["image_id"],
                "pan_seg_file_name": os.path.join(gt_dir, ann["file_name"]),
                "sem_seg_file_name": os.path.join(semseg_dir, ann["file_name"]),
                "segments_info": [_remap_category(x, meta) for x in ann["segments_info"]],
            }
        )
    # Sanity-check the first record so misconfigured paths fail fast.
    assert len(ret), f"No images found in {image_dir}!"
    assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"]
    assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"]
    assert PathManager.isfile(ret[0]["sem_seg_file_name"]), ret[0]["sem_seg_file_name"]
    return ret
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
def register_ade20k_panoptic(
    name, metadata, image_root, panoptic_root, semantic_root, panoptic_json, instances_json=None,
):
    """
    Register a "standard" version of the ADE20k panoptic segmentation dataset under `name`.

    The dicts produced by the registered loader follow detectron2's standard
    dataset format, hence "standard".

    Args:
        name (str): the name that identifies a dataset, e.g. "ade20k_panoptic_train".
        metadata (dict): extra metadata associated with this dataset.
        image_root (str): directory which contains all the images.
        panoptic_root (str): directory which contains panoptic annotation images in COCO format.
        semantic_root (str): directory which contains the semantic annotation images.
        panoptic_json (str): path to the json panoptic annotation file in COCO format.
        instances_json (str): path to the json instance annotation file.
    """

    def _loader():
        # Deferred: only runs when the dataset is actually requested.
        return load_ade20k_panoptic_json(
            panoptic_json, image_root, panoptic_root, semantic_root, metadata
        )

    DatasetCatalog.register(name, _loader)
    MetadataCatalog.get(name).set(
        panoptic_root=panoptic_root,
        image_root=image_root,
        panoptic_json=panoptic_json,
        json_file=instances_json,
        evaluator_type="ade20k_panoptic_seg",
        ignore_label=255,  # pixels with this label are excluded from training/eval
        label_divisor=1000,  # panoptic id = category_id * label_divisor + instance_id
        **metadata,
    )
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
# Predefined ADE20k panoptic splits. Each value is a tuple of paths relative
# to the dataset root, in the order consumed by register_all_ade20k_panoptic:
# (image_root, panoptic_root, panoptic_json, semantic_root, instance_json).
_PREDEFINED_SPLITS_ADE20K_PANOPTIC = {
    "ade20k_panoptic_train": (
        "ADEChallengeData2016/images/training",
        "ADEChallengeData2016/ade20k_panoptic_train",
        "ADEChallengeData2016/ade20k_panoptic_train.json",
        "ADEChallengeData2016/annotations_detectron2/training",
        "ADEChallengeData2016/ade20k_instance_train.json",
    ),
    "ade20k_panoptic_val": (
        "ADEChallengeData2016/images/validation",
        "ADEChallengeData2016/ade20k_panoptic_val",
        "ADEChallengeData2016/ade20k_panoptic_val.json",
        "ADEChallengeData2016/annotations_detectron2/validation",
        "ADEChallengeData2016/ade20k_instance_val.json",
    ),
}
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
def get_metadata():
    """
    Build the metadata dict shared by all ADE20k panoptic splits.

    Returns:
        dict: class names/colors plus the dataset-id -> contiguous-id maps
        required by the panoptic loader and the evaluators.
    """
    meta = {}
    # Names and colors are replicated under both "thing_*" and "stuff_*" keys
    # because the current D2 visualizer handles thing and stuff classes
    # differently (a heuristic inherited from Panoptic FPN). Keeping both lets
    # us reuse the existing visualization functions unchanged.
    meta["thing_classes"] = [c["name"] for c in ADE20K_150_CATEGORIES if c["isthing"] == 1]
    meta["thing_colors"] = [c["color"] for c in ADE20K_150_CATEGORIES if c["isthing"] == 1]
    meta["stuff_classes"] = [c["name"] for c in ADE20K_150_CATEGORIES]
    meta["stuff_colors"] = [c["color"] for c in ADE20K_150_CATEGORIES]

    # Map dataset category ids to contiguous [0, #classes) training ids:
    # - original category id: id in the raw dataset, used mainly for evaluation.
    # - contiguous category id: used to train the linear softmax classifier.
    thing_map = {}
    stuff_map = {}
    for idx, cat in enumerate(ADE20K_150_CATEGORIES):
        if cat["isthing"]:
            thing_map[cat["id"]] = idx
        # Every category (thing or stuff) goes into the stuff map so the
        # sem_seg evaluator can be reused as-is.
        stuff_map[cat["id"]] = idx

    meta["thing_dataset_id_to_contiguous_id"] = thing_map
    meta["stuff_dataset_id_to_contiguous_id"] = stuff_map

    return meta
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def register_all_ade20k_panoptic(root):
    """Register every predefined ADE20k panoptic split, rooted at `root`."""
    metadata = get_metadata()
    for prefix, paths in _PREDEFINED_SPLITS_ADE20K_PANOPTIC.items():
        image_root, panoptic_root, panoptic_json, semantic_root, instance_json = paths
        # The "standard" version of a COCO-style panoptic segmentation dataset,
        # e.g. as used by Panoptic-DeepLab.
        register_ade20k_panoptic(
            prefix,
            metadata,
            os.path.join(root, image_root),
            os.path.join(root, panoptic_root),
            os.path.join(root, semantic_root),
            os.path.join(root, panoptic_json),
            os.path.join(root, instance_json),
        )
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
# Register all splits at import time, rooted at $DETECTRON2_DATASETS
# (defaults to "./datasets").
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_ade20k_panoptic(_root)
|
RAVE-main/annotator/oneformer/oneformer/data/datasets/register_cityscapes_panoptic.py
ADDED
|
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ------------------------------------------------------------------------------
|
| 2 |
+
# Reference: https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/datasets/cityscapes_panoptic.py
|
| 3 |
+
# Modified by Jitesh Jain (https://github.com/praeclarumjj3)
|
| 4 |
+
# ------------------------------------------------------------------------------
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
import logging
|
| 8 |
+
import os
|
| 9 |
+
|
| 10 |
+
from annotator.oneformer.detectron2.data import DatasetCatalog, MetadataCatalog
|
| 11 |
+
from annotator.oneformer.detectron2.data.datasets.builtin_meta import CITYSCAPES_CATEGORIES
|
| 12 |
+
from annotator.oneformer.detectron2.utils.file_io import PathManager
|
| 13 |
+
|
| 14 |
+
"""
|
| 15 |
+
This file contains functions to register the Cityscapes panoptic dataset to the DatasetCatalog.
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def get_cityscapes_panoptic_files(image_dir, gt_dir, json_info):
    """
    Pair each panoptic annotation with its source image.

    Args:
        image_dir (str): root of the leftImg8bit split, with one subdir per city.
        gt_dir (str): directory containing the panoptic annotation PNGs.
        json_info (dict): parsed COCO-style panoptic json.

    Returns:
        list[tuple]: one (image_file, label_file, segments_info) per annotation.
    """
    suffix = "_leftImg8bit.png"
    cities = PathManager.ls(image_dir)
    logger.info(f"{len(cities)} cities found in '{image_dir}'.")

    # Index every image by its basename stem so annotations can look it up
    # by "image_id".
    image_dict = {}
    for city in cities:
        city_img_dir = os.path.join(image_dir, city)
        for basename in PathManager.ls(city_img_dir):
            image_file = os.path.join(city_img_dir, basename)
            assert basename.endswith(suffix), basename
            stem = os.path.basename(basename)[: -len(suffix)]
            image_dict[stem] = image_file

    files = []
    for ann in json_info["annotations"]:
        image_file = image_dict.get(ann["image_id"], None)
        assert image_file is not None, "No image {} found for annotation {}".format(
            ann["image_id"], ann["file_name"]
        )
        label_file = os.path.join(gt_dir, ann["file_name"])
        files.append((image_file, label_file, ann["segments_info"]))

    # Sanity-check the first pair so misconfigured paths fail fast.
    assert len(files), "No images found in {}".format(image_dir)
    assert PathManager.isfile(files[0][0]), files[0][0]
    assert PathManager.isfile(files[0][1]), files[0][1]
    return files
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def load_cityscapes_panoptic(image_dir, gt_dir, gt_json, meta):
    """
    Args:
        image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
        gt_dir (str): path to the raw annotations. e.g.,
            "~/cityscapes/gtFine/cityscapes_panoptic_train".
        gt_json (str): path to the json file. e.g.,
            "~/cityscapes/gtFine/cityscapes_panoptic_train.json".
        meta (dict): dictionary containing "thing_dataset_id_to_contiguous_id"
            and "stuff_dataset_id_to_contiguous_id" to map category ids to
            contiguous ids for training.

    Returns:
        list[dict]: a list of dicts in Detectron2 standard format. (See
        `Using Custom Datasets </tutorials/datasets.html>`_ )
    """

    def _convert_category_id(segment_info, meta):
        # Remap the raw category id to its contiguous training id; thing and
        # stuff categories live in separate remapping tables. Unlike the
        # ADE20k loader, this one does not set an "isthing" flag.
        if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
            segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
                segment_info["category_id"]
            ]
        else:
            segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
                segment_info["category_id"]
            ]
        return segment_info

    assert os.path.exists(
        gt_json
    ), "Please run `python cityscapesscripts/preparation/createPanopticImgs.py` to generate label files."  # noqa


    with open(gt_json) as f:
        json_info = json.load(f)

    files = get_cityscapes_panoptic_files(image_dir, gt_dir, json_info)
    ret = []
    for image_file, label_file, segments_info in files:
        # Derive the semantic label path by swapping the "leftImg8bit"
        # folder/suffix for "gtFine" and appending the trainIds suffix.
        # NOTE(review): str.replace rewrites every occurrence of
        # "leftImg8bit" in the path — assumes it only appears in the split
        # directory name and the file suffix; confirm for non-standard roots.
        sem_label_file = (
            image_file.replace("leftImg8bit", "gtFine").split(".")[0] + "_labelTrainIds.png"
        )
        segments_info = [_convert_category_id(x, meta) for x in segments_info]
        ret.append(
            {
                "file_name": image_file,
                # image_id is the first three "_"-separated tokens of the
                # basename (city, sequence, frame).
                "image_id": "_".join(
                    os.path.splitext(os.path.basename(image_file))[0].split("_")[:3]
                ),
                "sem_seg_file_name": sem_label_file,
                "pan_seg_file_name": label_file,
                "segments_info": segments_info,
            }
        )
    # Sanity-check the first record so missing preprocessing fails fast.
    assert len(ret), f"No images found in {image_dir}!"
    assert PathManager.isfile(
        ret[0]["sem_seg_file_name"]
    ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py"  # noqa
    assert PathManager.isfile(
        ret[0]["pan_seg_file_name"]
    ), "Please generate panoptic annotation with python cityscapesscripts/preparation/createPanopticImgs.py"  # noqa
    return ret
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
# Predefined Cityscapes panoptic splits. Each value is a tuple of paths
# relative to the dataset root: (image_dir, gt_dir, gt_json).
_RAW_CITYSCAPES_PANOPTIC_SPLITS = {
    "cityscapes_fine_panoptic_train": (
        "cityscapes/leftImg8bit/train",
        "cityscapes/gtFine/cityscapes_panoptic_train",
        "cityscapes/gtFine/cityscapes_panoptic_train.json",
    ),
    "cityscapes_fine_panoptic_val": (
        "cityscapes/leftImg8bit/val",
        "cityscapes/gtFine/cityscapes_panoptic_val",
        "cityscapes/gtFine/cityscapes_panoptic_val.json",
    ),
    # "cityscapes_fine_panoptic_test": not supported yet
}
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def register_all_cityscapes_panoptic(root):
    """Register every predefined Cityscapes panoptic split, rooted at `root`."""
    meta = {}
    # The following metadata maps contiguous id from [0, #thing categories +
    # #stuff categories) to their names and colors. We replicate the
    # same name and color under "thing_*" and "stuff_*" because the current
    # visualization function in D2 handles thing and stuff classes differently
    # due to some heuristic used in Panoptic FPN. We keep the same naming to
    # enable reusing existing visualization functions.
    thing_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
    thing_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
    stuff_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
    stuff_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]

    meta["thing_classes"] = thing_classes
    meta["thing_colors"] = thing_colors
    meta["stuff_classes"] = stuff_classes
    meta["stuff_colors"] = stuff_colors

    # There are three types of ids in cityscapes panoptic segmentation:
    # (1) category id: like semantic segmentation, it is the class id for each
    # pixel. Since there are some classes not used in evaluation, the category
    # id is not always contiguous and thus we have two set of category ids:
    #    - original category id: category id in the original dataset, mainly
    #      used for evaluation.
    #    - contiguous category id: [0, #classes), in order to train the classifier
    # (2) instance id: this id is used to differentiate different instances from
    # the same category. For "stuff" classes, the instance id is always 0; for
    # "thing" classes, the instance id starts from 1 and 0 is reserved for
    # ignored instances (e.g. crowd annotation).
    # (3) panoptic id: this is the compact id that encode both category and
    # instance id by: category_id * 1000 + instance_id.
    thing_dataset_id_to_contiguous_id = {}
    stuff_dataset_id_to_contiguous_id = {}

    # Cityscapes ships a precomputed "trainId" per category, so we use it
    # directly instead of enumerating.
    for k in CITYSCAPES_CATEGORIES:
        if k["isthing"] == 1:
            thing_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
        else:
            stuff_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]

    meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
    meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id

    for key, (image_dir, gt_dir, gt_json) in _RAW_CITYSCAPES_PANOPTIC_SPLITS.items():
        image_dir = os.path.join(root, image_dir)
        gt_dir = os.path.join(root, gt_dir)
        gt_json = os.path.join(root, gt_json)

        # Allow re-registration (e.g. on module re-import in notebooks).
        if key in DatasetCatalog.list():
            DatasetCatalog.remove(key)

        # Default arguments (x=image_dir, ...) bind the *current* loop values;
        # a plain closure would late-bind and every split would load the last
        # iteration's paths.
        DatasetCatalog.register(
            key, lambda x=image_dir, y=gt_dir, z=gt_json: load_cityscapes_panoptic(x, y, z, meta)
        )
        MetadataCatalog.get(key).set(
            panoptic_root=gt_dir,
            image_root=image_dir,
            panoptic_json=gt_json,
            gt_dir=gt_dir.replace("cityscapes_panoptic_", ""),
            evaluator_type="cityscapes_panoptic_seg",
            ignore_label=255,  # pixels with this label are excluded from training/eval
            label_divisor=1000,  # panoptic id = category_id * label_divisor + instance_id
            **meta,
        )
|
| 197 |
+
|
| 198 |
+
# Register all splits at import time, rooted at $DETECTRON2_DATASETS
# (defaults to "./datasets").
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
register_all_cityscapes_panoptic(_root)
|
RAVE-main/annotator/oneformer/oneformer/data/datasets/register_coco_panoptic2instance.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ------------------------------------------------------------------------------
|
| 2 |
+
# Reference: https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/datasets/builtin.py
|
| 3 |
+
# Modified by Jitesh Jain (https://github.com/praeclarumjj3)
|
| 4 |
+
# ------------------------------------------------------------------------------
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
This file registers pre-defined datasets at hard-coded paths, and their metadata.
|
| 9 |
+
|
| 10 |
+
We hard-code metadata for common datasets. This will enable:
|
| 11 |
+
1. Consistency check when loading the datasets
|
| 12 |
+
2. Use models on these standard datasets directly and run demos,
|
| 13 |
+
without having to download the dataset annotations
|
| 14 |
+
|
| 15 |
+
We hard-code some paths to the dataset that's assumed to
|
| 16 |
+
exist in "./datasets/".
|
| 17 |
+
|
| 18 |
+
Users SHOULD NOT use this file to create new dataset / metadata for new dataset.
|
| 19 |
+
To add new dataset, refer to the tutorial "docs/DATASETS.md".
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
import os
|
| 23 |
+
from annotator.oneformer.detectron2.data.datasets.builtin_meta import _get_builtin_metadata
|
| 24 |
+
from annotator.oneformer.detectron2.data.datasets.coco import register_coco_instances
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
_PREDEFINED_SPLITS_COCO = {
|
| 28 |
+
"coco_2017_val_panoptic2instance": ("coco/val2017", "coco/annotations/panoptic2instances_val2017.json"),
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def register_panoptic2instances_coco(root):
|
| 33 |
+
for key, (image_root, json_file) in _PREDEFINED_SPLITS_COCO.items():
|
| 34 |
+
# Assume pre-defined datasets live in `./datasets`.
|
| 35 |
+
register_coco_instances(
|
| 36 |
+
key,
|
| 37 |
+
_get_builtin_metadata("coco"),
|
| 38 |
+
os.path.join(root, json_file) if "://" not in json_file else json_file,
|
| 39 |
+
os.path.join(root, image_root),
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
_root = os.path.expanduser(os.getenv("DETECTRON2_DATASETS", "datasets"))
|
| 44 |
+
register_panoptic2instances_coco(_root)
|
RAVE-main/annotator/oneformer/oneformer/data/datasets/register_coco_panoptic_annos_semseg.py
ADDED
|
@@ -0,0 +1,367 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ------------------------------------------------------------------------------
|
| 2 |
+
# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/data/datasets/register_coco_panoptic_annos_semseg.py
|
| 3 |
+
# Modified by Jitesh Jain (https://github.com/praeclarumjj3)
|
| 4 |
+
# ------------------------------------------------------------------------------
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
import os
|
| 8 |
+
|
| 9 |
+
from annotator.oneformer.detectron2.data import DatasetCatalog, MetadataCatalog
|
| 10 |
+
from annotator.oneformer.detectron2.data.datasets import load_sem_seg
|
| 11 |
+
from annotator.oneformer.detectron2.data.datasets.builtin_meta import COCO_CATEGORIES
|
| 12 |
+
from annotator.oneformer.detectron2.utils.file_io import PathManager
|
| 13 |
+
import contextlib
|
| 14 |
+
import logging
|
| 15 |
+
import io
|
| 16 |
+
from fvcore.common.timer import Timer
|
| 17 |
+
import annotator.oneformer.pycocotools.mask as mask_util
|
| 18 |
+
from annotator.oneformer.detectron2.structures import BoxMode
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
_PREDEFINED_SPLITS_COCO_PANOPTIC = {
|
| 25 |
+
"coco_2017_train_panoptic": (
|
| 26 |
+
# This is the original panoptic annotation directory
|
| 27 |
+
"coco/panoptic_train2017",
|
| 28 |
+
"coco/annotations/panoptic_train2017.json",
|
| 29 |
+
# This directory contains semantic annotations that are
|
| 30 |
+
# converted from panoptic annotations.
|
| 31 |
+
# It is used by PanopticFPN.
|
| 32 |
+
# You can use the script at detectron2/datasets/prepare_panoptic_fpn.py
|
| 33 |
+
# to create these directories.
|
| 34 |
+
"coco/panoptic_semseg_train2017",
|
| 35 |
+
),
|
| 36 |
+
"coco_2017_val_panoptic": (
|
| 37 |
+
"coco/panoptic_val2017",
|
| 38 |
+
"coco/annotations/panoptic_val2017.json",
|
| 39 |
+
"coco/panoptic_semseg_val2017",
|
| 40 |
+
),
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
def load_coco_instance_json(json_file, image_root, dataset_name=None):
|
| 44 |
+
from annotator.oneformer.pycocotools.coco import COCO
|
| 45 |
+
|
| 46 |
+
timer = Timer()
|
| 47 |
+
json_file = PathManager.get_local_path(json_file)
|
| 48 |
+
with contextlib.redirect_stdout(io.StringIO()):
|
| 49 |
+
coco_api = COCO(json_file)
|
| 50 |
+
if timer.seconds() > 1:
|
| 51 |
+
logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
|
| 52 |
+
|
| 53 |
+
id_map = None
|
| 54 |
+
if dataset_name is not None:
|
| 55 |
+
meta = MetadataCatalog.get(dataset_name)
|
| 56 |
+
cat_ids = sorted(coco_api.getCatIds())
|
| 57 |
+
cats = coco_api.loadCats(cat_ids)
|
| 58 |
+
# The categories in a custom json file may not be sorted.
|
| 59 |
+
thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
|
| 60 |
+
meta.thing_classes = thing_classes
|
| 61 |
+
|
| 62 |
+
# In COCO, certain category ids are artificially removed,
|
| 63 |
+
# and by convention they are always ignored.
|
| 64 |
+
# We deal with COCO's id issue and translate
|
| 65 |
+
# the category ids to contiguous ids in [0, 80).
|
| 66 |
+
|
| 67 |
+
# It works by looking at the "categories" field in the json, therefore
|
| 68 |
+
# if users' own json also have incontiguous ids, we'll
|
| 69 |
+
# apply this mapping as well but print a warning.
|
| 70 |
+
if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
|
| 71 |
+
if "coco" not in dataset_name:
|
| 72 |
+
logger.warning(
|
| 73 |
+
"""
|
| 74 |
+
Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
|
| 75 |
+
"""
|
| 76 |
+
)
|
| 77 |
+
id_map = {v: i for i, v in enumerate(cat_ids)}
|
| 78 |
+
meta.thing_dataset_id_to_contiguous_id = id_map
|
| 79 |
+
|
| 80 |
+
# sort indices for reproducible results
|
| 81 |
+
img_ids = sorted(coco_api.imgs.keys())
|
| 82 |
+
# imgs is a list of dicts, each looks something like:
|
| 83 |
+
# {'license': 4,
|
| 84 |
+
# 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
|
| 85 |
+
# 'file_name': 'COCO_val2014_000000001268.jpg',
|
| 86 |
+
# 'height': 427,
|
| 87 |
+
# 'width': 640,
|
| 88 |
+
# 'date_captured': '2013-11-17 05:57:24',
|
| 89 |
+
# 'id': 1268}
|
| 90 |
+
imgs = coco_api.loadImgs(img_ids)
|
| 91 |
+
# anns is a list[list[dict]], where each dict is an annotation
|
| 92 |
+
# record for an object. The inner list enumerates the objects in an image
|
| 93 |
+
# and the outer list enumerates over images. Example of anns[0]:
|
| 94 |
+
# [{'segmentation': [[192.81,
|
| 95 |
+
# 247.09,
|
| 96 |
+
# ...
|
| 97 |
+
# 219.03,
|
| 98 |
+
# 249.06]],
|
| 99 |
+
# 'area': 1035.749,
|
| 100 |
+
# 'iscrowd': 0,
|
| 101 |
+
# 'image_id': 1268,
|
| 102 |
+
# 'bbox': [192.81, 224.8, 74.73, 33.43],
|
| 103 |
+
# 'category_id': 16,
|
| 104 |
+
# 'id': 42986},
|
| 105 |
+
# ...]
|
| 106 |
+
anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
|
| 107 |
+
total_num_valid_anns = sum([len(x) for x in anns])
|
| 108 |
+
total_num_anns = len(coco_api.anns)
|
| 109 |
+
if total_num_valid_anns < total_num_anns:
|
| 110 |
+
logger.warning(
|
| 111 |
+
f"{json_file} contains {total_num_anns} annotations, but only "
|
| 112 |
+
f"{total_num_valid_anns} of them match to images in the file."
|
| 113 |
+
)
|
| 114 |
+
|
| 115 |
+
if "minival" not in json_file:
|
| 116 |
+
# The popular valminusminival & minival annotations for COCO2014 contain this bug.
|
| 117 |
+
# However the ratio of buggy annotations there is tiny and does not affect accuracy.
|
| 118 |
+
# Therefore we explicitly white-list them.
|
| 119 |
+
ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
|
| 120 |
+
assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
|
| 121 |
+
json_file
|
| 122 |
+
)
|
| 123 |
+
|
| 124 |
+
imgs_anns = list(zip(imgs, anns))
|
| 125 |
+
logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file))
|
| 126 |
+
|
| 127 |
+
dataset_dicts = {}
|
| 128 |
+
|
| 129 |
+
ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"]
|
| 130 |
+
|
| 131 |
+
num_instances_without_valid_segmentation = 0
|
| 132 |
+
|
| 133 |
+
for (img_dict, anno_dict_list) in imgs_anns:
|
| 134 |
+
record = {}
|
| 135 |
+
record["file_name"] = os.path.join(image_root, img_dict["file_name"])
|
| 136 |
+
record["height"] = img_dict["height"]
|
| 137 |
+
record["width"] = img_dict["width"]
|
| 138 |
+
image_id = record["image_id"] = img_dict["id"]
|
| 139 |
+
|
| 140 |
+
objs = []
|
| 141 |
+
for anno in anno_dict_list:
|
| 142 |
+
# Check that the image_id in this annotation is the same as
|
| 143 |
+
# the image_id we're looking at.
|
| 144 |
+
# This fails only when the data parsing logic or the annotation file is buggy.
|
| 145 |
+
|
| 146 |
+
# The original COCO valminusminival2014 & minival2014 annotation files
|
| 147 |
+
# actually contains bugs that, together with certain ways of using COCO API,
|
| 148 |
+
# can trigger this assertion.
|
| 149 |
+
assert anno["image_id"] == image_id
|
| 150 |
+
|
| 151 |
+
assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.'
|
| 152 |
+
|
| 153 |
+
obj = {key: anno[key] for key in ann_keys if key in anno}
|
| 154 |
+
if "bbox" in obj and len(obj["bbox"]) == 0:
|
| 155 |
+
raise ValueError(
|
| 156 |
+
f"One annotation of image {image_id} contains empty 'bbox' value! "
|
| 157 |
+
"This json does not have valid COCO format."
|
| 158 |
+
)
|
| 159 |
+
|
| 160 |
+
segm = anno.get("segmentation", None)
|
| 161 |
+
if segm: # either list[list[float]] or dict(RLE)
|
| 162 |
+
if isinstance(segm, dict):
|
| 163 |
+
if isinstance(segm["counts"], list):
|
| 164 |
+
# convert to compressed RLE
|
| 165 |
+
segm = mask_util.frPyObjects(segm, *segm["size"])
|
| 166 |
+
else:
|
| 167 |
+
# filter out invalid polygons (< 3 points)
|
| 168 |
+
segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
|
| 169 |
+
if len(segm) == 0:
|
| 170 |
+
num_instances_without_valid_segmentation += 1
|
| 171 |
+
continue # ignore this instance
|
| 172 |
+
obj["segmentation"] = segm
|
| 173 |
+
|
| 174 |
+
keypts = anno.get("keypoints", None)
|
| 175 |
+
if keypts: # list[int]
|
| 176 |
+
for idx, v in enumerate(keypts):
|
| 177 |
+
if idx % 3 != 2:
|
| 178 |
+
# COCO's segmentation coordinates are floating points in [0, H or W],
|
| 179 |
+
# but keypoint coordinates are integers in [0, H-1 or W-1]
|
| 180 |
+
# Therefore we assume the coordinates are "pixel indices" and
|
| 181 |
+
# add 0.5 to convert to floating point coordinates.
|
| 182 |
+
keypts[idx] = v + 0.5
|
| 183 |
+
obj["keypoints"] = keypts
|
| 184 |
+
|
| 185 |
+
obj["bbox_mode"] = BoxMode.XYWH_ABS
|
| 186 |
+
if id_map:
|
| 187 |
+
annotation_category_id = obj["category_id"]
|
| 188 |
+
try:
|
| 189 |
+
obj["category_id"] = id_map[annotation_category_id]
|
| 190 |
+
except KeyError as e:
|
| 191 |
+
raise KeyError(
|
| 192 |
+
f"Encountered category_id={annotation_category_id} "
|
| 193 |
+
"but this id does not exist in 'categories' of the json file."
|
| 194 |
+
) from e
|
| 195 |
+
objs.append(obj)
|
| 196 |
+
record["annotations"] = objs
|
| 197 |
+
dataset_dicts[image_id] = record
|
| 198 |
+
|
| 199 |
+
if num_instances_without_valid_segmentation > 0:
|
| 200 |
+
logger.warning(
|
| 201 |
+
"Filtered out {} instances without valid segmentation. ".format(
|
| 202 |
+
num_instances_without_valid_segmentation
|
| 203 |
+
)
|
| 204 |
+
+ "There might be issues in your dataset generation process. Please "
|
| 205 |
+
"check https://detectron2.readthedocs.io/en/latest/tutorials/datasets.html carefully"
|
| 206 |
+
)
|
| 207 |
+
return dataset_dicts
|
| 208 |
+
|
| 209 |
+
def get_metadata():
|
| 210 |
+
meta = {}
|
| 211 |
+
# The following metadata maps contiguous id from [0, #thing categories +
|
| 212 |
+
# #stuff categories) to their names and colors. We have to replica of the
|
| 213 |
+
# same name and color under "thing_*" and "stuff_*" because the current
|
| 214 |
+
# visualization function in D2 handles thing and class classes differently
|
| 215 |
+
# due to some heuristic used in Panoptic FPN. We keep the same naming to
|
| 216 |
+
# enable reusing existing visualization functions.
|
| 217 |
+
thing_classes = [k["name"] for k in COCO_CATEGORIES if k["isthing"] == 1]
|
| 218 |
+
thing_colors = [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 1]
|
| 219 |
+
stuff_classes = [k["name"] for k in COCO_CATEGORIES]
|
| 220 |
+
stuff_colors = [k["color"] for k in COCO_CATEGORIES]
|
| 221 |
+
|
| 222 |
+
meta["thing_classes"] = thing_classes
|
| 223 |
+
meta["thing_colors"] = thing_colors
|
| 224 |
+
meta["stuff_classes"] = stuff_classes
|
| 225 |
+
meta["stuff_colors"] = stuff_colors
|
| 226 |
+
|
| 227 |
+
# Convert category id for training:
|
| 228 |
+
# category id: like semantic segmentation, it is the class id for each
|
| 229 |
+
# pixel. Since there are some classes not used in evaluation, the category
|
| 230 |
+
# id is not always contiguous and thus we have two set of category ids:
|
| 231 |
+
# - original category id: category id in the original dataset, mainly
|
| 232 |
+
# used for evaluation.
|
| 233 |
+
# - contiguous category id: [0, #classes), in order to train the linear
|
| 234 |
+
# softmax classifier.
|
| 235 |
+
thing_dataset_id_to_contiguous_id = {}
|
| 236 |
+
stuff_dataset_id_to_contiguous_id = {}
|
| 237 |
+
|
| 238 |
+
for i, cat in enumerate(COCO_CATEGORIES):
|
| 239 |
+
if cat["isthing"]:
|
| 240 |
+
thing_dataset_id_to_contiguous_id[cat["id"]] = i
|
| 241 |
+
# else:
|
| 242 |
+
# stuff_dataset_id_to_contiguous_id[cat["id"]] = i
|
| 243 |
+
|
| 244 |
+
# in order to use sem_seg evaluator
|
| 245 |
+
stuff_dataset_id_to_contiguous_id[cat["id"]] = i
|
| 246 |
+
|
| 247 |
+
meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
|
| 248 |
+
meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
|
| 249 |
+
|
| 250 |
+
return meta
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def load_coco_panoptic_json(json_file, instances_json, instances_name, image_dir, gt_dir, semseg_dir, meta):
|
| 254 |
+
"""
|
| 255 |
+
Args:
|
| 256 |
+
image_dir (str): path to the raw dataset. e.g., "~/coco/train2017".
|
| 257 |
+
gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017".
|
| 258 |
+
json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json".
|
| 259 |
+
Returns:
|
| 260 |
+
list[dict]: a list of dicts in Detectron2 standard format. (See
|
| 261 |
+
`Using Custom Datasets </tutorials/datasets.html>`_ )
|
| 262 |
+
"""
|
| 263 |
+
|
| 264 |
+
def _convert_category_id(segment_info, meta):
|
| 265 |
+
if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
|
| 266 |
+
segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
|
| 267 |
+
segment_info["category_id"]
|
| 268 |
+
]
|
| 269 |
+
segment_info["isthing"] = True
|
| 270 |
+
else:
|
| 271 |
+
segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
|
| 272 |
+
segment_info["category_id"]
|
| 273 |
+
]
|
| 274 |
+
segment_info["isthing"] = False
|
| 275 |
+
return segment_info
|
| 276 |
+
|
| 277 |
+
with PathManager.open(json_file) as f:
|
| 278 |
+
json_info = json.load(f)
|
| 279 |
+
|
| 280 |
+
instance_data_dicts = load_coco_instance_json(instances_json, image_dir.replace("panoptic_", ""), instances_name)
|
| 281 |
+
|
| 282 |
+
ret = []
|
| 283 |
+
for ann in json_info["annotations"]:
|
| 284 |
+
image_id = int(ann["image_id"])
|
| 285 |
+
# TODO: currently we assume image and label has the same filename but
|
| 286 |
+
# different extension, and images have extension ".jpg" for COCO. Need
|
| 287 |
+
# to make image extension a user-provided argument if we extend this
|
| 288 |
+
# function to support other COCO-like datasets.
|
| 289 |
+
image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg")
|
| 290 |
+
label_file = os.path.join(gt_dir, ann["file_name"])
|
| 291 |
+
sem_label_file = os.path.join(semseg_dir, ann["file_name"])
|
| 292 |
+
segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]]
|
| 293 |
+
ret.append(
|
| 294 |
+
{
|
| 295 |
+
"file_name": image_file,
|
| 296 |
+
"image_id": image_id,
|
| 297 |
+
"pan_seg_file_name": label_file,
|
| 298 |
+
"sem_seg_file_name": sem_label_file,
|
| 299 |
+
"segments_info": segments_info,
|
| 300 |
+
"annotations": instance_data_dicts[image_id]["annotations"],
|
| 301 |
+
}
|
| 302 |
+
)
|
| 303 |
+
assert len(ret), f"No images found in {image_dir}!"
|
| 304 |
+
assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"]
|
| 305 |
+
assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"]
|
| 306 |
+
assert PathManager.isfile(ret[0]["sem_seg_file_name"]), ret[0]["sem_seg_file_name"]
|
| 307 |
+
return ret
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
def register_coco_panoptic_annos_sem_seg(
|
| 311 |
+
name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json, instances_name,
|
| 312 |
+
):
|
| 313 |
+
panoptic_name = name
|
| 314 |
+
delattr(MetadataCatalog.get(panoptic_name), "thing_classes")
|
| 315 |
+
delattr(MetadataCatalog.get(panoptic_name), "thing_colors")
|
| 316 |
+
MetadataCatalog.get(panoptic_name).set(
|
| 317 |
+
thing_classes=metadata["thing_classes"],
|
| 318 |
+
thing_colors=metadata["thing_colors"],
|
| 319 |
+
# thing_dataset_id_to_contiguous_id=metadata["thing_dataset_id_to_contiguous_id"],
|
| 320 |
+
)
|
| 321 |
+
|
| 322 |
+
# the name is "coco_2017_train_panoptic_with_sem_seg" and "coco_2017_val_panoptic_with_sem_seg"
|
| 323 |
+
semantic_name = name + "_with_sem_seg"
|
| 324 |
+
DatasetCatalog.register(
|
| 325 |
+
semantic_name,
|
| 326 |
+
lambda: load_coco_panoptic_json(panoptic_json, instances_json, instances_name, image_root, panoptic_root, sem_seg_root, metadata),
|
| 327 |
+
)
|
| 328 |
+
MetadataCatalog.get(semantic_name).set(
|
| 329 |
+
sem_seg_root=sem_seg_root,
|
| 330 |
+
panoptic_root=panoptic_root,
|
| 331 |
+
image_root=image_root,
|
| 332 |
+
panoptic_json=panoptic_json,
|
| 333 |
+
json_file=instances_json,
|
| 334 |
+
evaluator_type="coco_panoptic_seg",
|
| 335 |
+
ignore_label=255,
|
| 336 |
+
label_divisor=1000,
|
| 337 |
+
**metadata,
|
| 338 |
+
)
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
def register_all_coco_panoptic_annos_sem_seg(root):
|
| 342 |
+
for (
|
| 343 |
+
prefix,
|
| 344 |
+
(panoptic_root, panoptic_json, semantic_root),
|
| 345 |
+
) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items():
|
| 346 |
+
|
| 347 |
+
prefix_instances = prefix[: -len("_panoptic")]
|
| 348 |
+
instances_meta = MetadataCatalog.get(prefix_instances)
|
| 349 |
+
image_root, instances_json = instances_meta.image_root, instances_meta.json_file
|
| 350 |
+
|
| 351 |
+
if 'val' in instances_json:
|
| 352 |
+
instances_json = instances_json.replace('instances_', 'panoptic2instances_')
|
| 353 |
+
|
| 354 |
+
register_coco_panoptic_annos_sem_seg(
|
| 355 |
+
prefix,
|
| 356 |
+
get_metadata(),
|
| 357 |
+
image_root,
|
| 358 |
+
os.path.join(root, panoptic_root),
|
| 359 |
+
os.path.join(root, panoptic_json),
|
| 360 |
+
os.path.join(root, semantic_root),
|
| 361 |
+
instances_json,
|
| 362 |
+
prefix_instances,
|
| 363 |
+
)
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
_root = os.getenv("DETECTRON2_DATASETS", "datasets")
|
| 367 |
+
register_all_coco_panoptic_annos_sem_seg(_root)
|
RAVE-main/annotator/oneformer/oneformer/data/tokenizer.py
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -------------------------------------------------------------------------
|
| 2 |
+
# MIT License
|
| 3 |
+
#
|
| 4 |
+
# Copyright (c) 2021 OpenAI
|
| 5 |
+
#
|
| 6 |
+
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 7 |
+
# of this software and associated documentation files (the "Software"), to deal
|
| 8 |
+
# in the Software without restriction, including without limitation the rights
|
| 9 |
+
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 10 |
+
# copies of the Software, and to permit persons to whom the Software is
|
| 11 |
+
# furnished to do so, subject to the following conditions:
|
| 12 |
+
#
|
| 13 |
+
# The above copyright notice and this permission notice shall be included in all
|
| 14 |
+
# copies or substantial portions of the Software.
|
| 15 |
+
#
|
| 16 |
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 17 |
+
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 18 |
+
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 19 |
+
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 20 |
+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 21 |
+
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 22 |
+
# SOFTWARE.
|
| 23 |
+
#
|
| 24 |
+
# Modified by Jiarui Xu
|
| 25 |
+
# -------------------------------------------------------------------------
|
| 26 |
+
|
| 27 |
+
import gzip
|
| 28 |
+
import html
|
| 29 |
+
import os
|
| 30 |
+
from functools import lru_cache
|
| 31 |
+
|
| 32 |
+
import ftfy
|
| 33 |
+
import regex as re
|
| 34 |
+
import torch
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@lru_cache()
|
| 38 |
+
def default_bpe():
|
| 39 |
+
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'bpe_simple_vocab_16e6.txt.gz')
|
| 40 |
+
|
| 41 |
+
@lru_cache()
|
| 42 |
+
def bytes_to_unicode():
|
| 43 |
+
"""Returns list of utf-8 byte and a corresponding list of unicode strings.
|
| 44 |
+
|
| 45 |
+
The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
|
| 46 |
+
if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent
|
| 47 |
+
coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables
|
| 48 |
+
between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on.
|
| 49 |
+
"""
|
| 50 |
+
bs = list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
|
| 51 |
+
cs = bs[:]
|
| 52 |
+
n = 0
|
| 53 |
+
for b in range(2**8):
|
| 54 |
+
if b not in bs:
|
| 55 |
+
bs.append(b)
|
| 56 |
+
cs.append(2**8 + n)
|
| 57 |
+
n += 1
|
| 58 |
+
cs = [chr(n) for n in cs]
|
| 59 |
+
return dict(zip(bs, cs))
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def get_pairs(word):
|
| 63 |
+
"""Return set of symbol pairs in a word.
|
| 64 |
+
|
| 65 |
+
Word is represented as tuple of symbols (symbols being variable-length strings).
|
| 66 |
+
"""
|
| 67 |
+
pairs = set()
|
| 68 |
+
prev_char = word[0]
|
| 69 |
+
for char in word[1:]:
|
| 70 |
+
pairs.add((prev_char, char))
|
| 71 |
+
prev_char = char
|
| 72 |
+
return pairs
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def basic_clean(text):
|
| 76 |
+
text = ftfy.fix_text(text)
|
| 77 |
+
text = html.unescape(html.unescape(text))
|
| 78 |
+
return text.strip()
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def whitespace_clean(text):
|
| 82 |
+
text = re.sub(r'\s+', ' ', text)
|
| 83 |
+
text = text.strip()
|
| 84 |
+
return text
|
| 85 |
+
|
| 86 |
+
class Tokenize:
|
| 87 |
+
|
| 88 |
+
def __init__(self, tokenizer, max_seq_len=77, truncate=True):
|
| 89 |
+
self.tokenizer = tokenizer
|
| 90 |
+
self.max_seq_len = max_seq_len
|
| 91 |
+
self.truncate = truncate
|
| 92 |
+
|
| 93 |
+
def __call__(self, texts):
|
| 94 |
+
expanded_dim = False
|
| 95 |
+
if isinstance(texts, str):
|
| 96 |
+
texts = [texts]
|
| 97 |
+
expanded_dim = True
|
| 98 |
+
|
| 99 |
+
sot_token = self.tokenizer.encoder['<|startoftext|>']
|
| 100 |
+
eot_token = self.tokenizer.encoder['<|endoftext|>']
|
| 101 |
+
all_tokens = [[sot_token] + self.tokenizer.encode(text) + [eot_token] for text in texts]
|
| 102 |
+
result = torch.zeros(len(all_tokens), self.max_seq_len, dtype=torch.long)
|
| 103 |
+
|
| 104 |
+
for i, tokens in enumerate(all_tokens):
|
| 105 |
+
if len(tokens) > self.max_seq_len:
|
| 106 |
+
if self.truncate:
|
| 107 |
+
tokens = tokens[:self.max_seq_len]
|
| 108 |
+
tokens[-1] = eot_token
|
| 109 |
+
else:
|
| 110 |
+
raise RuntimeError(f'Input {texts[i]} is too long for context length {self.max_seq_len}')
|
| 111 |
+
result[i, :len(tokens)] = torch.tensor(tokens)
|
| 112 |
+
|
| 113 |
+
if expanded_dim:
|
| 114 |
+
return result[0]
|
| 115 |
+
|
| 116 |
+
return result
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
class SimpleTokenizer(object):
|
| 120 |
+
|
| 121 |
+
def __init__(self, bpe_path: str = default_bpe()):
|
| 122 |
+
self.byte_encoder = bytes_to_unicode()
|
| 123 |
+
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
|
| 124 |
+
merges = gzip.open(bpe_path).read().decode('utf-8').split('\n')
|
| 125 |
+
merges = merges[1:49152 - 256 - 2 + 1]
|
| 126 |
+
merges = [tuple(merge.split()) for merge in merges]
|
| 127 |
+
vocab = list(bytes_to_unicode().values())
|
| 128 |
+
vocab = vocab + [v + '</w>' for v in vocab]
|
| 129 |
+
for merge in merges:
|
| 130 |
+
vocab.append(''.join(merge))
|
| 131 |
+
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
|
| 132 |
+
self.encoder = dict(zip(vocab, range(len(vocab))))
|
| 133 |
+
self.decoder = {v: k for k, v in self.encoder.items()}
|
| 134 |
+
self.bpe_ranks = dict(zip(merges, range(len(merges))))
|
| 135 |
+
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
|
| 136 |
+
self.pat = re.compile(
|
| 137 |
+
r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
|
| 138 |
+
re.IGNORECASE)
|
| 139 |
+
|
| 140 |
+
def bpe(self, token):
|
| 141 |
+
if token in self.cache:
|
| 142 |
+
return self.cache[token]
|
| 143 |
+
word = tuple(token[:-1]) + (token[-1] + '</w>', )
|
| 144 |
+
pairs = get_pairs(word)
|
| 145 |
+
|
| 146 |
+
if not pairs:
|
| 147 |
+
return token + '</w>'
|
| 148 |
+
|
| 149 |
+
while True:
|
| 150 |
+
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
|
| 151 |
+
if bigram not in self.bpe_ranks:
|
| 152 |
+
break
|
| 153 |
+
first, second = bigram
|
| 154 |
+
new_word = []
|
| 155 |
+
i = 0
|
| 156 |
+
while i < len(word):
|
| 157 |
+
try:
|
| 158 |
+
j = word.index(first, i)
|
| 159 |
+
new_word.extend(word[i:j])
|
| 160 |
+
i = j
|
| 161 |
+
except: # noqa: E722
|
| 162 |
+
new_word.extend(word[i:])
|
| 163 |
+
break
|
| 164 |
+
|
| 165 |
+
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
|
| 166 |
+
new_word.append(first + second)
|
| 167 |
+
i += 2
|
| 168 |
+
else:
|
| 169 |
+
new_word.append(word[i])
|
| 170 |
+
i += 1
|
| 171 |
+
new_word = tuple(new_word)
|
| 172 |
+
word = new_word
|
| 173 |
+
if len(word) == 1:
|
| 174 |
+
break
|
| 175 |
+
else:
|
| 176 |
+
pairs = get_pairs(word)
|
| 177 |
+
word = ' '.join(word)
|
| 178 |
+
self.cache[token] = word
|
| 179 |
+
return word
|
| 180 |
+
|
| 181 |
+
def encode(self, text):
|
| 182 |
+
bpe_tokens = []
|
| 183 |
+
text = whitespace_clean(basic_clean(text)).lower()
|
| 184 |
+
for token in re.findall(self.pat, text):
|
| 185 |
+
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
|
| 186 |
+
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
|
| 187 |
+
return bpe_tokens
|
| 188 |
+
|
| 189 |
+
def decode(self, tokens):
|
| 190 |
+
text = ''.join([self.decoder[token] for token in tokens])
|
| 191 |
+
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors='replace').replace('</w>', ' ')
|
| 192 |
+
return text
|
RAVE-main/annotator/oneformer/oneformer/demo/colormap.py
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates.
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
An awesome colormap for really neat visualizations.
|
| 5 |
+
Copied from Detectron, and removed gray colors.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
import random
|
| 10 |
+
# Seed the stdlib RNG so random.sample() in random_colors() is
# deterministic across runs.
random.seed(0)

__all__ = ["colormap", "random_color", "random_colors"]
|
| 13 |
+
|
| 14 |
+
# fmt: off
|
| 15 |
+
# RGB:
|
| 16 |
+
# _COLORS = np.array(
|
| 17 |
+
# [
|
| 18 |
+
# 0.000, 0.447, 0.741,
|
| 19 |
+
# 0.850, 0.325, 0.098,
|
| 20 |
+
# 0.929, 0.694, 0.125,
|
| 21 |
+
# 0.494, 0.184, 0.556,
|
| 22 |
+
# 0.466, 0.674, 0.188,
|
| 23 |
+
# 0.301, 0.745, 0.933,
|
| 24 |
+
# 0.635, 0.078, 0.184,
|
| 25 |
+
# 0.300, 0.300, 0.300,
|
| 26 |
+
# 0.600, 0.600, 0.600,
|
| 27 |
+
# 1.000, 0.000, 0.000,
|
| 28 |
+
# 1.000, 0.500, 0.000,
|
| 29 |
+
# 0.749, 0.749, 0.000,
|
| 30 |
+
# 0.000, 1.000, 0.000,
|
| 31 |
+
# 0.000, 0.000, 1.000,
|
| 32 |
+
# 0.667, 0.000, 1.000,
|
| 33 |
+
# 0.333, 0.333, 0.000,
|
| 34 |
+
# 0.333, 0.667, 0.000,
|
| 35 |
+
# 0.333, 1.000, 0.000,
|
| 36 |
+
# 0.667, 0.333, 0.000,
|
| 37 |
+
# 0.667, 0.667, 0.000,
|
| 38 |
+
# 0.667, 1.000, 0.000,
|
| 39 |
+
# 1.000, 0.333, 0.000,
|
| 40 |
+
# 1.000, 0.667, 0.000,
|
| 41 |
+
# 1.000, 1.000, 0.000,
|
| 42 |
+
# 0.000, 0.333, 0.500,
|
| 43 |
+
# 0.000, 0.667, 0.500,
|
| 44 |
+
# 0.000, 1.000, 0.500,
|
| 45 |
+
# 0.333, 0.000, 0.500,
|
| 46 |
+
# 0.333, 0.333, 0.500,
|
| 47 |
+
# 0.333, 0.667, 0.500,
|
| 48 |
+
# 0.333, 1.000, 0.500,
|
| 49 |
+
# 0.667, 0.000, 0.500,
|
| 50 |
+
# 0.667, 0.333, 0.500,
|
| 51 |
+
# 0.667, 0.667, 0.500,
|
| 52 |
+
# 0.667, 1.000, 0.500,
|
| 53 |
+
# 1.000, 0.000, 0.500,
|
| 54 |
+
# 1.000, 0.333, 0.500,
|
| 55 |
+
# 1.000, 0.667, 0.500,
|
| 56 |
+
# 1.000, 1.000, 0.500,
|
| 57 |
+
# 0.000, 0.333, 1.000,
|
| 58 |
+
# 0.000, 0.667, 1.000,
|
| 59 |
+
# 0.000, 1.000, 1.000,
|
| 60 |
+
# 0.333, 0.000, 1.000,
|
| 61 |
+
# 0.333, 0.333, 1.000,
|
| 62 |
+
# 0.333, 0.667, 1.000,
|
| 63 |
+
# 0.333, 1.000, 1.000,
|
| 64 |
+
# 0.667, 0.000, 1.000,
|
| 65 |
+
# 0.667, 0.333, 1.000,
|
| 66 |
+
# 0.667, 0.667, 1.000,
|
| 67 |
+
# 0.667, 1.000, 1.000,
|
| 68 |
+
# 1.000, 0.000, 1.000,
|
| 69 |
+
# 1.000, 0.333, 1.000,
|
| 70 |
+
# 1.000, 0.667, 1.000,
|
| 71 |
+
# 0.333, 0.000, 0.000,
|
| 72 |
+
# 0.500, 0.000, 0.000,
|
| 73 |
+
# 0.667, 0.000, 0.000,
|
| 74 |
+
# 0.833, 0.000, 0.000,
|
| 75 |
+
# 1.000, 0.000, 0.000,
|
| 76 |
+
# 0.000, 0.167, 0.000,
|
| 77 |
+
# 0.000, 0.333, 0.000,
|
| 78 |
+
# 0.000, 0.500, 0.000,
|
| 79 |
+
# 0.000, 0.667, 0.000,
|
| 80 |
+
# 0.000, 0.833, 0.000,
|
| 81 |
+
# 0.000, 1.000, 0.000,
|
| 82 |
+
# 0.000, 0.000, 0.167,
|
| 83 |
+
# 0.000, 0.000, 0.333,
|
| 84 |
+
# 0.000, 0.000, 0.500,
|
| 85 |
+
# 0.000, 0.000, 0.667,
|
| 86 |
+
# 0.000, 0.000, 0.833,
|
| 87 |
+
# 0.000, 0.000, 1.000,
|
| 88 |
+
# 0.000, 0.000, 0.000,
|
| 89 |
+
# 0.143, 0.143, 0.143,
|
| 90 |
+
# 0.857, 0.857, 0.857,
|
| 91 |
+
# 1.000, 1.000, 1.000
|
| 92 |
+
# ]
|
| 93 |
+
# ).astype(np.float32).reshape(-1, 3)
|
| 94 |
+
# fmt: on
|
| 95 |
+
|
| 96 |
+
# Palette of distinct random colors, each a 3-tuple of floats in [0, 1];
# populated at import time by the gen_color() calls below.
_COLORS = []
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def gen_color():
    """Append one new random color (3-tuple of floats in [0, 1]) to _COLORS.

    Draws until the color is distinct from every palette entry and is not
    pure black (mean 0). Implemented as a retry loop rather than the
    original recursion so repeated collisions can never exhaust the
    Python recursion limit; the sequence of RNG draws is unchanged.
    """
    while True:
        color = tuple(np.round(np.random.choice(range(256), size=3) / 255, 3))
        if color not in _COLORS and np.mean(color) != 0.0:
            _COLORS.append(color)
            return
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
# Fill the palette with 300 distinct random colors at import time.
for _color_idx in range(300):
    gen_color()
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def colormap(rgb=False, maximum=255):
    """
    Args:
        rgb (bool): whether to return RGB colors or BGR colors.
        maximum (int): either 255 or 1
    Returns:
        ndarray: a float32 array of Nx3 colors, in range [0, 255] or [0, 1]
    """
    assert maximum in [255, 1], maximum
    # _COLORS here is a plain Python list of 3-tuples, so `_COLORS * maximum`
    # would REPEAT the list `maximum` times (and `c[:, ::-1]` would then raise
    # TypeError). Convert to an ndarray first so `*` scales the values and
    # the return type matches the documented float32 Nx3 array.
    c = np.array(_COLORS, dtype=np.float32) * maximum
    if not rgb:
        c = c[:, ::-1]  # swap the channel axis: RGB -> BGR
    return c
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def random_color(rgb=False, maximum=255):
    """
    Args:
        rgb (bool): whether to return RGB colors or BGR colors.
        maximum (int): either 255 or 1
    Returns:
        ndarray: a float32 vector of 3 numbers
    """
    idx = np.random.randint(0, len(_COLORS))
    # _COLORS[idx] is a tuple; `tuple * 255` would repeat it 255 times
    # instead of scaling. Convert to an ndarray so `*` is element-wise and
    # the return matches the documented "vector of 3 numbers".
    ret = np.array(_COLORS[idx], dtype=np.float32) * maximum
    if not rgb:
        ret = ret[::-1]  # RGB -> BGR
    return ret
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def random_colors(N, rgb=False, maximum=255):
    """
    Args:
        N (int): number of unique colors needed
        rgb (bool): whether to return RGB colors or BGR colors.
        maximum (int): either 255 or 1
    Returns:
        list: N distinct colors, each a float32 ndarray of 3 numbers
    """
    indices = random.sample(range(len(_COLORS)), N)
    # Each palette entry is a tuple; `tuple * 255` would repeat it instead
    # of scaling, so convert to an ndarray before multiplying.
    ret = [np.array(_COLORS[i], dtype=np.float32) * maximum for i in indices]
    if not rgb:
        ret = [x[::-1] for x in ret]  # RGB -> BGR
    return ret
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
if __name__ == "__main__":
    import cv2

    # Visual smoke test: tile the first H*W palette colors as 100px squares
    # over random noise and display the grid in an OpenCV window.
    size = 100
    H, W = 10, 10
    canvas = np.random.rand(H * size, W * size, 3).astype("float32")
    for h in range(H):
        for w in range(W):
            idx = h * W + w
            if idx >= len(_COLORS):
                break
            # A 3-tuple broadcasts over the (size, size, 3) slice.
            canvas[h * size : (h + 1) * size, w * size : (w + 1) * size] = _COLORS[idx]
    cv2.imshow("a", canvas)
    cv2.waitKey(0)  # block until a key is pressed
|