file_path stringlengths 3 280 | file_language stringclasses 66 values | content stringlengths 1 1.04M | repo_name stringlengths 5 92 | repo_stars int64 0 154k | repo_description stringlengths 0 402 | repo_primary_language stringclasses 108 values | developer_username stringlengths 1 25 | developer_name stringlengths 0 30 | developer_company stringlengths 0 82 |
|---|---|---|---|---|---|---|---|---|---|
src/tools/voc_eval_lib/setup.py | Python | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
from os.path import join as pjoin
import numpy as np
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
def find_in_path(name, path):
    """Return the absolute path of *name* found on *path*, or None.

    *path* is an ``os.pathsep``-separated list of directories, searched
    in order; the first existing match wins.
    """
    # adapted from http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
    hits = (
        os.path.abspath(pjoin(directory, name))
        for directory in path.split(os.pathsep)
        if os.path.exists(pjoin(directory, name))
    )
    return next(hits, None)
def locate_cuda():
"""Locate the CUDA environment on the system
Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
and values giving the absolute path to each directory.
Starts by looking for the CUDAHOME env variable. If not found, everything
is based on finding 'nvcc' in the PATH.
Raises EnvironmentError if nvcc or any expected directory is missing.
"""
# first check if the CUDAHOME env variable is in use
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
# otherwise, search the PATH for NVCC
# NOTE(review): looks POSIX-only ('nvcc' with no .exe, the hard-coded
# /usr/local/cuda fallback, and 'lib64' below) — confirm Windows is out of scope.
default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be '
'located in your $PATH. Either add it to your path, or set $CUDAHOME')
# nvcc lives at <home>/bin/nvcc, so the toolkit root is two levels up.
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home':home, 'nvcc':nvcc,
'include': pjoin(home, 'include'),
'lib64': pjoin(home, 'lib64')}
# Fail fast if any expected path (binary or directory) does not exist.
for k, v in cudaconfig.items():
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
return cudaconfig
# Build-time CUDA configuration; import of this setup script fails with
# EnvironmentError when no CUDA toolkit can be located.
CUDA = locate_cuda()
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = np.get_include()
except AttributeError:
# get_numpy_include() is the ancient (pre-1.0) spelling of get_include().
numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
    """Inject deep into distutils to customize how the dispatch
    to gcc/nvcc works.

    Rather than subclassing UnixCCompiler (which would not pick up the
    customizations applied by distutils.sysconfig.customize_compiler),
    monkey-patch the given compiler instance so ``.cu`` sources are
    compiled with nvcc while every other source keeps the default
    compiler.

    With this scheme, each Extension's ``extra_compile_args`` must be a
    dict with 'gcc' and 'nvcc' keys rather than the usual list.
    """
    # Tell the compiler it can process .cu sources.
    self.src_extensions.append('.cu')

    # Save references to the default compiler_so and _compile method.
    default_compiler_so = self.compiler_so
    # Renamed from `super` — the original shadowed the builtin.
    default_compile = self._compile

    # Redefine _compile. This gets executed per object file, but distutils
    # cannot switch compilers based on source extension — we add that here.
    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        if os.path.splitext(src)[1] == '.cu':
            # Use nvcc for .cu files.
            self.set_executable('compiler_so', CUDA['nvcc'])
            # Use only the nvcc subset of extra_postargs, which are 1-1
            # translated from the Extension's dict-valued extra_compile_args.
            postargs = extra_postargs['nvcc']
        else:
            postargs = extra_postargs['gcc']
        default_compile(obj, src, ext, cc_args, postargs, pp_opts)
        # Reset the default compiler_so, which we might have changed for cuda.
        self.compiler_so = default_compiler_so

    # Inject our redefined _compile method into the instance.
    # (Debug `print(extra_postargs)` removed — it spammed every compile.)
    self._compile = _compile
# run the customize_compiler
class custom_build_ext(build_ext):
    """build_ext variant that patches the compiler for .cu sources."""

    def build_extensions(self):
        # Monkey-patch the C compiler first, then hand off to the stock build.
        compiler = self.compiler
        customize_compiler_for_nvcc(compiler)
        build_ext.build_extensions(self)
# Extension modules: two CPU-only Cython extensions (bbox overlaps, CPU NMS)
# and one mixed CUDA/Cython extension (GPU NMS).
ext_modules = [
Extension(
"utils.cython_bbox",
["utils/bbox.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs = [numpy_include]
),
Extension(
"nms.cpu_nms",
["nms/cpu_nms.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs = [numpy_include]
),
Extension('nms.gpu_nms',
['nms/nms_kernel.cu', 'nms/gpu_nms.pyx'],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with gcc
# the implementation of this trick is in customize_compiler() below
# NOTE(review): '-arch=sm_61' hard-codes one GPU architecture; users on
# other GPUs presumably must edit this — confirm intended.
extra_compile_args={'gcc': ["-Wno-unused-function"],
'nvcc': ['-arch=sm_61',
'--ptxas-options=-v',
'-c',
'--compiler-options',
"'-fPIC'"]},
include_dirs = [numpy_include, CUDA['include']]
)
]
setup(
name='tf_faster_rcnn',
ext_modules=ext_modules,
# inject our custom trigger
cmdclass={'build_ext': custom_build_ext},
)
| xingyizhou/CenterNet | 7,541 | Object detection, 3D detection, and pose estimation using center point detection: | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/voc_eval_lib/utils/__init__.py | Python | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
| xingyizhou/CenterNet | 7,541 | Object detection, 3D detection, and pose estimation using center point detection: | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/voc_eval_lib/utils/bbox.pyx | Cython | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Sergey Karayev
# --------------------------------------------------------
cimport cython
import numpy as np
cimport numpy as np
# NumPy 1.20 deprecated and 1.24 removed the `np.float` alias, which broke
# this module at import time; `np.float64` is the dtype it always resolved to.
DTYPE = np.float64
ctypedef np.float_t DTYPE_t
def bbox_overlaps(
np.ndarray[DTYPE_t, ndim=2] boxes,
np.ndarray[DTYPE_t, ndim=2] query_boxes):
"""
Compute pairwise intersection-over-union between two box sets.
Boxes are (x1, y1, x2, y2) in the PASCAL VOC convention: coordinates are
inclusive pixel indices, hence the `+ 1` in every width/height below.
Parameters
----------
boxes: (N, 4) ndarray of float
query_boxes: (K, 4) ndarray of float
Returns
-------
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
cdef unsigned int N = boxes.shape[0]
cdef unsigned int K = query_boxes.shape[0]
cdef np.ndarray[DTYPE_t, ndim=2] overlaps = np.zeros((N, K), dtype=DTYPE)
cdef DTYPE_t iw, ih, box_area
cdef DTYPE_t ua
cdef unsigned int k, n
for k in range(K):
box_area = (
(query_boxes[k, 2] - query_boxes[k, 0] + 1) *
(query_boxes[k, 3] - query_boxes[k, 1] + 1)
)
for n in range(N):
# Intersection width; <= 0 means no horizontal overlap.
iw = (
min(boxes[n, 2], query_boxes[k, 2]) -
max(boxes[n, 0], query_boxes[k, 0]) + 1
)
if iw > 0:
# Intersection height; <= 0 means no vertical overlap.
ih = (
min(boxes[n, 3], query_boxes[k, 3]) -
max(boxes[n, 1], query_boxes[k, 1]) + 1
)
if ih > 0:
# Union area = areaA + areaB - intersection.
ua = float(
(boxes[n, 2] - boxes[n, 0] + 1) *
(boxes[n, 3] - boxes[n, 1] + 1) +
box_area - iw * ih
)
overlaps[n, k] = iw * ih / ua
# Non-intersecting pairs keep the 0.0 from np.zeros.
return overlaps
| xingyizhou/CenterNet | 7,541 | Object detection, 3D detection, and pose estimation using center point detection: | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/voc_eval_lib/utils/blob.py | Python | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Blob helper functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
def im_list_to_blob(ims):
    """Convert a list of images into a network input.

    Assumes images are already prepared (means subtracted, BGR order, ...).
    Each image is copied into the top-left corner of its slot, zero-padded
    on the bottom/right up to the largest height/width in the batch.
    """
    tallest, widest = np.array([im.shape for im in ims]).max(axis=0)[:2]
    blob = np.zeros((len(ims), tallest, widest, 3), dtype=np.float32)
    for idx, im in enumerate(ims):
        rows, cols = im.shape[0], im.shape[1]
        blob[idx, :rows, :cols, :] = im
    return blob
def prep_im_for_blob(im, pixel_means, target_size, max_size):
    """Mean subtract and scale an image for use in a blob.

    Returns the resized float32 image and the scale factor applied.
    """
    out = im.astype(np.float32, copy=False)
    out -= pixel_means
    height_width = out.shape[0:2]
    smallest = np.min(height_width)
    largest = np.max(height_width)
    scale = float(target_size) / float(smallest)
    # Prevent the biggest axis from being more than max_size.
    if np.round(scale * largest) > max_size:
        scale = float(max_size) / float(largest)
    out = cv2.resize(out, None, None, fx=scale, fy=scale,
                     interpolation=cv2.INTER_LINEAR)
    return out, scale
| xingyizhou/CenterNet | 7,541 | Object detection, 3D detection, and pose estimation using center point detection: | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/voc_eval_lib/utils/timer.py | Python | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import time
class Timer(object):
    """A simple accumulating wall-clock timer."""

    def __init__(self):
        # Running aggregates across all tic/toc cycles.
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.average_time = 0.

    def tic(self):
        # time.time instead of time.clock: time.clock does not
        # normalize for multithreading.
        self.start_time = time.time()

    def toc(self, average=True):
        """Record one elapsed interval; return the running average
        (default) or just this interval's duration."""
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        return self.average_time if average else self.diff
| xingyizhou/CenterNet | 7,541 | Object detection, 3D detection, and pose estimation using center point detection: | Python | xingyizhou | Xingyi Zhou | Meta |
src/tools/voc_eval_lib/utils/visualization.py | Python | # --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
# Color names accepted by PIL.ImageColor; a class id is mapped to a stable
# color via `STANDARD_COLORS[class_id % NUM_COLORS]` in draw_bounding_boxes.
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
NUM_COLORS = len(STANDARD_COLORS)
# Prefer a real TrueType font when 'arial.ttf' is resolvable by Pillow;
# otherwise fall back to the built-in bitmap font.
try:
FONT = ImageFont.truetype('arial.ttf', 24)
except IOError:
FONT = ImageFont.load_default()
def _draw_single_box(image, xmin, ymin, xmax, ymax, display_str, font, color='black', thickness=4):
    """Draw one box outline plus a filled label strip onto *image*.

    *image* is a PIL Image; drawing happens in place and the same image
    object is returned. The label strip sits just above the box bottom.
    """
    draw = ImageDraw.Draw(image)
    (left, right, top, bottom) = (xmin, xmax, ymin, ymax)
    draw.line([(left, top), (left, bottom), (right, bottom),
               (right, top), (left, top)], width=thickness, fill=color)
    text_bottom = bottom
    # Reverse list and print from bottom to top.
    # FIX: ImageFont.getsize() was removed in Pillow 10.0; use getbbox()
    # there, keeping getsize() on older Pillow for identical output.
    if hasattr(font, 'getsize'):
        text_width, text_height = font.getsize(display_str)
    else:
        bbox = font.getbbox(display_str)
        text_width, text_height = bbox[2] - bbox[0], bbox[3] - bbox[1]
    margin = np.ceil(0.05 * text_height)
    draw.rectangle(
        [(left, text_bottom - text_height - 2 * margin), (left + text_width,
                                                          text_bottom)],
        fill=color)
    draw.text(
        (left + margin, text_bottom - text_height - margin),
        display_str,
        fill='black',
        font=font)
    return image
def draw_bounding_boxes(image, gt_boxes, im_info):
    """Overlay ground-truth boxes on image[0] and return the batch.

    Box coordinates are divided by im_info[2] (the resize scale) before
    drawing; column 4 of each row is taken as the class id.
    """
    boxes = gt_boxes.copy()
    boxes[:, :4] = np.round(boxes[:, :4].copy() / im_info[2])
    canvas = Image.fromarray(np.uint8(image[0]))
    for idx in range(boxes.shape[0]):
        cls = int(boxes[idx, 4])
        canvas = _draw_single_box(canvas,
                                  boxes[idx, 0],
                                  boxes[idx, 1],
                                  boxes[idx, 2],
                                  boxes[idx, 3],
                                  'N%02d-C%02d' % (idx, cls),
                                  FONT,
                                  color=STANDARD_COLORS[cls % NUM_COLORS])
    image[0, :] = np.array(canvas)
    return image
| xingyizhou/CenterNet | 7,541 | Object detection, 3D detection, and pose estimation using center point detection: | Python | xingyizhou | Xingyi Zhou | Meta |
.github/workflows/levenshtein.js | JavaScript | /*
Copyright (c) 2011 Andrei Mackenzie
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
// Compute the edit distance between the two given strings
exports.getEditDistance = function(a, b){
if(a.length == 0) return b.length;
if(b.length == 0) return a.length;
var matrix = [];
// increment along the first column of each row
var i;
for(i = 0; i <= b.length; i++){
matrix[i] = [i];
}
// increment each column in the first row
var j;
for(j = 0; j <= a.length; j++){
matrix[0][j] = j;
}
// Fill in the rest of the matrix
for(i = 1; i <= b.length; i++){
for(j = 1; j <= a.length; j++){
if(b.charAt(i-1) == a.charAt(j-1)){
matrix[i][j] = matrix[i-1][j-1];
} else {
matrix[i][j] = Math.min(matrix[i-1][j-1] + 1, // substitution
Math.min(matrix[i][j-1] + 1, // insertion
matrix[i-1][j] + 1)); // deletion
}
}
}
return matrix[b.length][a.length];
};
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/__init__.py | Python | from .modeling.meta_arch.centernet_detector import CenterNetDetector
from .modeling.dense_heads.centernet import CenterNet
from .modeling.roi_heads.custom_roi_heads import CustomROIHeads, CustomCascadeROIHeads
from .modeling.backbone.fpn_p5 import build_p67_resnet_fpn_backbone
from .modeling.backbone.dla import build_dla_backbone
from .modeling.backbone.dlafpn import build_dla_fpn3_backbone
from .modeling.backbone.bifpn import build_resnet_bifpn_backbone
from .modeling.backbone.bifpn_fcos import build_fcos_resnet_bifpn_backbone
from .modeling.backbone.res2net import build_p67_res2net_fpn_backbone
from .data.datasets.objects365 import categories_v1
from .data.datasets.coco import _PREDEFINED_SPLITS_COCO
from .data.datasets import nuimages
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/config.py | Python | from detectron2.config import CfgNode as CN
def add_centernet_config(cfg):
"""Register every CenterNet2-specific config key (with its default) on cfg."""
_C = cfg
# --- CenterNet dense head: classes, FPN levels, and inference thresholds ---
_C.MODEL.CENTERNET = CN()
_C.MODEL.CENTERNET.NUM_CLASSES = 80
_C.MODEL.CENTERNET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"]
_C.MODEL.CENTERNET.FPN_STRIDES = [8, 16, 32, 64, 128]
_C.MODEL.CENTERNET.PRIOR_PROB = 0.01
_C.MODEL.CENTERNET.INFERENCE_TH = 0.05
_C.MODEL.CENTERNET.CENTER_NMS = False
_C.MODEL.CENTERNET.NMS_TH_TRAIN = 0.6
_C.MODEL.CENTERNET.NMS_TH_TEST = 0.6
_C.MODEL.CENTERNET.PRE_NMS_TOPK_TRAIN = 1000
_C.MODEL.CENTERNET.POST_NMS_TOPK_TRAIN = 100
_C.MODEL.CENTERNET.PRE_NMS_TOPK_TEST = 1000
_C.MODEL.CENTERNET.POST_NMS_TOPK_TEST = 100
# --- Head tower architecture ---
_C.MODEL.CENTERNET.NORM = "GN"
_C.MODEL.CENTERNET.USE_DEFORMABLE = False
_C.MODEL.CENTERNET.NUM_CLS_CONVS = 4
_C.MODEL.CENTERNET.NUM_BOX_CONVS = 4
_C.MODEL.CENTERNET.NUM_SHARE_CONVS = 0
# --- Loss settings ---
_C.MODEL.CENTERNET.LOC_LOSS_TYPE = 'giou'
_C.MODEL.CENTERNET.SIGMOID_CLAMP = 1e-4
_C.MODEL.CENTERNET.HM_MIN_OVERLAP = 0.8
_C.MODEL.CENTERNET.MIN_RADIUS = 4
# Size-of-interest ranges assigning object scales to FPN levels.
_C.MODEL.CENTERNET.SOI = [[0, 80], [64, 160], [128, 320], [256, 640], [512, 10000000]]
_C.MODEL.CENTERNET.POS_WEIGHT = 1.
_C.MODEL.CENTERNET.NEG_WEIGHT = 1.
_C.MODEL.CENTERNET.REG_WEIGHT = 2.
_C.MODEL.CENTERNET.HM_FOCAL_BETA = 4
_C.MODEL.CENTERNET.HM_FOCAL_ALPHA = 0.25
_C.MODEL.CENTERNET.LOSS_GAMMA = 2.0
# --- Proposal-mode switches (two-stage CenterNet2) ---
_C.MODEL.CENTERNET.WITH_AGN_HM = False
_C.MODEL.CENTERNET.ONLY_PROPOSAL = False
_C.MODEL.CENTERNET.AS_PROPOSAL = False
_C.MODEL.CENTERNET.IGNORE_HIGH_FP = -1.
_C.MODEL.CENTERNET.MORE_POS = False
_C.MODEL.CENTERNET.MORE_POS_THRESH = 0.2
_C.MODEL.CENTERNET.MORE_POS_TOPK = 9
_C.MODEL.CENTERNET.NOT_NORM_REG = True
_C.MODEL.CENTERNET.NOT_NMS = False
_C.MODEL.CENTERNET.NO_REDUCE = False
# --- Second-stage ROI box head: classification-loss variants ---
_C.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE = False
_C.MODEL.ROI_BOX_HEAD.PRIOR_PROB = 0.01
_C.MODEL.ROI_BOX_HEAD.USE_EQL_LOSS = False
_C.MODEL.ROI_BOX_HEAD.CAT_FREQ_PATH = \
'datasets/lvis/lvis_v1_train_cat_info.json'
_C.MODEL.ROI_BOX_HEAD.EQL_FREQ_CAT = 200
_C.MODEL.ROI_BOX_HEAD.USE_FED_LOSS = False
_C.MODEL.ROI_BOX_HEAD.FED_LOSS_NUM_CAT = 50
_C.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT = 0.5
_C.MODEL.ROI_BOX_HEAD.MULT_PROPOSAL_SCORE = False
# --- BiFPN backbone options ---
_C.MODEL.BIFPN = CN()
_C.MODEL.BIFPN.NUM_LEVELS = 5
_C.MODEL.BIFPN.NUM_BIFPN = 6
_C.MODEL.BIFPN.NORM = 'GN'
_C.MODEL.BIFPN.OUT_CHANNELS = 160
_C.MODEL.BIFPN.SEPARABLE_CONV = False
# --- DLA backbone options ---
_C.MODEL.DLA = CN()
_C.MODEL.DLA.OUT_FEATURES = ['dla2']
_C.MODEL.DLA.USE_DLA_UP = True
_C.MODEL.DLA.NUM_LAYERS = 34
_C.MODEL.DLA.MS_OUTPUT = False
_C.MODEL.DLA.NORM = 'BN'
_C.MODEL.DLA.DLAUP_IN_FEATURES = ['dla3', 'dla4', 'dla5']
_C.MODEL.DLA.DLAUP_NODE = 'conv'
# --- Solver / input pipeline extensions ---
_C.SOLVER.RESET_ITER = False
_C.SOLVER.TRAIN_ITER = -1
_C.INPUT.CUSTOM_AUG = ''
_C.INPUT.TRAIN_SIZE = 640
_C.INPUT.TEST_SIZE = 640
_C.INPUT.SCALE_RANGE = (0.1, 2.)
# 'default' for fixed short/ long edge, 'square' for max size=INPUT.SIZE
_C.INPUT.TEST_INPUT_TYPE = 'default'
_C.INPUT.NOT_CLAMP_BOX = False
# --- Debug / visualization flags ---
_C.DEBUG = False
_C.SAVE_DEBUG = False
_C.SAVE_PTH = False
_C.VIS_THRESH = 0.3
_C.DEBUG_SHOW_NAME = False
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/data/custom_build_augmentation.py | Python | import logging
import numpy as np
import pycocotools.mask as mask_util
import torch
from fvcore.common.file_io import PathManager
from PIL import Image
from detectron2.structures import (
BitMasks,
Boxes,
BoxMode,
Instances,
Keypoints,
PolygonMasks,
RotatedBoxes,
polygons_to_bitmask,
)
from detectron2.data import transforms as T
from .transforms.custom_augmentation_impl import EfficientDetResizeCrop
def build_custom_augmentation(cfg, is_train):
    """
    Create a list of default :class:`Augmentation` from config.
    Now it includes resizing and flipping.
    Returns:
        list[Augmentation]
    """
    aug_name = cfg.INPUT.CUSTOM_AUG
    if aug_name == 'ResizeShortestEdge':
        if is_train:
            resize = T.ResizeShortestEdge(
                cfg.INPUT.MIN_SIZE_TRAIN,
                cfg.INPUT.MAX_SIZE_TRAIN,
                cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING,
            )
        else:
            resize = T.ResizeShortestEdge(
                cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, "choice")
        augmentation = [resize]
    elif aug_name == 'EfficientDetResizeCrop':
        if is_train:
            augmentation = [EfficientDetResizeCrop(
                cfg.INPUT.TRAIN_SIZE, cfg.INPUT.SCALE_RANGE)]
        else:
            # Test time: no scale jitter.
            augmentation = [EfficientDetResizeCrop(
                cfg.INPUT.TEST_SIZE, (1, 1))]
    else:
        assert 0, cfg.INPUT.CUSTOM_AUG
    # Flipping only during training.
    if is_train:
        augmentation.append(T.RandomFlip())
    return augmentation
build_custom_transform_gen = build_custom_augmentation
"""
Alias for backward-compatibility.
""" | xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/data/custom_dataset_dataloader.py | Python | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
import operator
import torch
import torch.utils.data
import json
from detectron2.utils.comm import get_world_size
from detectron2.data import samplers
from torch.utils.data.sampler import BatchSampler, Sampler
from detectron2.data.common import DatasetFromList, MapDataset
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.build import get_detection_dataset_dicts, build_batch_data_loader
from detectron2.data.samplers import TrainingSampler, RepeatFactorTrainingSampler
from detectron2.data.build import worker_init_reset_seed, print_instances_class_histogram
from detectron2.data.build import filter_images_with_only_crowd_annotations
from detectron2.data.build import filter_images_with_few_keypoints
from detectron2.data.build import check_metadata_consistency
from detectron2.data.catalog import MetadataCatalog, DatasetCatalog
from detectron2.utils import comm
import itertools
import math
from collections import defaultdict
from typing import Optional
# from .custom_build_augmentation import build_custom_augmentation
def build_custom_train_loader(cfg, mapper=None):
"""
Modified from detectron2.data.build.build_custom_train_loader, but supports
different samplers
Returns a batched training data loader built from cfg; `mapper` is
required (asserts if None).
"""
source_aware = cfg.DATALOADER.SOURCE_AWARE
if source_aware:
# Tag each record with its dataset index and count per-dataset sizes,
# which MultiDatasetSampler needs for its ratio weighting.
dataset_dicts = get_detection_dataset_dicts_with_source(
cfg.DATASETS.TRAIN,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
)
sizes = [0 for _ in range(len(cfg.DATASETS.TRAIN))]
for d in dataset_dicts:
sizes[d['dataset_source']] += 1
print('dataset sizes', sizes)
else:
dataset_dicts = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
)
dataset = DatasetFromList(dataset_dicts, copy=False)
# A mapper must be supplied by the caller; the default one is deliberately
# not constructed here.
if mapper is None:
assert 0
# mapper = DatasetMapper(cfg, True)
dataset = MapDataset(dataset, mapper)
sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
logger = logging.getLogger(__name__)
logger.info("Using training sampler {}".format(sampler_name))
# TODO avoid if-else?
if sampler_name == "TrainingSampler":
sampler = TrainingSampler(len(dataset))
elif sampler_name == "MultiDatasetSampler":
# Only valid with source-aware dicts (needs 'dataset_source' tags).
assert source_aware
sampler = MultiDatasetSampler(cfg, sizes, dataset_dicts)
elif sampler_name == "RepeatFactorTrainingSampler":
repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
)
sampler = RepeatFactorTrainingSampler(repeat_factors)
elif sampler_name == "ClassAwareSampler":
sampler = ClassAwareSampler(dataset_dicts)
else:
raise ValueError("Unknown training sampler: {}".format(sampler_name))
return build_batch_data_loader(
dataset,
sampler,
cfg.SOLVER.IMS_PER_BATCH,
aspect_ratio_grouping=cfg.DATALOADER.ASPECT_RATIO_GROUPING,
num_workers=cfg.DATALOADER.NUM_WORKERS,
)
class ClassAwareSampler(Sampler):
# Infinite sampler that draws images with probability proportional to a
# per-image class-balance weight, so rare-class images are seen more often.
def __init__(self, dataset_dicts, seed: Optional[int] = None):
"""
Args:
size (int): the total number of data of the underlying dataset to sample from
seed (int): the initial seed of the shuffle. Must be the same
across all workers. If None, will use a random seed shared
among workers (require synchronization among all workers).
"""
self._size = len(dataset_dicts)
assert self._size > 0
if seed is None:
seed = comm.shared_random_seed()
self._seed = int(seed)
self._rank = comm.get_rank()
self._world_size = comm.get_world_size()
self.weights = self._get_class_balance_factor(dataset_dicts)
def __iter__(self):
# Each rank takes every world_size-th index of the shared stream, so
# ranks see disjoint slices of the same seeded sequence.
start = self._rank
yield from itertools.islice(
self._infinite_indices(), start, None, self._world_size)
def _infinite_indices(self):
# Seeded identically on every rank; __iter__ slices it per-rank.
g = torch.Generator()
g.manual_seed(self._seed)
while True:
ids = torch.multinomial(
self.weights, self._size, generator=g,
replacement=True)
yield from ids
def _get_class_balance_factor(self, dataset_dicts, l=1.):
# Weight of an image = sum over its categories of 1 / freq(category)^l,
# where freq counts images (not instances) containing the category.
# 1. For each category c, compute the fraction of images that contain it: f(c)
ret = []
category_freq = defaultdict(int)
for dataset_dict in dataset_dicts: # For each image (without repeats)
cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
for cat_id in cat_ids:
category_freq[cat_id] += 1
for i, dataset_dict in enumerate(dataset_dicts):
cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
ret.append(sum(
[1. / (category_freq[cat_id] ** l) for cat_id in cat_ids]))
return torch.tensor(ret).float()
def get_detection_dataset_dicts_with_source(
        dataset_names, filter_empty=True, min_keypoints=0, proposal_files=None
):
    """Load and merge several datasets, tagging each record with its source.

    Mirrors detectron2's ``get_detection_dataset_dicts`` but writes a
    ``'dataset_source'`` key (the index into *dataset_names*) on every
    record so a source-aware sampler can tell the datasets apart.
    Proposal files are not supported (asserted None).
    """
    assert len(dataset_names)
    dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names]
    # Tag every record with the index of the dataset it came from.
    # (A second, identical emptiness-check loop that preceded this one was
    # redundant and has been removed.)
    for source_id, (dataset_name, dicts) in \
            enumerate(zip(dataset_names, dataset_dicts)):
        assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
        for d in dicts:
            d['dataset_source'] = source_id
        if "annotations" in dicts[0]:
            try:
                class_names = MetadataCatalog.get(dataset_name).thing_classes
                check_metadata_consistency("thing_classes", dataset_name)
                print_instances_class_histogram(dicts, class_names)
            except AttributeError:  # class names are not available for this dataset
                pass
    assert proposal_files is None
    # Flatten into one list, then apply the standard detectron2 filters.
    dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
    has_instances = "annotations" in dataset_dicts[0]
    if filter_empty and has_instances:
        dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
    if min_keypoints > 0 and has_instances:
        dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
    return dataset_dicts
class MultiDatasetSampler(Sampler):
# Infinite sampler over a concatenation of datasets that draws from each
# dataset according to cfg.DATALOADER.DATASET_RATIO rather than raw size.
def __init__(self, cfg, sizes, dataset_dicts, seed: Optional[int] = None):
"""
Args:
size (int): the total number of data of the underlying dataset to sample from
seed (int): the initial seed of the shuffle. Must be the same
across all workers. If None, will use a random seed shared
among workers (require synchronization among all workers).
"""
self.sizes = sizes
dataset_ratio = cfg.DATALOADER.DATASET_RATIO
self._batch_size = cfg.SOLVER.IMS_PER_BATCH
assert len(dataset_ratio) == len(sizes), \
'length of dataset ratio {} should be equal to number if dataset {}'.format(
len(dataset_ratio), len(sizes)
)
if seed is None:
seed = comm.shared_random_seed()
self._seed = int(seed)
self._rank = comm.get_rank()
self._world_size = comm.get_world_size()
self._ims_per_gpu = self._batch_size // self._world_size
self.dataset_ids = torch.tensor(
[d['dataset_source'] for d in dataset_dicts], dtype=torch.long)
# Per-image weight: images of dataset i get weight proportional to
# ratio_i / size_i, so expected draws per dataset follow the ratios.
dataset_weight = [torch.ones(s) * max(sizes) / s * r / sum(dataset_ratio) \
for i, (r, s) in enumerate(zip(dataset_ratio, sizes))]
dataset_weight = torch.cat(dataset_weight)
self.weights = dataset_weight
self.sample_epoch_size = len(self.weights)
def __iter__(self):
# Each rank consumes every world_size-th index of the shared stream.
start = self._rank
yield from itertools.islice(
self._infinite_indices(), start, None, self._world_size)
def _infinite_indices(self):
g = torch.Generator()
g.manual_seed(self._seed)
while True:
ids = torch.multinomial(
self.weights, self.sample_epoch_size, generator=g,
replacement=True)
# Debug: how many samples each dataset contributed this epoch.
nums = [(self.dataset_ids[ids] == i).sum().int().item() \
for i in range(len(self.sizes))]
print('_rank, len, nums', self._rank, len(ids), nums, flush=True)
# print('_rank, len, nums, self.dataset_ids[ids[:10]], ',
# self._rank, len(ids), nums, self.dataset_ids[ids[:10]],
# flush=True)
yield from ids | xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/data/datasets/coco.py | Python | import os
from detectron2.data.datasets.register_coco import register_coco_instances
from detectron2.data.datasets.coco import load_coco_json
from detectron2.data.datasets.builtin_meta import _get_builtin_metadata
from detectron2.data import DatasetCatalog, MetadataCatalog
def register_distill_coco_instances(name, metadata, json_file, image_root):
    """
    add extra_annotation_keys
    """
    assert isinstance(name, str), name
    assert isinstance(json_file, (str, os.PathLike)), json_file
    assert isinstance(image_root, (str, os.PathLike)), image_root

    def _load():
        # Same as the stock COCO loader, but keeps each annotation's 'score'
        # (pseudo-label confidence) on the loaded dicts.
        return load_coco_json(
            json_file, image_root, name, extra_annotation_keys=['score'])

    # 1. register a function which returns dicts
    DatasetCatalog.register(name, _load)
    # 2. Optionally, add metadata about this dataset,
    # since they might be useful in evaluation, visualization or logging
    MetadataCatalog.get(name).set(
        json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
    )
# COCO unlabeled2017 split (image list only, no instance annotations).
_PREDEFINED_SPLITS_COCO = {
"coco_2017_unlabeled": ("coco/unlabeled2017", "coco/annotations/image_info_unlabeled2017.json"),
}
for key, (image_root, json_file) in _PREDEFINED_SPLITS_COCO.items():
register_coco_instances(
key,
_get_builtin_metadata('coco'),
# Remote paths (containing '://') are used verbatim; local ones are
# anchored under the datasets/ directory.
os.path.join("datasets", json_file) if "://" not in json_file else json_file,
os.path.join("datasets", image_root),
)
_PREDEFINED_SPLITS_DISTILL_COCO = {
"coco_un_yolov4_55_0.5": ("coco/unlabeled2017", "coco/annotations/yolov4_cocounlabeled_55_ann0.5.json"),
}
for key, (image_root, json_file) in _PREDEFINED_SPLITS_DISTILL_COCO.items():
register_distill_coco_instances(
key,
_get_builtin_metadata('coco'),
os.path.join("datasets", json_file) if "://" not in json_file else json_file,
os.path.join("datasets", image_root),
) | xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/data/datasets/nuimages.py | Python | from detectron2.data.datasets.register_coco import register_coco_instances
import os
# The 10 nuImages detection classes; ids are already contiguous from 0,
# so no id remapping is needed in _get_builtin_metadata below.
categories = [
{'id': 0, 'name': 'car'},
{'id': 1, 'name': 'truck'},
{'id': 2, 'name': 'trailer'},
{'id': 3, 'name': 'bus'},
{'id': 4, 'name': 'construction_vehicle'},
{'id': 5, 'name': 'bicycle'},
{'id': 6, 'name': 'motorcycle'},
{'id': 7, 'name': 'pedestrian'},
{'id': 8, 'name': 'traffic_cone'},
{'id': 9, 'name': 'barrier'},
]
def _get_builtin_metadata():
    """Build detectron2-style metadata for the nuImages categories."""
    names_by_id = {entry['id']: entry['name'] for entry in categories}
    # Ids are contiguous already, so the contiguous-id map is the identity.
    return {
        "thing_dataset_id_to_contiguous_id": {
            i: i for i in range(len(categories))},
        "thing_classes": [names_by_id[k] for k in sorted(names_by_id)],
    }
# nuImages splits, all registered in COCO format under datasets/nuimages.
_PREDEFINED_SPLITS = {
"nuimages_train": ("nuimages", "nuimages/annotations/nuimages_v1.0-train.json"),
"nuimages_val": ("nuimages", "nuimages/annotations/nuimages_v1.0-val.json"),
"nuimages_mini": ("nuimages", "nuimages/annotations/nuimages_v1.0-mini.json"),
}
for key, (image_root, json_file) in _PREDEFINED_SPLITS.items():
register_coco_instances(
key,
_get_builtin_metadata(),
# Remote paths (containing '://') are used verbatim; local ones are
# anchored under the datasets/ directory.
os.path.join("datasets", json_file) if "://" not in json_file else json_file,
os.path.join("datasets", image_root),
)
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/data/datasets/objects365.py | Python | from detectron2.data.datasets.register_coco import register_coco_instances
import os
categories_v1 = [
{'id': 164, 'name': 'cutting/chopping board'} ,
{'id': 49, 'name': 'tie'} ,
{'id': 306, 'name': 'crosswalk sign'} ,
{'id': 145, 'name': 'gun'} ,
{'id': 14, 'name': 'street lights'} ,
{'id': 223, 'name': 'bar soap'} ,
{'id': 74, 'name': 'wild bird'} ,
{'id': 219, 'name': 'ice cream'} ,
{'id': 37, 'name': 'stool'} ,
{'id': 25, 'name': 'storage box'} ,
{'id': 153, 'name': 'giraffe'} ,
{'id': 52, 'name': 'pen/pencil'} ,
{'id': 61, 'name': 'high heels'} ,
{'id': 340, 'name': 'mangosteen'} ,
{'id': 22, 'name': 'bracelet'} ,
{'id': 155, 'name': 'piano'} ,
{'id': 162, 'name': 'vent'} ,
{'id': 75, 'name': 'laptop'} ,
{'id': 236, 'name': 'toaster'} ,
{'id': 231, 'name': 'fire truck'} ,
{'id': 42, 'name': 'basket'} ,
{'id': 150, 'name': 'zebra'} ,
{'id': 124, 'name': 'head phone'} ,
{'id': 90, 'name': 'sheep'} ,
{'id': 322, 'name': 'steak'} ,
{'id': 39, 'name': 'couch'} ,
{'id': 209, 'name': 'toothbrush'} ,
{'id': 59, 'name': 'bicycle'} ,
{'id': 336, 'name': 'red cabbage'} ,
{'id': 228, 'name': 'golf ball'} ,
{'id': 120, 'name': 'tomato'} ,
{'id': 132, 'name': 'computer box'} ,
{'id': 8, 'name': 'cup'} ,
{'id': 183, 'name': 'basketball'} ,
{'id': 298, 'name': 'butterfly'} ,
{'id': 250, 'name': 'garlic'} ,
{'id': 12, 'name': 'desk'} ,
{'id': 141, 'name': 'microwave'} ,
{'id': 171, 'name': 'strawberry'} ,
{'id': 200, 'name': 'kettle'} ,
{'id': 63, 'name': 'van'} ,
{'id': 300, 'name': 'cheese'} ,
{'id': 215, 'name': 'marker'} ,
{'id': 100, 'name': 'blackboard/whiteboard'} ,
{'id': 186, 'name': 'printer'} ,
{'id': 333, 'name': 'bread/bun'} ,
{'id': 243, 'name': 'penguin'} ,
{'id': 364, 'name': 'iron'} ,
{'id': 180, 'name': 'ladder'} ,
{'id': 34, 'name': 'flag'} ,
{'id': 78, 'name': 'cell phone'} ,
{'id': 97, 'name': 'fan'} ,
{'id': 224, 'name': 'scale'} ,
{'id': 151, 'name': 'duck'} ,
{'id': 319, 'name': 'flute'} ,
{'id': 156, 'name': 'stop sign'} ,
{'id': 290, 'name': 'rickshaw'} ,
{'id': 128, 'name': 'sailboat'} ,
{'id': 165, 'name': 'tennis racket'} ,
{'id': 241, 'name': 'cigar'} ,
{'id': 101, 'name': 'balloon'} ,
{'id': 308, 'name': 'hair drier'} ,
{'id': 167, 'name': 'skating and skiing shoes'} ,
{'id': 237, 'name': 'helicopter'} ,
{'id': 65, 'name': 'sink'} ,
{'id': 129, 'name': 'tangerine'} ,
{'id': 330, 'name': 'crab'} ,
{'id': 320, 'name': 'measuring cup'} ,
{'id': 260, 'name': 'fishing rod'} ,
{'id': 346, 'name': 'saw'} ,
{'id': 216, 'name': 'ship'} ,
{'id': 46, 'name': 'coffee table'} ,
{'id': 194, 'name': 'facial mask'} ,
{'id': 281, 'name': 'stapler'} ,
{'id': 118, 'name': 'refrigerator'} ,
{'id': 40, 'name': 'belt'} ,
{'id': 349, 'name': 'starfish'} ,
{'id': 87, 'name': 'hanger'} ,
{'id': 116, 'name': 'baseball glove'} ,
{'id': 261, 'name': 'cherry'} ,
{'id': 334, 'name': 'baozi'} ,
{'id': 267, 'name': 'screwdriver'} ,
{'id': 158, 'name': 'converter'} ,
{'id': 335, 'name': 'lion'} ,
{'id': 170, 'name': 'baseball'} ,
{'id': 111, 'name': 'skis'} ,
{'id': 136, 'name': 'broccoli'} ,
{'id': 342, 'name': 'eraser'} ,
{'id': 337, 'name': 'polar bear'} ,
{'id': 139, 'name': 'shovel'} ,
{'id': 193, 'name': 'extension cord'} ,
{'id': 284, 'name': 'goldfish'} ,
{'id': 174, 'name': 'pepper'} ,
{'id': 138, 'name': 'stroller'} ,
{'id': 328, 'name': 'yak'} ,
{'id': 83, 'name': 'clock'} ,
{'id': 235, 'name': 'tricycle'} ,
{'id': 248, 'name': 'parking meter'} ,
{'id': 274, 'name': 'trophy'} ,
{'id': 324, 'name': 'binoculars'} ,
{'id': 51, 'name': 'traffic light'} ,
{'id': 314, 'name': 'donkey'} ,
{'id': 45, 'name': 'barrel/bucket'} ,
{'id': 292, 'name': 'pomegranate'} ,
{'id': 13, 'name': 'handbag'} ,
{'id': 262, 'name': 'tablet'} ,
{'id': 68, 'name': 'apple'} ,
{'id': 226, 'name': 'cabbage'} ,
{'id': 23, 'name': 'flower'} ,
{'id': 58, 'name': 'faucet'} ,
{'id': 206, 'name': 'tong'} ,
{'id': 291, 'name': 'trombone'} ,
{'id': 160, 'name': 'carrot'} ,
{'id': 172, 'name': 'bow tie'} ,
{'id': 122, 'name': 'tent'} ,
{'id': 163, 'name': 'cookies'} ,
{'id': 115, 'name': 'remote'} ,
{'id': 175, 'name': 'coffee machine'} ,
{'id': 238, 'name': 'green beans'} ,
{'id': 233, 'name': 'cello'} ,
{'id': 28, 'name': 'wine glass'} ,
{'id': 295, 'name': 'mushroom'} ,
{'id': 344, 'name': 'scallop'} ,
{'id': 125, 'name': 'lantern'} ,
{'id': 123, 'name': 'shampoo/shower gel'} ,
{'id': 285, 'name': 'meat balls'} ,
{'id': 266, 'name': 'key'} ,
{'id': 296, 'name': 'calculator'} ,
{'id': 168, 'name': 'scissors'} ,
{'id': 103, 'name': 'cymbal'} ,
{'id': 6, 'name': 'bottle'} ,
{'id': 264, 'name': 'nuts'} ,
{'id': 234, 'name': 'notepaper'} ,
{'id': 211, 'name': 'mango'} ,
{'id': 287, 'name': 'toothpaste'} ,
{'id': 196, 'name': 'chopsticks'} ,
{'id': 140, 'name': 'baseball bat'} ,
{'id': 244, 'name': 'hurdle'} ,
{'id': 195, 'name': 'tennis ball'} ,
{'id': 144, 'name': 'surveillance camera'} ,
{'id': 271, 'name': 'volleyball'} ,
{'id': 94, 'name': 'keyboard'} ,
{'id': 339, 'name': 'seal'} ,
{'id': 11, 'name': 'picture/frame'} ,
{'id': 348, 'name': 'okra'} ,
{'id': 191, 'name': 'sausage'} ,
{'id': 166, 'name': 'candy'} ,
{'id': 62, 'name': 'ring'} ,
{'id': 311, 'name': 'dolphin'} ,
{'id': 273, 'name': 'eggplant'} ,
{'id': 84, 'name': 'drum'} ,
{'id': 143, 'name': 'surfboard'} ,
{'id': 288, 'name': 'antelope'} ,
{'id': 204, 'name': 'clutch'} ,
{'id': 207, 'name': 'slide'} ,
{'id': 43, 'name': 'towel/napkin'} ,
{'id': 352, 'name': 'durian'} ,
{'id': 276, 'name': 'board eraser'} ,
{'id': 315, 'name': 'electric drill'} ,
{'id': 312, 'name': 'sushi'} ,
{'id': 198, 'name': 'pie'} ,
{'id': 106, 'name': 'pickup truck'} ,
{'id': 176, 'name': 'bathtub'} ,
{'id': 26, 'name': 'vase'} ,
{'id': 133, 'name': 'elephant'} ,
{'id': 256, 'name': 'sandwich'} ,
{'id': 327, 'name': 'noodles'} ,
{'id': 10, 'name': 'glasses'} ,
{'id': 109, 'name': 'airplane'} ,
{'id': 95, 'name': 'tripod'} ,
{'id': 247, 'name': 'CD'} ,
{'id': 121, 'name': 'machinery vehicle'} ,
{'id': 365, 'name': 'flashlight'} ,
{'id': 53, 'name': 'microphone'} ,
{'id': 270, 'name': 'pliers'} ,
{'id': 362, 'name': 'chainsaw'} ,
{'id': 259, 'name': 'bear'} ,
{'id': 197, 'name': 'electronic stove and gas stove'} ,
{'id': 89, 'name': 'pot/pan'} ,
{'id': 220, 'name': 'tape'} ,
{'id': 338, 'name': 'lighter'} ,
{'id': 177, 'name': 'snowboard'} ,
{'id': 214, 'name': 'violin'} ,
{'id': 217, 'name': 'chicken'} ,
{'id': 2, 'name': 'sneakers'} ,
{'id': 161, 'name': 'washing machine'} ,
{'id': 131, 'name': 'kite'} ,
{'id': 354, 'name': 'rabbit'} ,
{'id': 86, 'name': 'bus'} ,
{'id': 275, 'name': 'dates'} ,
{'id': 282, 'name': 'camel'} ,
{'id': 88, 'name': 'nightstand'} ,
{'id': 179, 'name': 'grapes'} ,
{'id': 229, 'name': 'pine apple'} ,
{'id': 56, 'name': 'necklace'} ,
{'id': 18, 'name': 'leather shoes'} ,
{'id': 358, 'name': 'hoverboard'} ,
{'id': 345, 'name': 'pencil case'} ,
{'id': 359, 'name': 'pasta'} ,
{'id': 157, 'name': 'radiator'} ,
{'id': 201, 'name': 'hamburger'} ,
{'id': 268, 'name': 'globe'} ,
{'id': 332, 'name': 'barbell'} ,
{'id': 329, 'name': 'mop'} ,
{'id': 252, 'name': 'horn'} ,
{'id': 350, 'name': 'eagle'} ,
{'id': 169, 'name': 'folder'} ,
{'id': 137, 'name': 'toilet'} ,
{'id': 5, 'name': 'lamp'} ,
{'id': 27, 'name': 'bench'} ,
{'id': 249, 'name': 'swan'} ,
{'id': 76, 'name': 'knife'} ,
{'id': 341, 'name': 'comb'} ,
{'id': 64, 'name': 'watch'} ,
{'id': 105, 'name': 'telephone'} ,
{'id': 3, 'name': 'chair'} ,
{'id': 33, 'name': 'boat'} ,
{'id': 107, 'name': 'orange'} ,
{'id': 60, 'name': 'bread'} ,
{'id': 147, 'name': 'cat'} ,
{'id': 135, 'name': 'gas stove'} ,
{'id': 307, 'name': 'papaya'} ,
{'id': 227, 'name': 'router/modem'} ,
{'id': 357, 'name': 'asparagus'} ,
{'id': 73, 'name': 'motorcycle'} ,
{'id': 77, 'name': 'traffic sign'} ,
{'id': 67, 'name': 'fish'} ,
{'id': 326, 'name': 'radish'} ,
{'id': 213, 'name': 'egg'} ,
{'id': 203, 'name': 'cucumber'} ,
{'id': 17, 'name': 'helmet'} ,
{'id': 110, 'name': 'luggage'} ,
{'id': 80, 'name': 'truck'} ,
{'id': 199, 'name': 'frisbee'} ,
{'id': 232, 'name': 'peach'} ,
{'id': 1, 'name': 'person'} ,
{'id': 29, 'name': 'boots'} ,
{'id': 310, 'name': 'chips'} ,
{'id': 142, 'name': 'skateboard'} ,
{'id': 44, 'name': 'slippers'} ,
{'id': 4, 'name': 'hat'} ,
{'id': 178, 'name': 'suitcase'} ,
{'id': 24, 'name': 'tv'} ,
{'id': 119, 'name': 'train'} ,
{'id': 82, 'name': 'power outlet'} ,
{'id': 245, 'name': 'swing'} ,
{'id': 15, 'name': 'book'} ,
{'id': 294, 'name': 'jellyfish'} ,
{'id': 192, 'name': 'fire extinguisher'} ,
{'id': 212, 'name': 'deer'} ,
{'id': 181, 'name': 'pear'} ,
{'id': 347, 'name': 'table tennis paddle'} ,
{'id': 113, 'name': 'trolley'} ,
{'id': 91, 'name': 'guitar'} ,
{'id': 202, 'name': 'golf club'} ,
{'id': 221, 'name': 'wheelchair'} ,
{'id': 254, 'name': 'saxophone'} ,
{'id': 117, 'name': 'paper towel'} ,
{'id': 303, 'name': 'race car'} ,
{'id': 240, 'name': 'carriage'} ,
{'id': 246, 'name': 'radio'} ,
{'id': 318, 'name': 'parrot'} ,
{'id': 251, 'name': 'french fries'} ,
{'id': 98, 'name': 'dog'} ,
{'id': 112, 'name': 'soccer'} ,
{'id': 355, 'name': 'french horn'} ,
{'id': 79, 'name': 'paddle'} ,
{'id': 283, 'name': 'lettuce'} ,
{'id': 9, 'name': 'car'} ,
{'id': 258, 'name': 'kiwi fruit'} ,
{'id': 325, 'name': 'llama'} ,
{'id': 187, 'name': 'billiards'} ,
{'id': 210, 'name': 'facial cleanser'} ,
{'id': 81, 'name': 'cow'} ,
{'id': 331, 'name': 'microscope'} ,
{'id': 148, 'name': 'lemon'} ,
{'id': 302, 'name': 'pomelo'} ,
{'id': 85, 'name': 'fork'} ,
{'id': 154, 'name': 'pumpkin'} ,
{'id': 289, 'name': 'shrimp'} ,
{'id': 71, 'name': 'teddy bear'} ,
{'id': 184, 'name': 'potato'} ,
{'id': 102, 'name': 'air conditioner'} ,
{'id': 208, 'name': 'hot dog'} ,
{'id': 222, 'name': 'plum'} ,
{'id': 316, 'name': 'spring rolls'} ,
{'id': 230, 'name': 'crane'} ,
{'id': 149, 'name': 'liquid soap'} ,
{'id': 55, 'name': 'canned'} ,
{'id': 35, 'name': 'speaker'} ,
{'id': 108, 'name': 'banana'} ,
{'id': 297, 'name': 'treadmill'} ,
{'id': 99, 'name': 'spoon'} ,
{'id': 104, 'name': 'mouse'} ,
{'id': 182, 'name': 'american football'} ,
{'id': 299, 'name': 'egg tart'} ,
{'id': 127, 'name': 'cleaning products'} ,
{'id': 313, 'name': 'urinal'} ,
{'id': 286, 'name': 'medal'} ,
{'id': 239, 'name': 'brush'} ,
{'id': 96, 'name': 'hockey'} ,
{'id': 279, 'name': 'dumbbell'} ,
{'id': 32, 'name': 'umbrella'} ,
{'id': 272, 'name': 'hammer'} ,
{'id': 16, 'name': 'plate'} ,
{'id': 21, 'name': 'potted plant'} ,
{'id': 242, 'name': 'earphone'} ,
{'id': 70, 'name': 'candle'} ,
{'id': 185, 'name': 'paint brush'} ,
{'id': 48, 'name': 'toy'} ,
{'id': 130, 'name': 'pizza'} ,
{'id': 255, 'name': 'trumpet'} ,
{'id': 361, 'name': 'hotair balloon'} ,
{'id': 188, 'name': 'fire hydrant'} ,
{'id': 50, 'name': 'bed'} ,
{'id': 253, 'name': 'avocado'} ,
{'id': 293, 'name': 'coconut'} ,
{'id': 257, 'name': 'cue'} ,
{'id': 280, 'name': 'hamimelon'} ,
{'id': 66, 'name': 'horse'} ,
{'id': 173, 'name': 'pigeon'} ,
{'id': 190, 'name': 'projector'} ,
{'id': 69, 'name': 'camera'} ,
{'id': 30, 'name': 'bowl'} ,
{'id': 269, 'name': 'broom'} ,
{'id': 343, 'name': 'pitaya'} ,
{'id': 305, 'name': 'tuba'} ,
{'id': 309, 'name': 'green onion'} ,
{'id': 363, 'name': 'lobster'} ,
{'id': 225, 'name': 'watermelon'} ,
{'id': 47, 'name': 'suv'} ,
{'id': 31, 'name': 'dining table'} ,
{'id': 54, 'name': 'sandals'} ,
{'id': 351, 'name': 'monkey'} ,
{'id': 218, 'name': 'onion'} ,
{'id': 36, 'name': 'trash bin/can'} ,
{'id': 20, 'name': 'glove'} ,
{'id': 277, 'name': 'rice'} ,
{'id': 152, 'name': 'sports car'} ,
{'id': 360, 'name': 'target'} ,
{'id': 205, 'name': 'blender'} ,
{'id': 19, 'name': 'pillow'} ,
{'id': 72, 'name': 'cake'} ,
{'id': 93, 'name': 'tea pot'} ,
{'id': 353, 'name': 'game board'} ,
{'id': 38, 'name': 'backpack'} ,
{'id': 356, 'name': 'ambulance'} ,
{'id': 146, 'name': 'life saver'} ,
{'id': 189, 'name': 'goose'} ,
{'id': 278, 'name': 'tape measure/ruler'} ,
{'id': 92, 'name': 'traffic cone'} ,
{'id': 134, 'name': 'toiletries'} ,
{'id': 114, 'name': 'oven'} ,
{'id': 317, 'name': 'tortoise/turtle'} ,
{'id': 265, 'name': 'corn'} ,
{'id': 126, 'name': 'donut'} ,
{'id': 57, 'name': 'mirror'} ,
{'id': 7, 'name': 'cabinet/shelf'} ,
{'id': 263, 'name': 'green vegetables'} ,
{'id': 159, 'name': 'tissue '} ,
{'id': 321, 'name': 'shark'} ,
{'id': 301, 'name': 'pig'} ,
{'id': 41, 'name': 'carpet'} ,
{'id': 304, 'name': 'rice cooker'} ,
{'id': 323, 'name': 'poker card'} ,
]
def _get_builtin_metadata(version):
if version == 'v1':
id_to_name = {x['id']: x['name'] for x in categories_v1}
else:
assert 0, version
thing_dataset_id_to_contiguous_id = {i + 1: i for i in range(365)}
thing_classes = [id_to_name[k] for k in sorted(id_to_name)]
return {
"thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
"thing_classes": thing_classes}
# Predefined Objects365 splits: name -> (image root, json), relative to "datasets".
_PREDEFINED_SPLITS_OBJECTS365 = {
    "objects365_train": ("objects365/train", "objects365/annotations/objects365_train.json"),
    "objects365_val": ("objects365/val", "objects365/annotations/objects365_val.json"),
}
for key, (image_root, json_file) in _PREDEFINED_SPLITS_OBJECTS365.items():
    register_coco_instances(
        key,
        _get_builtin_metadata('v1'),
        # Remote URIs (containing "://") are passed through unchanged.
        os.path.join("datasets", json_file) if "://" not in json_file else json_file,
        os.path.join("datasets", image_root),
    )
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/data/transforms/custom_augmentation_impl.py | Python | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Modified by Xingyi Zhou
"""
Implement many useful :class:`Augmentation`.
"""
import numpy as np
import sys
from fvcore.transforms.transform import (
BlendTransform,
CropTransform,
HFlipTransform,
NoOpTransform,
Transform,
VFlipTransform,
)
from PIL import Image
from detectron2.data.transforms.augmentation import Augmentation
from .custom_transform import EfficientDetResizeCropTransform
__all__ = [
"EfficientDetResizeCrop",
]
class EfficientDetResizeCrop(Augmentation):
    """
    EfficientDet-style resize-and-crop augmentation: sample a scale factor
    uniformly from `scale`, resize the image (keeping aspect ratio) so it fits
    inside scale_factor * (size, size), then — if the resized image exceeds
    the target size — crop a randomly-offset (size, size) window.
    """

    def __init__(
        self, size, scale, interp=Image.BILINEAR
    ):
        """
        Args:
            size (int): side length of the square target output.
            scale (tuple[float, float]): range the scale factor is sampled from.
            interp: PIL interpolation method used for resizing.
        """
        super().__init__()
        self.target_size = (size, size)
        self.scale = scale
        self.interp = interp

    def get_transform(self, img):
        # Select a random scale factor.
        scale_factor = np.random.uniform(*self.scale)
        scaled_target_height = scale_factor * self.target_size[0]
        scaled_target_width = scale_factor * self.target_size[1]
        # Recompute the accurate scale_factor using rounded scaled image size.
        width, height = img.shape[1], img.shape[0]
        img_scale_y = scaled_target_height / height
        img_scale_x = scaled_target_width / width
        # Use the smaller of the two scales for both axes -> aspect preserved.
        img_scale = min(img_scale_y, img_scale_x)
        # Select non-zero random offset (x, y) if scaled image is larger than target size
        scaled_h = int(height * img_scale)
        scaled_w = int(width * img_scale)
        offset_y = scaled_h - self.target_size[0]
        offset_x = scaled_w - self.target_size[1]
        offset_y = int(max(0.0, float(offset_y)) * np.random.uniform(0, 1))
        offset_x = int(max(0.0, float(offset_x)) * np.random.uniform(0, 1))
        return EfficientDetResizeCropTransform(
            scaled_h, scaled_w, offset_y, offset_x, img_scale, self.target_size, self.interp)
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/data/transforms/custom_transform.py | Python | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Modified by Xingyi Zhou
# File: transform.py
import numpy as np
import torch
import torch.nn.functional as F
from fvcore.transforms.transform import (
CropTransform,
HFlipTransform,
NoOpTransform,
Transform,
TransformList,
)
from PIL import Image
try:
import cv2 # noqa
except ImportError:
# OpenCV is an optional dependency at the moment
pass
__all__ = [
"EfficientDetResizeCropTransform",
]
class EfficientDetResizeCropTransform(Transform):
    """
    Deterministic resize-then-crop transform: resize the image to
    (scaled_h, scaled_w), then crop a `target_size` window whose top-left
    corner is (offset_y, offset_x). Coordinates are mapped with the same
    scale and offset.
    """

    def __init__(self, scaled_h, scaled_w, offset_y, offset_x, img_scale, target_size, interp=None):
        """
        Args:
            scaled_h, scaled_w (int): size of the resized (pre-crop) image.
            offset_y, offset_x (int): top-left corner of the crop window.
            img_scale (float): resize factor applied to the original image.
            target_size (tuple): (h, w) of the crop window.
            interp: PIL interpolation methods, defaults to bilinear.
        """
        # TODO decide on PIL vs opencv
        super().__init__()
        if interp is None:
            interp = Image.BILINEAR
        self._set_attributes(locals())

    def apply_image(self, img, interp=None):
        assert len(img.shape) <= 4
        if img.dtype == np.uint8:
            # uint8 path: resize with PIL, then crop the numpy array.
            pil_image = Image.fromarray(img)
            interp_method = interp if interp is not None else self.interp
            pil_image = pil_image.resize((self.scaled_w, self.scaled_h), interp_method)
            ret = np.asarray(pil_image)
            # Clamp the crop window to the bounds of the resized image.
            right = min(self.scaled_w, self.offset_x + self.target_size[1])
            lower = min(self.scaled_h, self.offset_y + self.target_size[0])
            if len(ret.shape) <= 3:
                ret = ret[self.offset_y: lower, self.offset_x: right]
            else:
                ret = ret[..., self.offset_y: lower, self.offset_x: right, :]
        else:
            # PIL only supports uint8; other dtypes go through torch interpolate.
            img = torch.from_numpy(img)
            shape = list(img.shape)
            shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:]
            img = img.view(shape_4d).permute(2, 3, 0, 1)  # hw(c) -> nchw
            _PIL_RESIZE_TO_INTERPOLATE_MODE = {Image.BILINEAR: "bilinear", Image.BICUBIC: "bicubic"}
            mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[self.interp]
            img = F.interpolate(img, (self.scaled_h, self.scaled_w), mode=mode, align_corners=False)
            shape[:2] = (self.scaled_h, self.scaled_w)
            # permute(2, 3, 0, 1) is its own inverse, restoring hw(c) layout.
            ret = img.permute(2, 3, 0, 1).view(shape).numpy()  # nchw -> hw(c)
            right = min(self.scaled_w, self.offset_x + self.target_size[1])
            lower = min(self.scaled_h, self.offset_y + self.target_size[0])
            if len(ret.shape) <= 3:
                ret = ret[self.offset_y: lower, self.offset_x: right]
            else:
                ret = ret[..., self.offset_y: lower, self.offset_x: right, :]
        return ret

    def apply_coords(self, coords):
        # Same mapping as the image: scale first, then subtract the crop offset.
        coords[:, 0] = coords[:, 0] * self.img_scale
        coords[:, 1] = coords[:, 1] * self.img_scale
        coords[:, 0] -= self.offset_x
        coords[:, 1] -= self.offset_y
        return coords

    def apply_segmentation(self, segmentation):
        # Nearest-neighbour interpolation keeps label values intact.
        segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
        return segmentation

    def inverse(self):
        # The crop discards pixels, so no exact inverse is provided.
        raise NotImplementedError
centernet/modeling/backbone/bifpn.py | Python | # Modified from https://github.com/rwightman/efficientdet-pytorch/blob/master/effdet/efficientdet.py
# The original file is under Apache-2.0 License
import math
from os.path import join
import numpy as np
from collections import OrderedDict
from typing import List
import torch
from torch import nn
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import fvcore.nn.weight_init as weight_init
from detectron2.layers import ShapeSpec, Conv2d
from detectron2.modeling.backbone.resnet import build_resnet_backbone
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
from detectron2.layers.batch_norm import get_norm
from detectron2.modeling.backbone import Backbone
from .dlafpn import dla34
def get_fpn_config(base_reduction=8):
    """BiFPN graph configuration with fast-attention fusion.

    Each node records its output stride ('reduction') and which earlier
    feature maps it fuses ('inputs_offsets': indices 0-4 address the five
    input levels, 5+ address previously built nodes). The first four nodes
    form the top-down pass, the last four the bottom-up pass.
    """
    r = base_reduction
    nodes = [
        # top-down path (coarse -> fine)
        {'reduction': r * 8, 'inputs_offsets': [3, 4]},
        {'reduction': r * 4, 'inputs_offsets': [2, 5]},
        {'reduction': r * 2, 'inputs_offsets': [1, 6]},
        {'reduction': r * 1, 'inputs_offsets': [0, 7]},
        # bottom-up path (fine -> coarse)
        {'reduction': r * 2, 'inputs_offsets': [1, 7, 8]},
        {'reduction': r * 4, 'inputs_offsets': [2, 6, 9]},
        {'reduction': r * 8, 'inputs_offsets': [3, 5, 10]},
        {'reduction': r * 16, 'inputs_offsets': [4, 11]},
    ]
    return {'nodes': nodes, 'weight_method': 'fastattn'}
def swish(x, inplace: bool = False):
    """Swish activation, x * sigmoid(x) — https://arxiv.org/abs/1710.05941.

    With inplace=True the input tensor is mutated in place.
    """
    if inplace:
        return x.mul_(x.sigmoid())
    return x.mul(x.sigmoid())
class Swish(nn.Module):
    """Module wrapper around the functional `swish` activation."""

    def __init__(self, inplace: bool = False):
        super().__init__()
        # When True, forward mutates its input tensor in place.
        self.inplace = inplace

    def forward(self, x):
        return swish(x, inplace=self.inplace)
class SequentialAppend(nn.Sequential):
    """A Sequential whose forward threads a *list* of tensors through its
    children: each child is called with the full list and its output is
    appended, so later children can read every earlier output.
    """

    def forward(self, x):
        for child in self:
            x.append(child(x))
        return x
class SequentialAppendLast(nn.Sequential):
    """A Sequential that grows a list of tensors: each child consumes only
    the list's last element and its output is appended.
    """

    def forward(self, x):
        for child in self:
            x.append(child(x[-1]))
        return x
class ConvBnAct2d(nn.Module):
    """Conv2d optionally followed by a normalization layer and an activation."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, padding='', bias=False,
                 norm='', act_layer=Swish):
        super().__init__()
        # Bias is only needed when no norm layer follows the conv.
        self.conv = Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride,
            padding=kernel_size // 2, bias=(norm == ''))
        self.bn = get_norm(norm, out_channels)
        self.act = None if act_layer is None else act_layer(inplace=True)

    def forward(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.act is not None:
            out = self.act(out)
        return out
class SeparableConv2d(nn.Module):
    """ Separable Conv
    A grouped (depthwise-style) conv followed by a pointwise (1x1) conv,
    with optional norm and activation applied after the pointwise conv.
    """
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, padding='', bias=False,
                 channel_multiplier=1.0, pw_kernel_size=1, act_layer=Swish,
                 norm=''):
        super(SeparableConv2d, self).__init__()
        # NOTE(review): groups=out_channels deviates from the usual depthwise
        # convention (groups=in_channels); the two coincide only when
        # in_channels == out_channels, which is how BiFPN uses this module —
        # confirm before reusing with differing channel counts.
        self.conv_dw = Conv2d(
            in_channels, int(in_channels * channel_multiplier),
            kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=bias,
            groups=out_channels)
        # Pointwise 1x1 conv; bias only when no norm layer follows it.
        self.conv_pw = Conv2d(
            int(in_channels * channel_multiplier), out_channels,
            kernel_size=pw_kernel_size, padding=pw_kernel_size // 2, bias=(norm==''))
        self.bn = get_norm(norm, out_channels)
        self.act = None if act_layer is None else act_layer(inplace=True)

    def forward(self, x):
        x = self.conv_dw(x)
        x = self.conv_pw(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.act is not None:
            x = self.act(x)
        return x
class ResampleFeatureMap(nn.Sequential):
    """Match a feature map to a target channel count and resolution.

    Optionally applies a 1x1 conv to change channels, then a max pool to
    downsample (reduction_ratio > 1) or nearest-neighbour upsampling
    (reduction_ratio < 1). Child module names/order determine state_dict
    keys, so the add_module ordering below is load-bearing.
    """
    def __init__(self, in_channels, out_channels, reduction_ratio=1., pad_type='', pooling_type='max',
                 norm='', apply_bn=False, conv_after_downsample=False,
                 redundant_bias=False):
        super(ResampleFeatureMap, self).__init__()
        pooling_type = pooling_type or 'max'
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.reduction_ratio = reduction_ratio
        self.conv_after_downsample = conv_after_downsample

        conv = None
        if in_channels != out_channels:
            # 1x1 projection; norm/bias arrangement mirrors ConvBnAct2d usage.
            conv = ConvBnAct2d(
                in_channels, out_channels, kernel_size=1, padding=pad_type,
                norm=norm if apply_bn else '',
                bias=not apply_bn or redundant_bias, act_layer=None)

        if reduction_ratio > 1:
            stride_size = int(reduction_ratio)
            # conv may run before or after the pooling, controlled by flag.
            if conv is not None and not self.conv_after_downsample:
                self.add_module('conv', conv)
            self.add_module(
                'downsample',
                nn.MaxPool2d(kernel_size=stride_size, stride=stride_size)
            )
            if conv is not None and self.conv_after_downsample:
                self.add_module('conv', conv)
        else:
            if conv is not None:
                self.add_module('conv', conv)
            if reduction_ratio < 1:
                scale = int(1 // reduction_ratio)
                self.add_module('upsample', nn.UpsamplingNearest2d(scale_factor=scale))
class FpnCombine(nn.Module):
    """Fuse several feature maps into one BiFPN node.

    Each input is resampled to the node's channel count and target reduction,
    then the inputs are combined with 'attn' (softmax-normalized learned
    weights), 'fastattn' (ReLU weights normalized by their sum) or 'sum'.
    """
    def __init__(self, feature_info, fpn_config, fpn_channels, inputs_offsets, target_reduction, pad_type='',
                 pooling_type='max', norm='', apply_bn_for_resampling=False,
                 conv_after_downsample=False, redundant_bias=False, weight_method='attn'):
        super(FpnCombine, self).__init__()
        self.inputs_offsets = inputs_offsets
        self.weight_method = weight_method

        self.resample = nn.ModuleDict()
        for idx, offset in enumerate(inputs_offsets):
            in_channels = fpn_channels
            if offset < len(feature_info):
                # Offset addresses one of the original input levels.
                in_channels = feature_info[offset]['num_chs']
                input_reduction = feature_info[offset]['reduction']
            else:
                # Offset addresses a previously built BiFPN node.
                node_idx = offset - len(feature_info)
                input_reduction = fpn_config['nodes'][node_idx]['reduction']
            reduction_ratio = target_reduction / input_reduction
            self.resample[str(offset)] = ResampleFeatureMap(
                in_channels, fpn_channels, reduction_ratio=reduction_ratio, pad_type=pad_type,
                pooling_type=pooling_type, norm=norm,
                apply_bn=apply_bn_for_resampling, conv_after_downsample=conv_after_downsample,
                redundant_bias=redundant_bias)

        if weight_method == 'attn' or weight_method == 'fastattn':
            # One learnable scalar weight per fused input.
            self.edge_weights = nn.Parameter(torch.ones(len(inputs_offsets)), requires_grad=True)
        else:
            self.edge_weights = None

    def forward(self, x):
        dtype = x[0].dtype
        nodes = []
        for offset in self.inputs_offsets:
            input_node = x[offset]
            input_node = self.resample[str(offset)](input_node)
            nodes.append(input_node)

        if self.weight_method == 'attn':
            normalized_weights = torch.softmax(self.edge_weights.type(dtype), dim=0)
            x = torch.stack(nodes, dim=-1) * normalized_weights
        elif self.weight_method == 'fastattn':
            edge_weights = nn.functional.relu(self.edge_weights.type(dtype))
            weights_sum = torch.sum(edge_weights)
            # The epsilon avoids division by zero when all weights relu to 0.
            x = torch.stack(
                [(nodes[i] * edge_weights[i]) / (weights_sum + 0.0001) for i in range(len(nodes))], dim=-1)
        elif self.weight_method == 'sum':
            x = torch.stack(nodes, dim=-1)
        else:
            raise ValueError('unknown weight_method {}'.format(self.weight_method))
        x = torch.sum(x, dim=-1)
        return x
class BiFpnLayer(nn.Module):
    """One BiFPN cell: builds every fusion node described by `fpn_config`
    and exposes the last `num_levels` node outputs as its feature maps."""
    def __init__(self, feature_info, fpn_config, fpn_channels, num_levels=5, pad_type='',
                 pooling_type='max', norm='', act_layer=Swish,
                 apply_bn_for_resampling=False, conv_after_downsample=True, conv_bn_relu_pattern=False,
                 separable_conv=True, redundant_bias=False):
        super(BiFpnLayer, self).__init__()
        self.fpn_config = fpn_config
        self.num_levels = num_levels
        # NOTE: hard-coded False, ignoring the constructor argument as-is.
        self.conv_bn_relu_pattern = False

        self.feature_info = []
        self.fnode = SequentialAppend()
        for i, fnode_cfg in enumerate(fpn_config['nodes']):
            fnode_layers = OrderedDict()

            # combine features
            reduction = fnode_cfg['reduction']
            fnode_layers['combine'] = FpnCombine(
                feature_info, fpn_config, fpn_channels, fnode_cfg['inputs_offsets'], target_reduction=reduction,
                pad_type=pad_type, pooling_type=pooling_type, norm=norm,
                apply_bn_for_resampling=apply_bn_for_resampling, conv_after_downsample=conv_after_downsample,
                redundant_bias=redundant_bias, weight_method=fpn_config['weight_method'])
            self.feature_info.append(dict(num_chs=fpn_channels, reduction=reduction))

            # after combine ops
            after_combine = OrderedDict()
            if not conv_bn_relu_pattern:
                # activation precedes conv(+norm) in the default pattern
                after_combine['act'] = act_layer(inplace=True)
                conv_bias = redundant_bias
                conv_act = None
            else:
                conv_bias = False
                conv_act = act_layer
            conv_kwargs = dict(
                in_channels=fpn_channels, out_channels=fpn_channels, kernel_size=3, padding=pad_type,
                bias=conv_bias, norm=norm, act_layer=conv_act)
            after_combine['conv'] = SeparableConv2d(**conv_kwargs) if separable_conv else ConvBnAct2d(**conv_kwargs)
            fnode_layers['after_combine'] = nn.Sequential(after_combine)

            self.fnode.add_module(str(i), nn.Sequential(fnode_layers))

        # Only the last num_levels nodes are exposed as this layer's outputs.
        self.feature_info = self.feature_info[-num_levels::]

    def forward(self, x):
        x = self.fnode(x)
        return x[-self.num_levels::]
class BiFPN(Backbone):
    """Bidirectional FPN (EfficientDet) on top of a detectron2 Backbone.

    Produces `num_levels` feature maps (named "p{log2(stride)}") with
    `out_channels` channels each, refined by `num_bifpn` stacked BiFPN cells.
    Levels beyond those provided by the bottom-up network are created by
    repeated 2x downsampling of the coarsest input.
    """
    def __init__(
        self, cfg, bottom_up, in_features, out_channels, norm='',
        num_levels=5, num_bifpn=4, separable_conv=False,
    ):
        super(BiFPN, self).__init__()
        assert isinstance(bottom_up, Backbone)

        # Feature map strides and channels from the bottom up network (e.g. ResNet)
        input_shapes = bottom_up.output_shape()
        in_strides = [input_shapes[f].stride for f in in_features]
        in_channels = [input_shapes[f].channels for f in in_features]

        self.num_levels = num_levels
        self.num_bifpn = num_bifpn
        self.bottom_up = bottom_up
        self.in_features = in_features
        self._size_divisibility = 128

        # Output names follow the stride: stride 2**k -> "p{k}". Extra levels
        # beyond the bottom-up outputs double the stride each time.
        levels = [int(math.log2(s)) for s in in_strides]
        self._out_feature_strides = {
            "p{}".format(int(math.log2(s))): s for s in in_strides}
        if len(in_features) < num_levels:
            for l in range(num_levels - len(in_features)):
                s = l + levels[-1]
                self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1)
        self._out_features = list(sorted(self._out_feature_strides.keys()))
        self._out_feature_channels = {k: out_channels for k in self._out_features}

        feature_info = [
            {'num_chs': in_channels[level], 'reduction': in_strides[level]} \
                for level in range(len(self.in_features))
        ]
        fpn_config = get_fpn_config()
        # self.resample holds the downsamplers that create the extra levels.
        self.resample = SequentialAppendLast()
        for level in range(num_levels):
            if level < len(feature_info):
                in_chs = in_channels[level]  # feature_info[level]['num_chs']
                reduction = in_strides[level]  # feature_info[level]['reduction']
            else:
                # Adds a coarser level by downsampling the last feature map
                reduction_ratio = 2
                self.resample.add_module(str(level), ResampleFeatureMap(
                    in_channels=in_chs,
                    out_channels=out_channels,
                    pad_type='same',
                    pooling_type=None,
                    norm=norm,
                    reduction_ratio=reduction_ratio,
                    apply_bn=True,
                    conv_after_downsample=False,
                    redundant_bias=False,
                ))
                in_chs = out_channels
                reduction = int(reduction * reduction_ratio)
                feature_info.append(dict(num_chs=in_chs, reduction=reduction))

        # Stack num_bifpn BiFPN cells; each consumes the previous cell's
        # feature_info so channel/stride bookkeeping stays consistent.
        self.cell = nn.Sequential()
        for rep in range(self.num_bifpn):
            fpn_layer = BiFpnLayer(
                feature_info=feature_info,
                fpn_config=fpn_config,
                fpn_channels=out_channels,
                num_levels=self.num_levels,
                pad_type='same',
                pooling_type=None,
                norm=norm,
                act_layer=Swish,
                separable_conv=separable_conv,
                apply_bn_for_resampling=True,
                conv_after_downsample=False,
                conv_bn_relu_pattern=False,
                redundant_bias=False,
            )
            self.cell.add_module(str(rep), fpn_layer)
            feature_info = fpn_layer.feature_info

    @property
    def size_divisibility(self):
        return self._size_divisibility

    def forward(self, x):
        bottom_up_features = self.bottom_up(x)
        x = [bottom_up_features[f] for f in self.in_features]
        assert len(self.resample) == self.num_levels - len(x)
        # Extend with the extra (coarser) levels, then run the BiFPN cells.
        x = self.resample(x)
        shapes = [xx.shape for xx in x]  # kept for debugging
        x = self.cell(x)
        out = {f: xx for f, xx in zip(self._out_features, x)}
        return out
@BACKBONE_REGISTRY.register()
def build_resnet_bifpn_backbone(cfg, input_shape: ShapeSpec):
    """
    Build a ResNet bottom-up network topped with a BiFPN.

    Args:
        cfg: a detectron2 CfgNode

    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    bottom_up = build_resnet_backbone(cfg, input_shape)
    return BiFPN(
        cfg=cfg,
        bottom_up=bottom_up,
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=cfg.MODEL.BIFPN.OUT_CHANNELS,
        norm=cfg.MODEL.BIFPN.NORM,
        num_levels=cfg.MODEL.BIFPN.NUM_LEVELS,
        num_bifpn=cfg.MODEL.BIFPN.NUM_BIFPN,
        separable_conv=cfg.MODEL.BIFPN.SEPARABLE_CONV,
    )
@BACKBONE_REGISTRY.register()
def build_p37_dla_bifpn_backbone(cfg, input_shape: ShapeSpec):
    """
    Build a DLA-34 bottom-up network topped with a 5-level (p3-p7) BiFPN.

    Args:
        cfg: a detectron2 CfgNode

    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    # This builder is specific to the p3-p7 configuration.
    assert cfg.MODEL.BIFPN.NUM_LEVELS == 5
    bottom_up = dla34(cfg)
    return BiFPN(
        cfg=cfg,
        bottom_up=bottom_up,
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=cfg.MODEL.BIFPN.OUT_CHANNELS,
        norm=cfg.MODEL.BIFPN.NORM,
        num_levels=cfg.MODEL.BIFPN.NUM_LEVELS,
        num_bifpn=cfg.MODEL.BIFPN.NUM_BIFPN,
        separable_conv=cfg.MODEL.BIFPN.SEPARABLE_CONV,
    )
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/modeling/backbone/bifpn_fcos.py | Python | # This file is modified from https://github.com/aim-uofa/AdelaiDet/blob/master/adet/modeling/backbone/bifpn.py
# The original file is under 2-clause BSD License for academic use, and *non-commercial use*.
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling.backbone import Backbone, build_resnet_backbone
from detectron2.modeling import BACKBONE_REGISTRY
from .dlafpn import dla34
__all__ = []  # nothing is exported via `from ... import *`
def swish(x):
    """Swish / SiLU activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
def split_name(name):
    """Split a feature-map name such as "res5" into ("res", 5).

    Args:
        name (str): an alphabetic prefix followed by an integer suffix.

    Returns:
        tuple[str, int]: the prefix and the parsed suffix.

    Raises:
        ValueError: if `name` has no non-alphabetic character (previously
            raised with no message, which made failures hard to diagnose).
    """
    for i, c in enumerate(name):
        if not c.isalpha():
            return name[:i], int(name[i:])
    raise ValueError("invalid feature name: {!r}".format(name))
class FeatureMapResampler(nn.Module):
    """Projects a feature map to `out_channels` (1x1 conv only when the
    channel counts differ) and optionally downsamples it by a factor of 2."""

    def __init__(self, in_channels, out_channels, stride, norm=""):
        super(FeatureMapResampler, self).__init__()
        if in_channels == out_channels:
            # Channel counts already match: no projection needed.
            self.reduction = None
        else:
            self.reduction = Conv2d(
                in_channels, out_channels, kernel_size=1,
                bias=(norm == ""),
                norm=get_norm(norm, out_channels),
                activation=None
            )
        assert stride <= 2
        self.stride = stride

    def forward(self, x):
        if self.reduction is not None:
            x = self.reduction(x)
        if self.stride == 1:
            return x
        if self.stride == 2:
            # 3x3 max-pool with stride 2 halves the resolution.
            return F.max_pool2d(
                x, kernel_size=self.stride + 1,
                stride=self.stride, padding=1
            )
        raise NotImplementedError()
class BackboneWithTopLevels(Backbone):
    """Wraps a backbone and appends `num_top_levels` extra feature maps
    (e.g. p6, p7), each produced by a stride-2 FeatureMapResampler applied
    to the previous (coarser) level."""
    def __init__(self, backbone, out_channels, num_top_levels, norm=""):
        super(BackboneWithTopLevels, self).__init__()
        self.backbone = backbone
        backbone_output_shape = backbone.output_shape()
        # Copy the wrapped backbone's output metadata; extra levels are
        # appended in the loop below.
        self._out_feature_channels = {name: shape.channels for name, shape in backbone_output_shape.items()}
        self._out_feature_strides = {name: shape.stride for name, shape in backbone_output_shape.items()}
        self._out_features = list(self._out_feature_strides.keys())
        # Coarsest existing level by numeric suffix, e.g. "res5".
        last_feature_name = max(self._out_feature_strides.keys(), key=lambda x: split_name(x)[1])
        self.last_feature_name = last_feature_name
        self.num_top_levels = num_top_levels
        last_channels = self._out_feature_channels[last_feature_name]
        last_stride = self._out_feature_strides[last_feature_name]
        prefix, suffix = split_name(last_feature_name)
        prev_channels = last_channels
        for i in range(num_top_levels):
            # New levels continue the naming scheme: "res6", "res7", ...
            # Each halves the resolution of the previous one (stride 2).
            name = prefix + str(suffix + i + 1)
            self.add_module(name, FeatureMapResampler(
                prev_channels, out_channels, 2, norm
            ))
            prev_channels = out_channels
            self._out_feature_channels[name] = out_channels
            self._out_feature_strides[name] = last_stride * 2 ** (i + 1)
            self._out_features.append(name)

    def forward(self, x):
        # Run the wrapped backbone, then chain the extra resamplers on top
        # of its coarsest output, recording each intermediate map.
        outputs = self.backbone(x)
        last_features = outputs[self.last_feature_name]
        prefix, suffix = split_name(self.last_feature_name)
        x = last_features
        for i in range(self.num_top_levels):
            name = prefix + str(suffix + i + 1)
            x = self.__getattr__(name)(x)
            outputs[name] = x
        return outputs
class SingleBiFPN(Backbone):
    """
    One BiFPN layer (EfficientDet-style): fuses a list of feature maps with
    weighted top-down and bottom-up connections and returns a new list of
    feature maps, one per input level, all with `out_channels` channels.
    """
    def __init__(
        self, in_channels_list, out_channels, norm=""
    ):
        """
        Args:
            in_channels_list (list[int]): channel count of each input level,
                ordered from high to low resolution.
            out_channels (int): number of channels in the output feature maps.
            norm (str): the normalization to use.
        """
        super(SingleBiFPN, self).__init__()
        self.out_channels = out_channels
        # Each node fuses the maps at `inputs_offsets` -- indices into the
        # running feature list (input levels first, then previously created
        # nodes) -- and produces a map at the resolution of `feat_level`.
        # The layout is a top-down pass followed by a bottom-up pass.
        # build 5-levels bifpn
        if len(in_channels_list) == 5:
            self.nodes = [
                {'feat_level': 3, 'inputs_offsets': [3, 4]},
                {'feat_level': 2, 'inputs_offsets': [2, 5]},
                {'feat_level': 1, 'inputs_offsets': [1, 6]},
                {'feat_level': 0, 'inputs_offsets': [0, 7]},
                {'feat_level': 1, 'inputs_offsets': [1, 7, 8]},
                {'feat_level': 2, 'inputs_offsets': [2, 6, 9]},
                {'feat_level': 3, 'inputs_offsets': [3, 5, 10]},
                {'feat_level': 4, 'inputs_offsets': [4, 11]},
            ]
        elif len(in_channels_list) == 3:
            self.nodes = [
                {'feat_level': 1, 'inputs_offsets': [1, 2]},
                {'feat_level': 0, 'inputs_offsets': [0, 3]},
                {'feat_level': 1, 'inputs_offsets': [1, 3, 4]},
                {'feat_level': 2, 'inputs_offsets': [2, 5]},
            ]
        else:
            raise NotImplementedError
        # node_info[i] tracks the channel count of the i-th entry in the
        # running feature list; used to decide if a lateral conv is needed.
        node_info = [_ for _ in in_channels_list]
        num_output_connections = [0 for _ in in_channels_list]
        for fnode in self.nodes:
            feat_level = fnode["feat_level"]
            inputs_offsets = fnode["inputs_offsets"]
            inputs_offsets_str = "_".join(map(str, inputs_offsets))
            for input_offset in inputs_offsets:
                num_output_connections[input_offset] += 1
                in_channels = node_info[input_offset]
                if in_channels != out_channels:
                    # 1x1 lateral conv to bring the input to BiFPN width.
                    lateral_conv = Conv2d(
                        in_channels,
                        out_channels,
                        kernel_size=1,
                        norm=get_norm(norm, out_channels)
                    )
                    self.add_module(
                        "lateral_{}_f{}".format(input_offset, feat_level), lateral_conv
                    )
            node_info.append(out_channels)
            num_output_connections.append(0)
            # generate attention weights: one learnable non-negative scalar
            # per fused input ("fast normalized fusion"); normalized in forward.
            name = "weights_f{}_{}".format(feat_level, inputs_offsets_str)
            self.__setattr__(name, nn.Parameter(
                torch.ones(len(inputs_offsets), dtype=torch.float32),
                requires_grad=True
            ))
            # generate convolutions after combination
            name = "outputs_f{}_{}".format(feat_level, inputs_offsets_str)
            self.add_module(name, Conv2d(
                out_channels,
                out_channels,
                kernel_size=3,
                padding=1,
                norm=get_norm(norm, out_channels),
                bias=(norm == "")
            ))

    def forward(self, feats):
        """
        Args:
            feats (list[Tensor]): input feature maps, high to low resolution.
        Returns:
            list[Tensor]: fused feature maps, same count and ordering as the
            input list (note: a list, not a dict).
        """
        feats = [_ for _ in feats]
        num_levels = len(feats)
        num_output_connections = [0 for _ in feats]
        for fnode in self.nodes:
            feat_level = fnode["feat_level"]
            inputs_offsets = fnode["inputs_offsets"]
            inputs_offsets_str = "_".join(map(str, inputs_offsets))
            input_nodes = []
            _, _, target_h, target_w = feats[feat_level].size()
            for input_offset in inputs_offsets:
                num_output_connections[input_offset] += 1
                input_node = feats[input_offset]
                # reduction: apply the lateral conv if the width differs
                if input_node.size(1) != self.out_channels:
                    name = "lateral_{}_f{}".format(input_offset, feat_level)
                    input_node = self.__getattr__(name)(input_node)
                # maybe downsample (max-pool) or upsample (nearest) to the
                # target level's spatial size; only a 2x ratio is supported
                _, _, h, w = input_node.size()
                if h > target_h and w > target_w:
                    height_stride_size = int((h - 1) // target_h + 1)
                    width_stride_size = int((w - 1) // target_w + 1)
                    assert height_stride_size == width_stride_size == 2
                    input_node = F.max_pool2d(
                        input_node, kernel_size=(height_stride_size + 1, width_stride_size + 1),
                        stride=(height_stride_size, width_stride_size), padding=1
                    )
                elif h <= target_h and w <= target_w:
                    if h < target_h or w < target_w:
                        input_node = F.interpolate(
                            input_node,
                            size=(target_h, target_w),
                            mode="nearest"
                        )
                else:
                    # mixed case (one dim larger, one smaller) is unsupported
                    raise NotImplementedError()
                input_nodes.append(input_node)
            # attention: ReLU keeps weights non-negative; epsilon avoids
            # division by zero when all weights are zero
            name = "weights_f{}_{}".format(feat_level, inputs_offsets_str)
            weights = F.relu(self.__getattr__(name))
            norm_weights = weights / (weights.sum() + 0.0001)
            new_node = torch.stack(input_nodes, dim=-1)
            new_node = (norm_weights * new_node).sum(dim=-1)
            new_node = swish(new_node)
            name = "outputs_f{}_{}".format(feat_level, inputs_offsets_str)
            feats.append(self.__getattr__(name)(new_node))
            num_output_connections.append(0)
        # For each level, emit the most recently created node at that level
        # (i.e. the last fnode that wrote to it).
        output_feats = []
        for idx in range(num_levels):
            for i, fnode in enumerate(reversed(self.nodes)):
                if fnode['feat_level'] == idx:
                    output_feats.append(feats[-1 - i])
                    break
            else:
                raise ValueError()
        return output_feats
class BiFPN(Backbone):
    """
    BiFPN backbone: a bottom-up network extended with extra top levels,
    followed by a stack of `num_repeats` SingleBiFPN fusion layers.
    """
    def __init__(
        self, bottom_up, in_features, out_channels, num_top_levels, num_repeats, norm=""
    ):
        """
        Args:
            bottom_up (Backbone): module representing the bottom up subnetwork.
                Must be a subclass of :class:`Backbone`. The multi-scale feature
                maps generated by the bottom up network, and listed in `in_features`,
                are used to generate FPN levels.
            in_features (list[str]): names of the input feature maps coming
                from the backbone to which FPN is attached. For example, if the
                backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
                of these may be used; order must be from high to low resolution.
            out_channels (int): number of channels in the output feature maps.
            num_top_levels (int): the number of the top levels (p6 or p7).
            num_repeats (int): the number of repeats of BiFPN.
            norm (str): the normalization to use.
        """
        super(BiFPN, self).__init__()
        assert isinstance(bottom_up, Backbone)
        # add extra feature levels (i.e., 6 and 7)
        self.bottom_up = BackboneWithTopLevels(
            bottom_up, out_channels,
            num_top_levels, norm
        )
        bottom_up_output_shapes = self.bottom_up.output_shape()
        in_features = sorted(in_features, key=lambda x: split_name(x)[1])
        # NOTE(review): hard-coded to 128 rather than the coarsest level's
        # stride (the commented-out expression) -- presumably to cover the
        # extra top levels; confirm before changing.
        self._size_divisibility = 128 #bottom_up_output_shapes[in_features[-1]].stride
        self.out_channels = out_channels
        self.min_level = split_name(in_features[0])[1]
        # add the names for top blocks
        prefix, last_suffix = split_name(in_features[-1])
        for i in range(num_top_levels):
            in_features.append(prefix + str(last_suffix + i + 1))
        self.in_features = in_features
        # generate output features: same numeric suffixes, "p" prefix
        self._out_features = ["p{}".format(split_name(name)[1]) for name in in_features]
        self._out_feature_strides = {
            out_name: bottom_up_output_shapes[in_name].stride
            for out_name, in_name in zip(self._out_features, in_features)
        }
        self._out_feature_channels = {k: out_channels for k in self._out_features}
        # build bifpn
        self.repeated_bifpn = nn.ModuleList()
        for i in range(num_repeats):
            if i == 0:
                # The first layer consumes the raw backbone channel counts.
                in_channels_list = [
                    bottom_up_output_shapes[name].channels for name in in_features
                ]
            else:
                # Subsequent layers consume the previous layer's outputs,
                # which are already at `out_channels`.
                in_channels_list = [
                    self._out_feature_channels[name] for name in self._out_features
                ]
            self.repeated_bifpn.append(SingleBiFPN(
                in_channels_list, out_channels, norm
            ))

    @property
    def size_divisibility(self):
        # Required padding multiple for input images.
        return self._size_divisibility

    def forward(self, x):
        """
        Args:
            x (Tensor): input image batch, passed to the bottom-up backbone.
        Returns:
            dict[str->Tensor]:
                mapping from feature map name (e.g. "p3") to the fused FPN
                feature map tensor, in high to low resolution order.
        """
        bottom_up_features = self.bottom_up(x)
        feats = [bottom_up_features[f] for f in self.in_features]
        for bifpn in self.repeated_bifpn:
            feats = bifpn(feats)
        return dict(zip(self._out_features, feats))
def _assert_strides_are_log2_contiguous(strides):
"""
Assert that each stride is 2x times its preceding stride, i.e. "contiguous in log2".
"""
for i, stride in enumerate(strides[1:], 1):
assert stride == 2 * strides[i - 1], "Strides {} {} are not log2 contiguous".format(
stride, strides[i - 1]
)
@BACKBONE_REGISTRY.register()
def build_fcos_resnet_bifpn_backbone(cfg, input_shape: ShapeSpec):
    """
    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    bottom_up = build_resnet_backbone(cfg, input_shape)
    # Two synthesized top levels (p6, p7) on top of the ResNet features.
    return BiFPN(
        bottom_up=bottom_up,
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=cfg.MODEL.BIFPN.OUT_CHANNELS,
        num_top_levels=2,
        num_repeats=cfg.MODEL.BIFPN.NUM_BIFPN,
        norm=cfg.MODEL.BIFPN.NORM
    )
@BACKBONE_REGISTRY.register()
def build_p35_fcos_resnet_bifpn_backbone(cfg, input_shape: ShapeSpec):
    """
    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    bottom_up = build_resnet_backbone(cfg, input_shape)
    # p3-p5 pyramid: no synthesized top levels.
    return BiFPN(
        bottom_up=bottom_up,
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=cfg.MODEL.BIFPN.OUT_CHANNELS,
        num_top_levels=0,
        num_repeats=cfg.MODEL.BIFPN.NUM_BIFPN,
        norm=cfg.MODEL.BIFPN.NORM
    )
@BACKBONE_REGISTRY.register()
def build_p35_fcos_dla_bifpn_backbone(cfg, input_shape: ShapeSpec):
    """
    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    bottom_up = dla34(cfg)
    # p3-p5 pyramid over the DLA features: no synthesized top levels.
    return BiFPN(
        bottom_up=bottom_up,
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=cfg.MODEL.BIFPN.OUT_CHANNELS,
        num_top_levels=0,
        num_repeats=cfg.MODEL.BIFPN.NUM_BIFPN,
        norm=cfg.MODEL.BIFPN.NORM
    )
@BACKBONE_REGISTRY.register()
def build_p37_fcos_dla_bifpn_backbone(cfg, input_shape: ShapeSpec):
    """
    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    bottom_up = dla34(cfg)
    # 5-level (p3-p7) pyramid: two synthesized top levels over DLA features.
    assert cfg.MODEL.BIFPN.NUM_LEVELS == 5
    backbone = BiFPN(
        bottom_up=bottom_up,
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=cfg.MODEL.BIFPN.OUT_CHANNELS,
        num_top_levels=2,
        num_repeats=cfg.MODEL.BIFPN.NUM_BIFPN,
        norm=cfg.MODEL.BIFPN.NORM
    )
    return backbone
centernet/modeling/backbone/dla.py | Python | import numpy as np
import math
from os.path import join
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn.functional as F
from torch import nn
import torch.utils.model_zoo as model_zoo
from detectron2.modeling.backbone.resnet import (
BasicStem, BottleneckBlock, DeformBottleneckBlock)
from detectron2.layers import (
Conv2d,
DeformConv,
FrozenBatchNorm2d,
ModulatedDeformConv,
ShapeSpec,
get_norm,
)
from detectron2.modeling.backbone.backbone import Backbone
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
from detectron2.modeling.backbone.fpn import FPN
# Re-export the detectron2 resnet building blocks imported above.
__all__ = [
    "BottleneckBlock",
    "DeformBottleneckBlock",
    "BasicStem",
]
# When True, _DeformConv uses the v1 deformable conv (18-channel offsets);
# otherwise it uses the modulated (v2) variant with a sigmoid mask.
DCNV1 = False

# Checkpoint filename hash suffix per DLA depth (see get_model_url).
HASH = {
    34: 'ba72cf86',
    60: '24839fc4',
}
def get_model_url(data, name, hash):
    """Return the download URL of a pretrained DLA checkpoint."""
    filename = '{}-{}.pth'.format(name, hash)
    return join('http://dl.yf.io/dla/models', data, filename)
class BasicBlock(nn.Module):
    """Two-conv residual block used by DLA-34 (3x3 + 3x3, ReLU in between)."""
    def __init__(self, inplanes, planes, stride=1, dilation=1, norm='BN'):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
                               stride=stride, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn1 = get_norm(norm, planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn2 = get_norm(norm, planes)
        self.stride = stride

    def forward(self, x, residual=None):
        # The caller (Tree) may pass a precomputed residual (projected /
        # downsampled input); otherwise the identity shortcut is used.
        if residual is None:
            residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (DLA variant).

    Note: unlike torchvision's bottleneck, `expansion` *divides* the width:
    the inner 3x3 conv runs at planes // expansion channels."""
    expansion = 2

    def __init__(self, inplanes, planes, stride=1, dilation=1, norm='BN'):
        super(Bottleneck, self).__init__()
        expansion = Bottleneck.expansion
        bottle_planes = planes // expansion
        self.conv1 = nn.Conv2d(inplanes, bottle_planes,
                               kernel_size=1, bias=False)
        self.bn1 = get_norm(norm, bottle_planes)
        self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
                               stride=stride, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn2 = get_norm(norm, bottle_planes)
        self.conv3 = nn.Conv2d(bottle_planes, planes,
                               kernel_size=1, bias=False)
        self.bn3 = get_norm(norm, planes)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

    def forward(self, x, residual=None):
        # The caller (Tree) may pass a precomputed residual; otherwise the
        # identity shortcut is used.
        if residual is None:
            residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        out += residual
        out = self.relu(out)
        return out
class Root(nn.Module):
    """Aggregation node of a DLA tree: concatenates its children along the
    channel dim, fuses them with a single conv, and optionally adds a
    residual connection from the first child."""
    def __init__(self, in_channels, out_channels, kernel_size, residual, norm='BN'):
        super(Root, self).__init__()
        # Fix: pass `kernel_size` through instead of a hard-coded 1 -- the
        # padding below was already computed from `kernel_size`, and the
        # sibling implementation in dlafpn.py uses the parameter.  All
        # in-repo call sites use kernel_size=1, so behavior is unchanged.
        self.conv = nn.Conv2d(
            in_channels, out_channels, kernel_size,
            stride=1, bias=False, padding=(kernel_size - 1) // 2)
        self.bn = get_norm(norm, out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.residual = residual

    def forward(self, *x):
        """Fuse an arbitrary number of child feature maps (same H x W)."""
        children = x
        x = self.conv(torch.cat(x, 1))
        x = self.bn(x)
        if self.residual:
            # Identity shortcut from the first child.
            x += children[0]
        x = self.relu(x)
        return x
class Tree(nn.Module):
    """Hierarchical aggregation node of DLA.

    A Tree of depth `levels` recursively splits into two subtrees
    (`tree1`, `tree2`); at depth 1 a `Root` fuses the two block outputs
    together with any accumulated `children` feature maps."""
    def __init__(self, levels, block, in_channels, out_channels, stride=1,
                 level_root=False, root_dim=0, root_kernel_size=1,
                 dilation=1, root_residual=False, norm='BN'):
        super(Tree, self).__init__()
        if root_dim == 0:
            # Default: the root fuses the two subtree outputs.
            root_dim = 2 * out_channels
        if level_root:
            # Level roots additionally receive the (downsampled) stage input.
            root_dim += in_channels
        if levels == 1:
            self.tree1 = block(in_channels, out_channels, stride,
                               dilation=dilation, norm=norm)
            self.tree2 = block(out_channels, out_channels, 1,
                               dilation=dilation, norm=norm)
        else:
            self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
                              stride, root_dim=0,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual,
                              norm=norm)
            self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
                              root_dim=root_dim + out_channels,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual,
                              norm=norm)
        if levels == 1:
            self.root = Root(root_dim, out_channels, root_kernel_size,
                             root_residual, norm=norm)
        self.level_root = level_root
        self.root_dim = root_dim
        self.downsample = None
        self.project = None
        self.levels = levels
        if stride > 1:
            self.downsample = nn.MaxPool2d(stride, stride=stride)
        if in_channels != out_channels:
            # 1x1 projection so the residual matches the output width.
            self.project = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=1, bias=False),
                get_norm(norm, out_channels)
            )

    def forward(self, x, residual=None, children=None):
        # NOTE(review): the incoming `residual` argument is ignored and
        # recomputed from the (possibly downsampled/projected) input below.
        # `children` accumulates maps that the leaf Root will fuse.
        children = [] if children is None else children
        bottom = self.downsample(x) if self.downsample else x
        residual = self.project(bottom) if self.project else bottom
        if self.level_root:
            children.append(bottom)
        x1 = self.tree1(x, residual)
        if self.levels == 1:
            x2 = self.tree2(x1)
            x = self.root(x2, x1, *children)
        else:
            children.append(x1)
            x = self.tree2(x1, children=children)
        return x
class DLA(nn.Module):
    """Deep Layer Aggregation trunk: 6 stages (level0..level5).

    Pretrained ImageNet weights are downloaded and loaded at construction
    time (network side effect).
    """
    def __init__(self, num_layers, levels, channels,
                 block=BasicBlock, residual_root=False, norm='BN'):
        """
        Args:
            num_layers (int): DLA depth (34 or 60); selects the weight hash.
            levels (list[int]): tree depth per stage.
            channels (list[int]): output channels per stage.
            block: residual block class (BasicBlock or Bottleneck).
            residual_root (bool): add residual connections in Root nodes.
            norm (str): normalization name passed to get_norm.
        """
        super(DLA, self).__init__()
        self.norm = norm
        self.channels = channels
        self.base_layer = nn.Sequential(
            nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
                      padding=3, bias=False),
            get_norm(self.norm, channels[0]),
            nn.ReLU(inplace=True))
        self.level0 = self._make_conv_level(
            channels[0], channels[0], levels[0])
        self.level1 = self._make_conv_level(
            channels[0], channels[1], levels[1], stride=2)
        self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
                           level_root=False,
                           root_residual=residual_root, norm=norm)
        self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
                           level_root=True, root_residual=residual_root,
                           norm=norm)
        self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
                           level_root=True, root_residual=residual_root,
                           norm=norm)
        self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
                           level_root=True, root_residual=residual_root,
                           norm=norm)
        self.load_pretrained_model(
            data='imagenet', name='dla{}'.format(num_layers),
            hash=HASH[num_layers])

    def load_pretrained_model(self, data, name, hash):
        """Download ImageNet weights and load them non-strictly; recreates
        `fc` to match the checkpoint's class count so shapes line up."""
        model_url = get_model_url(data, name, hash)
        model_weights = model_zoo.load_url(model_url)
        # Infer the number of classes from the last tensor in the checkpoint.
        num_classes = len(model_weights[list(model_weights.keys())[-1]])
        self.fc = nn.Conv2d(
            self.channels[-1], num_classes,
            kernel_size=1, stride=1, padding=0, bias=True)
        print('Loading pretrained')
        # strict=False: detection variants may not match every key.
        self.load_state_dict(model_weights, strict=False)

    def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
        """Stack `convs` conv-norm-relu units; only the first may stride."""
        modules = []
        for i in range(convs):
            modules.extend([
                nn.Conv2d(inplanes, planes, kernel_size=3,
                          stride=stride if i == 0 else 1,
                          padding=dilation, bias=False, dilation=dilation),
                get_norm(self.norm, planes),
                nn.ReLU(inplace=True)])
            inplanes = planes
        return nn.Sequential(*modules)

    def forward(self, x):
        # Returns all 6 stage outputs as a list (finest to coarsest).
        y = []
        x = self.base_layer(x)
        for i in range(6):
            x = getattr(self, 'level{}'.format(i))(x)
            y.append(x)
        return y
def fill_up_weights(up):
    """Fill a (grouped) transposed-conv weight with bilinear-upsampling
    coefficients; every output channel receives the same separable kernel."""
    w = up.weight.data
    f = math.ceil(w.size(2) / 2)
    c = (2 * f - 1 - f % 2) / (2. * f)
    for i in range(w.size(2)):
        row_coef = 1 - math.fabs(i / f - c)
        for j in range(w.size(3)):
            w[0, 0, i, j] = row_coef * (1 - math.fabs(j / f - c))
    # Broadcast the first channel's kernel to all remaining channels.
    for ch in range(1, w.size(0)):
        w[ch, 0, :, :] = w[0, 0, :, :]
class _DeformConv(nn.Module):
    """3x3 deformable conv followed by norm + ReLU.

    With DCNV1 the offset conv predicts 18 channels (2 * 3 * 3 offsets);
    with modulated DCN (v2) it predicts 27: x-offsets, y-offsets, and a
    sigmoid modulation mask (9 channels each)."""
    def __init__(self, chi, cho, norm='BN'):
        super(_DeformConv, self).__init__()
        self.actf = nn.Sequential(
            get_norm(norm, cho),
            nn.ReLU(inplace=True)
        )
        if DCNV1:
            self.offset = Conv2d(
                chi, 18, kernel_size=3, stride=1,
                padding=1, dilation=1)
            self.conv = DeformConv(
                chi, cho, kernel_size=(3,3), stride=1, padding=1,
                dilation=1, deformable_groups=1)
        else:
            self.offset = Conv2d(
                chi, 27, kernel_size=3, stride=1,
                padding=1, dilation=1)
            self.conv = ModulatedDeformConv(
                chi, cho, kernel_size=3, stride=1, padding=1,
                dilation=1, deformable_groups=1)
        # Zero-init offsets so the layer initially behaves like a plain conv.
        nn.init.constant_(self.offset.weight, 0)
        nn.init.constant_(self.offset.bias, 0)

    def forward(self, x):
        if DCNV1:
            offset = self.offset(x)
            x = self.conv(x, offset)
        else:
            offset_mask = self.offset(x)
            # Split the prediction into x/y offsets and the modulation mask.
            offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1)
            offset = torch.cat((offset_x, offset_y), dim=1)
            mask = mask.sigmoid()
            x = self.conv(x, offset, mask)
        x = self.actf(x)
        return x
class IDAUp(nn.Module):
    """Iterative Deep Aggregation: upsamples each of layers[startp+1:endp]
    to the previous level's resolution and merges them in place."""
    def __init__(self, o, channels, up_f, norm='BN'):
        """
        Args:
            o (int): output channel count for every merged level.
            channels (list[int]): input channels per level.
            up_f (list[int]): upsampling factor per level.
        """
        super(IDAUp, self).__init__()
        for i in range(1, len(channels)):
            c = channels[i]
            f = int(up_f[i])
            proj = _DeformConv(c, o, norm=norm)
            node = _DeformConv(o, o, norm=norm)
            # Depthwise (groups=o) transposed conv, initialized with
            # bilinear-upsampling weights.
            up = nn.ConvTranspose2d(o, o, f * 2, stride=f,
                                    padding=f // 2, output_padding=0,
                                    groups=o, bias=False)
            fill_up_weights(up)
            setattr(self, 'proj_' + str(i), proj)
            setattr(self, 'up_' + str(i), up)
            setattr(self, 'node_' + str(i), node)

    def forward(self, layers, startp, endp):
        # NOTE: modifies `layers` in place; layers[endp - 1] ends up holding
        # the fully aggregated feature map.
        for i in range(startp + 1, endp):
            upsample = getattr(self, 'up_' + str(i - startp))
            project = getattr(self, 'proj_' + str(i - startp))
            layers[i] = upsample(project(layers[i]))
            node = getattr(self, 'node_' + str(i - startp))
            layers[i] = node(layers[i] + layers[i - 1])
class DLAUp(nn.Module):
    """Stack of IDAUp stages that progressively merges the DLA stage
    outputs from coarse to fine."""
    def __init__(self, startp, channels, scales, in_channels=None, norm='BN'):
        super(DLAUp, self).__init__()
        self.startp = startp
        if in_channels is None:
            in_channels = channels
        self.channels = channels
        channels = list(channels)
        scales = np.array(scales, dtype=int)
        for i in range(len(channels) - 1):
            # ida_i merges the last i+2 levels down to channels[j]/scales[j].
            j = -i - 2
            setattr(self, 'ida_{}'.format(i),
                    IDAUp(channels[j], in_channels[j:],
                          scales[j:] // scales[j], norm=norm))
            # After this IDA stage the merged levels all carry channels[j]
            # channels at scale scales[j]; update the bookkeeping in place.
            scales[j + 1:] = scales[j]
            in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]

    def forward(self, layers):
        out = [layers[-1]] # start with 32
        for i in range(len(layers) - self.startp - 1):
            ida = getattr(self, 'ida_{}'.format(i))
            # Each IDA pass rewrites `layers` in place; its final entry is
            # the newly aggregated (finer) map.
            ida(layers, len(layers) -i - 2, len(layers))
            out.insert(0, layers[-1])
        return out
# num_layers -> (tree depth per stage, channels per stage, block class)
DLA_CONFIGS = {
    34: ([1, 1, 1, 2, 2, 1], [16, 32, 64, 128, 256, 512], BasicBlock),
    60: ([1, 1, 1, 2, 3, 1], [16, 32, 128, 256, 512, 1024], Bottleneck)
}
class DLASeg(Backbone):
    """DLA backbone with optional DLAUp / IDAUp aggregation heads.

    Output features are named "dla{i}"; with `ms_output=False` a single-
    resolution (stride-4) head is used, otherwise the (optionally DLAUp-
    aggregated) multi-scale maps are returned directly."""
    def __init__(self, num_layers, out_features, use_dla_up=True,
                 ms_output=False, norm='BN'):
        super(DLASeg, self).__init__()
        # depth = 34
        levels, channels, Block = DLA_CONFIGS[num_layers]
        self.base = DLA(num_layers=num_layers,
                        levels=levels, channels=channels, block=Block, norm=norm)
        down_ratio = 4
        # Finest pyramid level kept: stride 4 -> index 2.
        self.first_level = int(np.log2(down_ratio))
        self.ms_output = ms_output
        self.last_level = 5 if not self.ms_output else 6
        channels = self.base.channels
        scales = [2 ** i for i in range(len(channels[self.first_level:]))]
        self.use_dla_up = use_dla_up
        if self.use_dla_up:
            self.dla_up = DLAUp(
                self.first_level, channels[self.first_level:], scales,
                norm=norm)
        out_channel = channels[self.first_level]
        if not self.ms_output: # stride 4 DLA
            # Final IDA head merging everything down to a stride-4 map.
            self.ida_up = IDAUp(
                out_channel, channels[self.first_level:self.last_level],
                [2 ** i for i in range(self.last_level - self.first_level)],
                norm=norm)
        self._out_features = out_features
        self._out_feature_channels = {
            'dla{}'.format(i): channels[i] for i in range(6)}
        self._out_feature_strides = {
            'dla{}'.format(i): 2 ** i for i in range(6)}
        self._size_divisibility = 32

    @property
    def size_divisibility(self):
        # Required padding multiple for input images.
        return self._size_divisibility

    def forward(self, x):
        x = self.base(x)
        if self.use_dla_up:
            x = self.dla_up(x)
        if not self.ms_output: # stride 4 dla
            # Clone before the in-place IDA pass so `x` stays untouched.
            y = []
            for i in range(self.last_level - self.first_level):
                y.append(x[i].clone())
            self.ida_up(y, 0, len(y))
            ret = {}
            # NOTE(review): outputs here are keyed 'dla0'... although they
            # are stride-4 maps, which looks inconsistent with the
            # _out_feature_strides declared in __init__ -- confirm.
            for i in range(self.last_level - self.first_level):
                out_feature = 'dla{}'.format(i)
                if out_feature in self._out_features:
                    ret[out_feature] = y[i]
        else:
            ret = {}
            st = self.first_level if self.use_dla_up else 0
            for i in range(self.last_level - st):
                out_feature = 'dla{}'.format(i + st)
                if out_feature in self._out_features:
                    ret[out_feature] = x[i]
        return ret
@BACKBONE_REGISTRY.register()
def build_dla_backbone(cfg, input_shape):
    """
    Create a DLASeg backbone instance from config.
    Returns:
        DLASeg: a :class:`DLASeg` instance.
    """
    return DLASeg(
        out_features=cfg.MODEL.DLA.OUT_FEATURES,
        num_layers=cfg.MODEL.DLA.NUM_LAYERS,
        use_dla_up=cfg.MODEL.DLA.USE_DLA_UP,
        ms_output=cfg.MODEL.DLA.MS_OUTPUT,
        norm=cfg.MODEL.DLA.NORM)
class LastLevelP6P7(nn.Module):
    """
    This module is used in RetinaNet to generate extra layers, P6 and P7 from
    C5 feature.
    """
    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Two stride-2 convs: P6 from the input feature, P7 from relu(P6).
        self.num_levels = 2
        self.in_feature = "dla5"
        self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
        self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
        for module in [self.p6, self.p7]:
            weight_init.c2_xavier_fill(module)

    def forward(self, c5):
        p6 = self.p6(c5)
        p7 = self.p7(F.relu(p6))
        return [p6, p7]
@BACKBONE_REGISTRY.register()
def build_retinanet_dla_fpn_backbone(cfg, input_shape: ShapeSpec):
    """
    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    bottom_up = build_dla_backbone(cfg, input_shape)
    out_channels = cfg.MODEL.FPN.OUT_CHANNELS
    # P6/P7 are synthesized from the coarsest DLA level ("dla5").
    top_block = LastLevelP6P7(
        bottom_up.output_shape()['dla5'].channels, out_channels
    )
    return FPN(
        bottom_up=bottom_up,
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=out_channels,
        norm=cfg.MODEL.FPN.NORM,
        top_block=top_block,
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/modeling/backbone/dlafpn.py | Python | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# this file is from https://github.com/ucbdrive/dla/blob/master/dla.py.
import math
from os.path import join
import numpy as np
import torch
from torch import nn
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import fvcore.nn.weight_init as weight_init
from detectron2.modeling.backbone import FPN
from detectron2.layers import ShapeSpec, ModulatedDeformConv, Conv2d
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
from detectron2.layers.batch_norm import get_norm
from detectron2.modeling.backbone import Backbone
WEB_ROOT = 'http://dl.yf.io/dla/models'  # base URL of the pretrained DLA checkpoints
def get_model_url(data, name, hash):
    """Return the download URL of a pretrained DLA checkpoint.

    Uses the module-level WEB_ROOT constant instead of repeating the URL
    literal (the constant was previously defined but unused).
    """
    return join(WEB_ROOT, data, '{}-{}.pth'.format(name, hash))
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
    """Two-conv residual block used by DLA-34 (cfg-driven norm variant)."""
    def __init__(self, cfg, inplanes, planes, stride=1, dilation=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
                               stride=stride, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn1 = get_norm(cfg.MODEL.DLA.NORM, planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn2 = get_norm(cfg.MODEL.DLA.NORM, planes)
        self.stride = stride

    def forward(self, x, residual=None):
        # The caller (Tree) may pass a precomputed residual (projected /
        # downsampled input); otherwise the identity shortcut is used.
        if residual is None:
            residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (cfg-driven norm).

    Note: `expansion` *divides* the width -- the inner 3x3 conv runs at
    planes // expansion channels."""
    expansion = 2

    def __init__(self, cfg, inplanes, planes, stride=1, dilation=1):
        super(Bottleneck, self).__init__()
        expansion = Bottleneck.expansion
        bottle_planes = planes // expansion
        self.conv1 = nn.Conv2d(inplanes, bottle_planes,
                               kernel_size=1, bias=False)
        self.bn1 = get_norm(cfg.MODEL.DLA.NORM, bottle_planes)
        self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
                               stride=stride, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn2 = get_norm(cfg.MODEL.DLA.NORM, bottle_planes)
        self.conv3 = nn.Conv2d(bottle_planes, planes,
                               kernel_size=1, bias=False)
        self.bn3 = get_norm(cfg.MODEL.DLA.NORM, planes)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

    def forward(self, x, residual=None):
        # The caller (Tree) may pass a precomputed residual; otherwise the
        # identity shortcut is used.
        if residual is None:
            residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        out += residual
        out = self.relu(out)
        return out
class Root(nn.Module):
    """Aggregation node of a DLA tree: concatenates its children along the
    channel dim, fuses them with one conv, and optionally adds a residual
    connection from the first child."""
    def __init__(self, cfg, in_channels, out_channels, kernel_size, residual):
        super(Root, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels, kernel_size,
            stride=1, bias=False, padding=(kernel_size - 1) // 2)
        self.bn = get_norm(cfg.MODEL.DLA.NORM, out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.residual = residual

    def forward(self, *x):
        # Fuses an arbitrary number of child feature maps (same H x W).
        children = x
        x = self.conv(torch.cat(x, 1))
        x = self.bn(x)
        if self.residual:
            # Identity shortcut from the first child.
            x += children[0]
        x = self.relu(x)
        return x
class Tree(nn.Module):
    """Hierarchical aggregation node of DLA (cfg-driven norm variant).

    A Tree of depth `levels` recursively splits into two subtrees
    (`tree1`, `tree2`); at depth 1 a `Root` fuses the two block outputs
    together with any accumulated `children` feature maps."""
    def __init__(self, cfg, levels, block, in_channels, out_channels, stride=1,
                 level_root=False, root_dim=0, root_kernel_size=1,
                 dilation=1, root_residual=False):
        super(Tree, self).__init__()
        if root_dim == 0:
            # Default: the root fuses the two subtree outputs.
            root_dim = 2 * out_channels
        if level_root:
            # Level roots additionally receive the (downsampled) stage input.
            root_dim += in_channels
        if levels == 1:
            self.tree1 = block(cfg, in_channels, out_channels, stride,
                               dilation=dilation)
            self.tree2 = block(cfg, out_channels, out_channels, 1,
                               dilation=dilation)
        else:
            self.tree1 = Tree(cfg, levels - 1, block, in_channels, out_channels,
                              stride, root_dim=0,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
            self.tree2 = Tree(cfg, levels - 1, block, out_channels, out_channels,
                              root_dim=root_dim + out_channels,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
        if levels == 1:
            self.root = Root(cfg, root_dim, out_channels, root_kernel_size,
                             root_residual)
        self.level_root = level_root
        self.root_dim = root_dim
        self.downsample = None
        self.project = None
        self.levels = levels
        if stride > 1:
            self.downsample = nn.MaxPool2d(stride, stride=stride)
        if in_channels != out_channels:
            # 1x1 projection so the residual matches the output width.
            self.project = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=1, bias=False),
                get_norm(cfg.MODEL.DLA.NORM, out_channels)
            )

    def forward(self, x, residual=None, children=None):
        if self.training and residual is not None:
            # Touch `residual` with a zero-valued term so the tensor stays
            # in the autograd graph even though it is recomputed below --
            # presumably to avoid unused-parameter errors in distributed
            # training; TODO confirm.
            x = x + residual.sum() * 0.0
        # `children` accumulates maps that the leaf Root will fuse.
        children = [] if children is None else children
        bottom = self.downsample(x) if self.downsample else x
        residual = self.project(bottom) if self.project else bottom
        if self.level_root:
            children.append(bottom)
        x1 = self.tree1(x, residual)
        if self.levels == 1:
            x2 = self.tree2(x1)
            x = self.root(x2, x1, *children)
        else:
            children.append(x1)
            x = self.tree2(x1, children=children)
        return x
class DLA(Backbone):
    """DLA trunk (Deep Layer Aggregation) as a detectron2 ``Backbone``.

    Produces six feature maps "dla0".."dla5" with strides 1, 2, 4, 8, 16, 32
    and channel widths given by ``channels``. ImageNet-pretrained DLA-34
    weights are downloaded and loaded unconditionally in ``__init__``.
    """

    def __init__(self, cfg, levels, channels, block=BasicBlock, residual_root=False):
        super(DLA, self).__init__()
        self.cfg = cfg
        self.channels = channels
        self._out_features = ["dla{}".format(i) for i in range(6)]
        self._out_feature_channels = {k: channels[i] for i, k in enumerate(self._out_features)}
        self._out_feature_strides = {k: 2 ** i for i, k in enumerate(self._out_features)}
        # Stride-1 stem: a 7x7 conv that keeps full resolution.
        self.base_layer = nn.Sequential(
            nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
                      padding=3, bias=False),
            get_norm(cfg.MODEL.DLA.NORM, channels[0]),
            nn.ReLU(inplace=True))
        # Plain conv stages for the first two levels, trees afterwards.
        self.level0 = self._make_conv_level(
            channels[0], channels[0], levels[0])
        self.level1 = self._make_conv_level(
            channels[0], channels[1], levels[1], stride=2)
        self.level2 = Tree(cfg, levels[2], block, channels[1], channels[2], 2,
                           level_root=False,
                           root_residual=residual_root)
        self.level3 = Tree(cfg, levels[3], block, channels[2], channels[3], 2,
                           level_root=True, root_residual=residual_root)
        self.level4 = Tree(cfg, levels[4], block, channels[3], channels[4], 2,
                           level_root=True, root_residual=residual_root)
        self.level5 = Tree(cfg, levels[5], block, channels[4], channels[5], 2,
                           level_root=True, root_residual=residual_root)
        # He-style init for every conv; overwritten below by the checkpoint
        # for all weights present in it.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
        # NOTE: hard-coded to DLA-34 weights regardless of `levels`/`channels`.
        self.load_pretrained_model(
            data='imagenet', name='dla34', hash='ba72cf86')

    def load_pretrained_model(self, data, name, hash):
        """Fetch an ImageNet checkpoint and load it (classifier head dropped)."""
        model_url = get_model_url(data, name, hash)
        model_weights = model_zoo.load_url(model_url)
        del model_weights['fc.weight']
        del model_weights['fc.bias']
        print('Loading pretrained DLA!')
        self.load_state_dict(model_weights, strict=True)

    def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
        """Stack ``convs`` conv-norm-ReLU units; only the first may stride."""
        modules = []
        for i in range(convs):
            modules.extend([
                nn.Conv2d(inplanes, planes, kernel_size=3,
                          stride=stride if i == 0 else 1,
                          padding=dilation, bias=False, dilation=dilation),
                get_norm(self.cfg.MODEL.DLA.NORM, planes),
                nn.ReLU(inplace=True)])
            inplanes = planes
        return nn.Sequential(*modules)

    def forward(self, x):
        """Return ``{"dla0": ..., ..., "dla5": ...}`` for an input batch."""
        y = {}
        x = self.base_layer(x)
        for i in range(6):
            name = 'level{}'.format(i)
            x = getattr(self, name)(x)
            y['dla{}'.format(i)] = x
        return y
def fill_up_weights(up):
    """Initialize a (grouped) ConvTranspose2d as bilinear upsampling.

    Writes a bilinear interpolation kernel into input-slot 0 of channel 0
    and copies it to all remaining output channels, in place. This matches
    the depthwise (groups == channels) usage in ``IDAUp``, where only index
    0 of weight dim 1 exists.

    Args:
        up (nn.ConvTranspose2d): layer whose ``weight`` has shape
            (out_channels, in_channels // groups, kH, kW).
    """
    w = up.weight.data
    f = math.ceil(w.size(2) / 2)
    # Center offset of the bilinear kernel. Previously this was named `c`
    # and then shadowed by the channel loop variable below.
    center = (2 * f - 1 - f % 2) / (2. * f)
    for i in range(w.size(2)):
        for j in range(w.size(3)):
            w[0, 0, i, j] = \
                (1 - math.fabs(i / f - center)) * (1 - math.fabs(j / f - center))
    # Replicate the kernel across all output channels.
    for ch in range(1, w.size(0)):
        w[ch, 0, :, :] = w[0, 0, :, :]
class Conv(nn.Module):
    """1x1 convolution followed by normalization and ReLU."""

    def __init__(self, chi, cho, norm):
        super(Conv, self).__init__()
        layers = [
            nn.Conv2d(chi, cho, kernel_size=1, stride=1, bias=False),
            get_norm(norm, cho),
            nn.ReLU(inplace=True),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
class DeformConv(nn.Module):
    """Modulated deformable 3x3 conv (DCNv2-style) followed by norm + ReLU.

    The offset/mask branch is zero-initialized, so at init the layer
    behaves like a plain 3x3 convolution with uniform mask logits.
    """

    def __init__(self, chi, cho, norm):
        super(DeformConv, self).__init__()
        self.actf = nn.Sequential(
            get_norm(norm, cho),
            nn.ReLU(inplace=True)
        )
        # 27 = 3 (x-offset, y-offset, mask) * 3*3 kernel positions.
        self.offset = Conv2d(
            chi, 27, kernel_size=3, stride=1,
            padding=1, dilation=1)
        self.conv = ModulatedDeformConv(
            chi, cho, kernel_size=3, stride=1, padding=1,
            dilation=1, deformable_groups=1)
        nn.init.constant_(self.offset.weight, 0)
        nn.init.constant_(self.offset.bias, 0)

    def forward(self, x):
        offset_mask = self.offset(x)
        # Channel layout: [x-offsets | y-offsets | mask logits].
        offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1)
        offset = torch.cat((offset_x, offset_y), dim=1)
        mask = mask.sigmoid()
        x = self.conv(x, offset, mask)
        x = self.actf(x)
        return x
class IDAUp(nn.Module):
    """Iterative Deep Aggregation upsampling.

    Each input level is projected to ``o`` channels, upsampled by a
    depthwise transposed conv initialized as bilinear interpolation, and
    merged with the previous level through an aggregation node.
    """

    def __init__(self, o, channels, up_f, norm='FrozenBN', node_type=Conv):
        """
        Args:
            o (int): output channel count for every merged level.
            channels (list[int]): input channels per level; index 0 is the
                reference level and gets no modules of its own.
            up_f (list): per-level upsampling factor relative to level 0.
            node_type: aggregation node class, ``Conv`` or ``DeformConv``.
        """
        super(IDAUp, self).__init__()
        for i in range(1, len(channels)):
            c = channels[i]
            f = int(up_f[i])
            proj = node_type(c, o, norm)
            node = node_type(o, o, norm)
            # Depthwise (groups == o) transposed conv; weights start as a
            # bilinear kernel but remain trainable.
            up = nn.ConvTranspose2d(o, o, f * 2, stride=f,
                                    padding=f // 2, output_padding=0,
                                    groups=o, bias=False)
            fill_up_weights(up)
            setattr(self, 'proj_' + str(i), proj)
            setattr(self, 'up_' + str(i), up)
            setattr(self, 'node_' + str(i), node)

    def forward(self, layers, startp, endp):
        """Merge ``layers[startp:endp]`` coarse-to-fine, in place.

        ``layers[i]`` is overwritten with the aggregation of itself
        (projected + upsampled) and ``layers[i - 1]``.
        """
        for i in range(startp + 1, endp):
            upsample = getattr(self, 'up_' + str(i - startp))
            project = getattr(self, 'proj_' + str(i - startp))
            layers[i] = upsample(project(layers[i]))
            node = getattr(self, 'node_' + str(i - startp))
            layers[i] = node(layers[i] + layers[i - 1])
# Aggregation-node types selectable via cfg.MODEL.DLA.DLAUP_NODE:
# plain 1x1 conv vs. modulated deformable conv.
DLAUP_NODE_MAP = {
    'conv': Conv,
    'dcn': DeformConv,
}
class DLAUP(Backbone):
    """DLA-Up decoder: stacks ``IDAUp`` stages on top of a DLA trunk.

    Outputs features "dlaup{l}" for each input level l, where
    l = log2(feature stride); each output is refined by aggregating all
    coarser levels into it.
    """

    def __init__(self, bottom_up, in_features, norm, dlaup_node='conv'):
        super(DLAUP, self).__init__()
        assert isinstance(bottom_up, Backbone)
        self.bottom_up = bottom_up
        input_shapes = bottom_up.output_shape()
        in_strides = [input_shapes[f].stride for f in in_features]
        in_channels = [input_shapes[f].channels for f in in_features]
        # Level index = log2 of the feature stride.
        in_levels = [int(math.log2(input_shapes[f].stride)) for f in in_features]
        self.in_features = in_features
        out_features = ['dlaup{}'.format(l) for l in in_levels]
        self._out_features = out_features
        self._out_feature_channels = {
            'dlaup{}'.format(l): in_channels[i] for i, l in enumerate(in_levels)}
        self._out_feature_strides = {
            'dlaup{}'.format(l): 2 ** l for l in in_levels}
        # NOTE(review): debug prints left in; consider a logger instead.
        print('self._out_features', self._out_features)
        print('self._out_feature_channels', self._out_feature_channels)
        print('self._out_feature_strides', self._out_feature_strides)
        self._size_divisibility = 32
        node_type = DLAUP_NODE_MAP[dlaup_node]
        self.startp = int(math.log2(in_strides[0]))
        self.channels = in_channels
        channels = list(in_channels)
        scales = np.array([2 ** i for i in range(len(out_features))], dtype=int)
        # One IDAUp per decoder step. Step i (negative index j = -i-2) merges
        # all levels coarser than j into level j; `scales` and `in_channels`
        # are updated in place to reflect the merged widths/resolutions.
        for i in range(len(channels) - 1):
            j = -i - 2
            setattr(self, 'ida_{}'.format(i),
                    IDAUp(channels[j], in_channels[j:],
                          scales[j:] // scales[j],
                          norm=norm,
                          node_type=node_type))
            scales[j + 1:] = scales[j]
            in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]

    @property
    def size_divisibility(self):
        # Input H and W must be divisible by this (deepest stride is 32).
        return self._size_divisibility

    def forward(self, x):
        """Run the trunk, then the IDAUp stages; return the decoded dict."""
        bottom_up_features = self.bottom_up(x)
        layers = [bottom_up_features[f] for f in self.in_features]
        out = [layers[-1]]  # start with 32
        # Each stage rewrites `layers` in place; the last entry after stage i
        # is the next-finer decoded output.
        for i in range(len(layers) - 1):
            ida = getattr(self, 'ida_{}'.format(i))
            ida(layers, len(layers) - i - 2, len(layers))
            out.insert(0, layers[-1])
        ret = {}
        for k, v in zip(self._out_features, out):
            ret[k] = v
        # import pdb; pdb.set_trace()
        return ret
def dla34(cfg, pretrained=None):  # DLA-34
    """Build a DLA-34 trunk.

    `pretrained` is accepted for API compatibility but unused; ImageNet
    weights are loaded inside DLA.__init__.
    """
    levels = [1, 1, 1, 2, 2, 1]
    channels = [16, 32, 64, 128, 256, 512]
    return DLA(cfg, levels, channels, block=BasicBlock)
class LastLevelP6P7(nn.Module):
    """
    This module is used in RetinaNet to generate extra layers, P6 and P7 from
    C5 feature.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.num_levels = 2
        self.in_feature = "dla5"
        self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
        self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
        for module in (self.p6, self.p7):
            weight_init.c2_xavier_fill(module)

    def forward(self, c5):
        # P6 from C5; P7 from ReLU(P6), each halving resolution.
        p6 = self.p6(c5)
        return [p6, self.p7(F.relu(p6))]
@BACKBONE_REGISTRY.register()
def build_dla_fpn3_backbone(cfg, input_shape: ShapeSpec):
    """
    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    creators = {"dla34": dla34}
    trunk_name = 'dla{}'.format(cfg.MODEL.DLA.NUM_LAYERS)
    bottom_up = creators[trunk_name](cfg)
    return FPN(
        bottom_up=bottom_up,
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=cfg.MODEL.FPN.OUT_CHANNELS,
        norm=cfg.MODEL.FPN.NORM,
        top_block=None,
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
@BACKBONE_REGISTRY.register()
def build_dla_fpn5_backbone(cfg, input_shape: ShapeSpec):
    """
    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    creators = {"dla34": dla34}
    trunk_name = 'dla{}'.format(cfg.MODEL.DLA.NUM_LAYERS)
    bottom_up = creators[trunk_name](cfg)
    out_channels = cfg.MODEL.FPN.OUT_CHANNELS
    # P6/P7 are generated from the deepest trunk feature ("dla5").
    top_in_channels = bottom_up.output_shape()['dla5'].channels
    return FPN(
        bottom_up=bottom_up,
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=out_channels,
        norm=cfg.MODEL.FPN.NORM,
        top_block=LastLevelP6P7(top_in_channels, out_channels),
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
@BACKBONE_REGISTRY.register()
def build_dlaup_backbone(cfg, input_shape: ShapeSpec):
    """
    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    creators = {"dla34": dla34}
    trunk_name = 'dla{}'.format(cfg.MODEL.DLA.NUM_LAYERS)
    bottom_up = creators[trunk_name](cfg)
    return DLAUP(
        bottom_up=bottom_up,
        in_features=cfg.MODEL.DLA.DLAUP_IN_FEATURES,
        norm=cfg.MODEL.DLA.NORM,
        dlaup_node=cfg.MODEL.DLA.DLAUP_NODE,
    )
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/modeling/backbone/fpn_p5.py | Python | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
import fvcore.nn.weight_init as weight_init
import torch.nn.functional as F
from torch import nn
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.fpn import FPN
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
from detectron2.modeling.backbone.resnet import build_resnet_backbone
class LastLevelP6P7_P5(nn.Module):
    """
    This module is used in RetinaNet to generate extra layers, P6 and P7 from
    C5 feature.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.num_levels = 2
        self.in_feature = "p5"
        self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
        self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
        for module in (self.p6, self.p7):
            weight_init.c2_xavier_fill(module)

    def forward(self, c5):
        # P6 from the input feature; P7 from ReLU(P6), each halving resolution.
        p6 = self.p6(c5)
        return [p6, self.p7(F.relu(p6))]
@BACKBONE_REGISTRY.register()
def build_p67_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
    """
    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    bottom_up = build_resnet_backbone(cfg, input_shape)
    out_channels = cfg.MODEL.FPN.OUT_CHANNELS
    return FPN(
        bottom_up=bottom_up,
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=out_channels,
        norm=cfg.MODEL.FPN.NORM,
        top_block=LastLevelP6P7_P5(out_channels, out_channels),
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
@BACKBONE_REGISTRY.register()
def build_p35_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_resnet_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = FPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=None,
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone | xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/modeling/backbone/res2net.py | Python | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# This file is modified from https://github.com/Res2Net/Res2Net-detectron2/blob/master/detectron2/modeling/backbone/resnet.py
# The original file is under Apache-2.0 License
import numpy as np
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn.functional as F
from torch import nn
from detectron2.layers import (
CNNBlockBase,
Conv2d,
DeformConv,
ModulatedDeformConv,
ShapeSpec,
get_norm,
)
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.backbone.fpn import FPN
from detectron2.modeling.backbone.build import BACKBONE_REGISTRY
from .fpn_p5 import LastLevelP6P7_P5
from .bifpn import BiFPN
__all__ = [
"ResNetBlockBase",
"BasicBlock",
"BottleneckBlock",
"DeformBottleneckBlock",
"BasicStem",
"ResNet",
"make_stage",
"build_res2net_backbone",
]
ResNetBlockBase = CNNBlockBase
"""
Alias for backward compatibiltiy.
"""
class BasicBlock(CNNBlockBase):
    """
    The basic residual block for ResNet-18 and ResNet-34, with two 3x3 conv layers
    and a projection shortcut if needed.
    """

    def __init__(self, in_channels, out_channels, *, stride=1, norm="BN"):
        """
        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            stride (int): Stride for the first conv.
            norm (str or callable): normalization for all conv layers.
                See :func:`layers.get_norm` for supported format.
        """
        super().__init__(in_channels, out_channels, stride)

        # 1x1 projection shortcut only when the channel count changes.
        if in_channels != out_channels:
            self.shortcut = Conv2d(
                in_channels,
                out_channels,
                kernel_size=1,
                stride=stride,
                bias=False,
                norm=get_norm(norm, out_channels),
            )
        else:
            self.shortcut = None

        self.conv1 = Conv2d(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias=False,
            norm=get_norm(norm, out_channels),
        )
        self.conv2 = Conv2d(
            out_channels,
            out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False,
            norm=get_norm(norm, out_channels),
        )

        # MSRA (fvcore) initialization for all convs present.
        for layer in [self.conv1, self.conv2, self.shortcut]:
            if layer is not None:  # shortcut can be None
                weight_init.c2_msra_fill(layer)

    def forward(self, x):
        out = self.conv1(x)
        out = F.relu_(out)
        out = self.conv2(out)

        if self.shortcut is not None:
            shortcut = self.shortcut(x)
        else:
            shortcut = x

        # Residual add (in-place) followed by the final ReLU.
        out += shortcut
        out = F.relu_(out)
        return out
class BottleneckBlock(CNNBlockBase):
    """
    The standard bottle2neck residual block used by Res2Net-50, 101 and 152.

    The 3x3 stage of a classic bottleneck is replaced by `scale` channel
    splits processed hierarchically (each split's output feeds the next),
    which is the defining Res2Net structure.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        *,
        bottleneck_channels,
        stride=1,
        num_groups=1,
        norm="BN",
        stride_in_1x1=False,
        dilation=1,
        basewidth=26,
        scale=4,
    ):
        """
        Args:
            bottleneck_channels (int): number of output channels for the 3x3
                "bottleneck" conv layers.
            num_groups (int): number of groups for the 3x3 conv layer.
            norm (str or callable): normalization for all conv layers.
                See :func:`layers.get_norm` for supported format.
            stride_in_1x1 (bool): when stride>1, whether to put stride in the
                first 1x1 convolution or the bottleneck 3x3 convolution.
            dilation (int): the dilation rate of the 3x3 conv layer.
            basewidth (int): unused here; kept for signature compatibility.
            scale (int): number of Res2Net channel splits.
        """
        super().__init__(in_channels, out_channels, stride)

        if in_channels != out_channels:
            # Res2Net-v1b style shortcut: avg-pool downsample + 1x1 conv.
            self.shortcut = nn.Sequential(
                nn.AvgPool2d(kernel_size=stride, stride=stride,
                             ceil_mode=True, count_include_pad=False),
                Conv2d(
                    in_channels,
                    out_channels,
                    kernel_size=1,
                    stride=1,
                    bias=False,
                    norm=get_norm(norm, out_channels),
                )
            )
        else:
            self.shortcut = None

        # The original MSRA ResNet models have stride in the first 1x1 conv
        # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
        # stride in the 3x3 conv
        stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
        # Channel width of each Res2Net split.
        width = bottleneck_channels // scale

        self.conv1 = Conv2d(
            in_channels,
            bottleneck_channels,
            kernel_size=1,
            stride=stride_1x1,
            bias=False,
            norm=get_norm(norm, bottleneck_channels),
        )

        # Number of splits that go through a 3x3 conv (the last split is
        # passed through untouched when scale > 1).
        if scale == 1:
            self.nums = 1
        else:
            self.nums = scale - 1
        # NOTE(review): self.in_channels/self.out_channels are read here
        # before being (re)assigned below — presumably set by
        # CNNBlockBase.__init__; confirm.
        if self.in_channels != self.out_channels and stride_3x3 != 2:
            # NOTE(review): this pool is never used by forward(), which only
            # pools when stride_3x3 == 2 — looks inconsistent with the
            # original Res2Net "stage" blocks; verify against upstream.
            self.pool = nn.AvgPool2d(kernel_size=3, stride=stride_3x3, padding=1)

        convs = []
        bns = []
        for i in range(self.nums):
            convs.append(nn.Conv2d(
                width,
                width,
                kernel_size=3,
                stride=stride_3x3,
                padding=1 * dilation,
                bias=False,
                groups=num_groups,
                dilation=dilation,
            ))
            bns.append(get_norm(norm, width))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)

        self.conv3 = Conv2d(
            bottleneck_channels,
            out_channels,
            kernel_size=1,
            bias=False,
            norm=get_norm(norm, out_channels),
        )
        self.scale = scale
        self.width = width
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride_3x3 = stride_3x3

        for layer in [self.conv1, self.conv3]:
            if layer is not None:  # shortcut can be None
                weight_init.c2_msra_fill(layer)
        if self.shortcut is not None:
            for layer in self.shortcut.modules():
                if isinstance(layer, Conv2d):
                    weight_init.c2_msra_fill(layer)

        for layer in self.convs:
            if layer is not None:  # shortcut can be None
                weight_init.c2_msra_fill(layer)

        # Zero-initialize the last normalization in each residual branch,
        # so that at the beginning, the residual branch starts with zeros,
        # and each residual block behaves like an identity.
        # See Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
        # "For BN layers, the learnable scaling coefficient γ is initialized
        # to be 1, except for each residual block's last BN
        # where γ is initialized to be 0."
        # nn.init.constant_(self.conv3.norm.weight, 0)
        # TODO this somehow hurts performance when training GN models from scratch.
        # Add it as an option when we need to use this code to train a backbone.

    def forward(self, x):
        out = self.conv1(x)
        out = F.relu_(out)

        # Hierarchical residual splits: each branch's output feeds the next
        # split (except on stage-transition blocks, where splits are taken raw).
        spx = torch.split(out, self.width, 1)
        for i in range(self.nums):
            if i == 0 or self.in_channels != self.out_channels:
                sp = spx[i]
            else:
                sp = sp + spx[i]
            sp = self.convs[i](sp)
            sp = F.relu_(self.bns[i](sp))
            if i == 0:
                out = sp
            else:
                out = torch.cat((out, sp), 1)
        # The last split bypasses the 3x3 convs.
        if self.scale != 1 and self.stride_3x3 == 1:
            out = torch.cat((out, spx[self.nums]), 1)
        elif self.scale != 1 and self.stride_3x3 == 2:
            # NOTE(review): self.pool is only created above when
            # stride_3x3 != 2, so this branch would raise AttributeError.
            # It is unreachable with STRIDE_IN_1X1=True configs — confirm
            # before enabling stride in the 3x3 conv.
            out = torch.cat((out, self.pool(spx[self.nums])), 1)

        out = self.conv3(out)

        if self.shortcut is not None:
            shortcut = self.shortcut(x)
        else:
            shortcut = x

        out += shortcut
        out = F.relu_(out)
        return out
class DeformBottleneckBlock(ResNetBlockBase):
    """
    Not implemented for res2net yet.
    Similar to :class:`BottleneckBlock`, but with deformable conv in the 3x3 convolution.

    Each of the Res2Net splits gets its own deformable conv plus its own
    offset-predicting conv (zero-initialized, so the block starts out as a
    regular BottleneckBlock).
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        *,
        bottleneck_channels,
        stride=1,
        num_groups=1,
        norm="BN",
        stride_in_1x1=False,
        dilation=1,
        deform_modulated=False,
        deform_num_groups=1,
        basewidth=26,
        scale=4,
    ):
        """
        Args:
            deform_modulated (bool): DCNv2 (with mask) if True, else DCNv1.
            deform_num_groups (int): number of deformable groups.
            Other args: same as :class:`BottleneckBlock`.
        """
        super().__init__(in_channels, out_channels, stride)
        self.deform_modulated = deform_modulated

        if in_channels != out_channels:
            # self.shortcut = Conv2d(
            #     in_channels,
            #     out_channels,
            #     kernel_size=1,
            #     stride=stride,
            #     bias=False,
            #     norm=get_norm(norm, out_channels),
            # )
            # Res2Net-v1b style shortcut: avg-pool downsample + 1x1 conv.
            self.shortcut = nn.Sequential(
                nn.AvgPool2d(kernel_size=stride, stride=stride,
                             ceil_mode=True, count_include_pad=False),
                Conv2d(
                    in_channels,
                    out_channels,
                    kernel_size=1,
                    stride=1,
                    bias=False,
                    norm=get_norm(norm, out_channels),
                )
            )
        else:
            self.shortcut = None

        stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
        # Channel width of each Res2Net split.
        width = bottleneck_channels // scale

        self.conv1 = Conv2d(
            in_channels,
            bottleneck_channels,
            kernel_size=1,
            stride=stride_1x1,
            bias=False,
            norm=get_norm(norm, bottleneck_channels),
        )

        if scale == 1:
            self.nums = 1
        else:
            self.nums = scale - 1
        # NOTE(review): same pool creation/use mismatch as BottleneckBlock —
        # forward() needs self.pool only when stride_3x3 == 2, which this
        # condition excludes; dead with STRIDE_IN_1X1=True configs.
        if self.in_channels != self.out_channels and stride_3x3 != 2:
            self.pool = nn.AvgPool2d(kernel_size=3, stride=stride_3x3, padding=1)

        if deform_modulated:
            deform_conv_op = ModulatedDeformConv
            # offset channels are 2 or 3 (if with modulated) * kernel_size * kernel_size
            offset_channels = 27
        else:
            deform_conv_op = DeformConv
            offset_channels = 18

        # self.conv2_offset = Conv2d(
        #     bottleneck_channels,
        #     offset_channels * deform_num_groups,
        #     kernel_size=3,
        #     stride=stride_3x3,
        #     padding=1 * dilation,
        #     dilation=dilation,
        # )
        # self.conv2 = deform_conv_op(
        #     bottleneck_channels,
        #     bottleneck_channels,
        #     kernel_size=3,
        #     stride=stride_3x3,
        #     padding=1 * dilation,
        #     bias=False,
        #     groups=num_groups,
        #     dilation=dilation,
        #     deformable_groups=deform_num_groups,
        #     norm=get_norm(norm, bottleneck_channels),
        # )
        # One offset conv + one deformable conv + one norm per split.
        conv2_offsets = []
        convs = []
        bns = []
        for i in range(self.nums):
            conv2_offsets.append(Conv2d(
                width,
                offset_channels * deform_num_groups,
                kernel_size=3,
                stride=stride_3x3,
                padding=1 * dilation,
                bias=False,
                groups=num_groups,
                dilation=dilation,
            ))
            convs.append(deform_conv_op(
                width,
                width,
                kernel_size=3,
                stride=stride_3x3,
                padding=1 * dilation,
                bias=False,
                groups=num_groups,
                dilation=dilation,
                deformable_groups=deform_num_groups,
            ))
            bns.append(get_norm(norm, width))
        self.conv2_offsets = nn.ModuleList(conv2_offsets)
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)

        self.conv3 = Conv2d(
            bottleneck_channels,
            out_channels,
            kernel_size=1,
            bias=False,
            norm=get_norm(norm, out_channels),
        )
        self.scale = scale
        self.width = width
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.stride_3x3 = stride_3x3

        # for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
        #     if layer is not None:  # shortcut can be None
        #         weight_init.c2_msra_fill(layer)
        # nn.init.constant_(self.conv2_offset.weight, 0)
        # nn.init.constant_(self.conv2_offset.bias, 0)
        for layer in [self.conv1, self.conv3]:
            if layer is not None:  # shortcut can be None
                weight_init.c2_msra_fill(layer)
        if self.shortcut is not None:
            for layer in self.shortcut.modules():
                if isinstance(layer, Conv2d):
                    weight_init.c2_msra_fill(layer)

        for layer in self.convs:
            if layer is not None:  # shortcut can be None
                weight_init.c2_msra_fill(layer)

        # Zero-init the offset branches so deformable convs start as regular
        # convs. The offset convs are created with bias=False above, so the
        # bias branch below is effectively skipped.
        for layer in self.conv2_offsets:
            if layer.weight is not None:
                nn.init.constant_(layer.weight, 0)
            if layer.bias is not None:
                nn.init.constant_(layer.bias, 0)

    def forward(self, x):
        out = self.conv1(x)
        out = F.relu_(out)

        # if self.deform_modulated:
        #     offset_mask = self.conv2_offset(out)
        #     offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1)
        #     offset = torch.cat((offset_x, offset_y), dim=1)
        #     mask = mask.sigmoid()
        #     out = self.conv2(out, offset, mask)
        # else:
        #     offset = self.conv2_offset(out)
        #     out = self.conv2(out, offset)
        # out = F.relu_(out)

        # Hierarchical residual splits, each with its own deformable conv.
        spx = torch.split(out, self.width, 1)
        for i in range(self.nums):
            if i == 0 or self.in_channels != self.out_channels:
                sp = spx[i].contiguous()
            else:
                sp = sp + spx[i].contiguous()

            # sp = self.convs[i](sp)
            if self.deform_modulated:
                # DCNv2: channels are [x-offsets | y-offsets | mask logits].
                offset_mask = self.conv2_offsets[i](sp)
                offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1)
                offset = torch.cat((offset_x, offset_y), dim=1)
                mask = mask.sigmoid()
                sp = self.convs[i](sp, offset, mask)
            else:
                offset = self.conv2_offsets[i](sp)
                sp = self.convs[i](sp, offset)
            sp = F.relu_(self.bns[i](sp))
            if i == 0:
                out = sp
            else:
                out = torch.cat((out, sp), 1)
        # The last split bypasses the 3x3 convs (see NOTE in __init__ about
        # the stride_3x3 == 2 branch).
        if self.scale != 1 and self.stride_3x3 == 1:
            out = torch.cat((out, spx[self.nums]), 1)
        elif self.scale != 1 and self.stride_3x3 == 2:
            out = torch.cat((out, self.pool(spx[self.nums])), 1)

        out = self.conv3(out)

        if self.shortcut is not None:
            shortcut = self.shortcut(x)
        else:
            shortcut = x

        out += shortcut
        out = F.relu_(out)
        return out
def make_stage(block_class, num_blocks, first_stride, *, in_channels, out_channels, **kwargs):
    """
    Create a list of blocks just like those in a ResNet stage.
    Args:
        block_class (type): a subclass of ResNetBlockBase
        num_blocks (int):
        first_stride (int): the stride of the first block. The other blocks will have stride=1.
        in_channels (int): input channels of the entire stage.
        out_channels (int): output channels of **every block** in the stage.
        kwargs: other arguments passed to the constructor of every block.
    Returns:
        list[nn.Module]: a list of block module.
    """
    assert "stride" not in kwargs, "Stride of blocks in make_stage cannot be changed."
    blocks = []
    stride = first_stride
    channels = in_channels
    for _ in range(num_blocks):
        blocks.append(
            block_class(
                in_channels=channels,
                out_channels=out_channels,
                stride=stride,
                **kwargs,
            )
        )
        # After the first block, channels match and stride drops to 1.
        channels = out_channels
        stride = 1
    return blocks
class BasicStem(CNNBlockBase):
    """
    The standard ResNet stem (layers before the first residual block).

    This is the "deep stem" variant: three 3x3 convs (the first with
    stride 2) instead of a single 7x7, followed by a stride-2 max pool,
    for an overall stem stride of 4.
    """

    def __init__(self, in_channels=3, out_channels=64, norm="BN"):
        """
        Args:
            norm (str or callable): norm after the first conv layer.
                See :func:`layers.get_norm` for supported format.
        """
        super().__init__(in_channels, out_channels, 4)
        self.in_channels = in_channels
        self.conv1 = nn.Sequential(
            Conv2d(
                in_channels,
                32,
                kernel_size=3,
                stride=2,
                padding=1,
                bias=False,
            ),
            get_norm(norm, 32),
            nn.ReLU(inplace=True),
            Conv2d(
                32,
                32,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=False,
            ),
            get_norm(norm, 32),
            nn.ReLU(inplace=True),
            Conv2d(
                32,
                out_channels,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=False,
            ),
        )
        # Final norm applied outside the Sequential (before ReLU + pool).
        self.bn1 = get_norm(norm, out_channels)

        for layer in self.conv1:
            if isinstance(layer, Conv2d):
                weight_init.c2_msra_fill(layer)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu_(x)
        # Stride-2 max pool completes the overall stem stride of 4.
        x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
        return x
class ResNet(Backbone):
    """Generic ResNet/Res2Net trunk assembled from a stem and block stages."""

    def __init__(self, stem, stages, num_classes=None, out_features=None):
        """
        Args:
            stem (nn.Module): a stem module
            stages (list[list[CNNBlockBase]]): several (typically 4) stages,
                each contains multiple :class:`CNNBlockBase`.
            num_classes (None or int): if None, will not perform classification.
                Otherwise, will create a linear layer.
            out_features (list[str]): name of the layers whose outputs should
                be returned in forward. Can be anything in "stem", "linear", or "res2" ...
                If None, will return the output of the last layer.
        """
        super(ResNet, self).__init__()
        self.stem = stem
        self.num_classes = num_classes

        current_stride = self.stem.stride
        self._out_feature_strides = {"stem": current_stride}
        self._out_feature_channels = {"stem": self.stem.out_channels}

        self.stages_and_names = []
        for i, blocks in enumerate(stages):
            assert len(blocks) > 0, len(blocks)
            for block in blocks:
                assert isinstance(block, CNNBlockBase), block

            # Stages are named res2, res3, ... following detectron2 convention.
            name = "res" + str(i + 2)
            stage = nn.Sequential(*blocks)

            self.add_module(name, stage)
            self.stages_and_names.append((stage, name))

            # Accumulate the stride of each block in the stage.
            self._out_feature_strides[name] = current_stride = int(
                current_stride * np.prod([k.stride for k in blocks])
            )
            self._out_feature_channels[name] = curr_channels = blocks[-1].out_channels

        if num_classes is not None:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.linear = nn.Linear(curr_channels, num_classes)

            # Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
            # "The 1000-way fully-connected layer is initialized by
            # drawing weights from a zero-mean Gaussian with standard deviation of 0.01."
            nn.init.normal_(self.linear.weight, std=0.01)
            name = "linear"

        if out_features is None:
            out_features = [name]
        self._out_features = out_features
        assert len(self._out_features)
        children = [x[0] for x in self.named_children()]
        for out_feature in self._out_features:
            assert out_feature in children, "Available children: {}".format(", ".join(children))

    def forward(self, x):
        """Run stem + stages; return {feature name: tensor} for out_features."""
        outputs = {}
        x = self.stem(x)
        if "stem" in self._out_features:
            outputs["stem"] = x
        for stage, name in self.stages_and_names:
            x = stage(x)
            if name in self._out_features:
                outputs[name] = x
        if self.num_classes is not None:
            x = self.avgpool(x)
            x = torch.flatten(x, 1)
            x = self.linear(x)
            if "linear" in self._out_features:
                outputs["linear"] = x
        return outputs

    def output_shape(self):
        """Channel/stride metadata for each requested output feature."""
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
            )
            for name in self._out_features
        }

    def freeze(self, freeze_at=0):
        """
        Freeze the first several stages of the ResNet. Commonly used in
        fine-tuning.
        Args:
            freeze_at (int): number of stem and stages to freeze.
                `1` means freezing the stem. `2` means freezing the stem and
                the first stage, etc.
        Returns:
            nn.Module: this ResNet itself
        """
        if freeze_at >= 1:
            self.stem.freeze()
        for idx, (stage, _) in enumerate(self.stages_and_names, start=2):
            if freeze_at >= idx:
                for block in stage.children():
                    block.freeze()
        return self
@BACKBONE_REGISTRY.register()
def build_res2net_backbone(cfg, input_shape):
    """
    Create a Res2Net instance from config.

    Returns:
        ResNet: a :class:`ResNet` instance.
    """
    # need registration of new blocks/stems?
    norm = cfg.MODEL.RESNETS.NORM
    stem = BasicStem(
        in_channels=input_shape.channels,
        out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
        norm=norm,
    )

    # fmt: off
    freeze_at           = cfg.MODEL.BACKBONE.FREEZE_AT
    out_features        = cfg.MODEL.RESNETS.OUT_FEATURES
    depth               = cfg.MODEL.RESNETS.DEPTH
    num_groups          = cfg.MODEL.RESNETS.NUM_GROUPS
    width_per_group     = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
    # Res2Net fixes scale=4; bottleneck width accounts for the splits.
    scale               = 4
    bottleneck_channels = num_groups * width_per_group * scale
    in_channels         = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
    out_channels        = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
    stride_in_1x1       = cfg.MODEL.RESNETS.STRIDE_IN_1X1
    res5_dilation       = cfg.MODEL.RESNETS.RES5_DILATION
    deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE
    deform_modulated    = cfg.MODEL.RESNETS.DEFORM_MODULATED
    deform_num_groups   = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS
    # fmt: on
    assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)

    num_blocks_per_stage = {
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
    }[depth]

    if depth in [18, 34]:
        assert out_channels == 64, "Must set MODEL.RESNETS.RES2_OUT_CHANNELS = 64 for R18/R34"
        assert not any(
            deform_on_per_stage
        ), "MODEL.RESNETS.DEFORM_ON_PER_STAGE unsupported for R18/R34"
        assert res5_dilation == 1, "Must set MODEL.RESNETS.RES5_DILATION = 1 for R18/R34"
        assert num_groups == 1, "Must set MODEL.RESNETS.NUM_GROUPS = 1 for R18/R34"

    stages = []

    # Avoid creating variables without gradients
    # It consumes extra memory and may cause allreduce to fail
    out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features]
    max_stage_idx = max(out_stage_idx)
    for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
        # res5 may trade stride for dilation (dilated-FCN style).
        dilation = res5_dilation if stage_idx == 5 else 1
        first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
        stage_kargs = {
            "num_blocks": num_blocks_per_stage[idx],
            "first_stride": first_stride,
            "in_channels": in_channels,
            "out_channels": out_channels,
            "norm": norm,
        }
        # Use BasicBlock for R18 and R34.
        if depth in [18, 34]:
            stage_kargs["block_class"] = BasicBlock
        else:
            stage_kargs["bottleneck_channels"] = bottleneck_channels
            stage_kargs["stride_in_1x1"] = stride_in_1x1
            stage_kargs["dilation"] = dilation
            stage_kargs["num_groups"] = num_groups
            stage_kargs["scale"] = scale
            if deform_on_per_stage[idx]:
                stage_kargs["block_class"] = DeformBottleneckBlock
                stage_kargs["deform_modulated"] = deform_modulated
                stage_kargs["deform_num_groups"] = deform_num_groups
            else:
                stage_kargs["block_class"] = BottleneckBlock
        blocks = make_stage(**stage_kargs)
        # Channel widths double from one stage to the next.
        in_channels = out_channels
        out_channels *= 2
        bottleneck_channels *= 2
        stages.append(blocks)
    return ResNet(stem, stages, out_features=out_features).freeze(freeze_at)
@BACKBONE_REGISTRY.register()
def build_p67_res2net_fpn_backbone(cfg, input_shape: ShapeSpec):
    """
    Args:
        cfg: a detectron2 CfgNode
    Returns:
        backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
    """
    bottom_up = build_res2net_backbone(cfg, input_shape)
    out_channels = cfg.MODEL.FPN.OUT_CHANNELS
    return FPN(
        bottom_up=bottom_up,
        in_features=cfg.MODEL.FPN.IN_FEATURES,
        out_channels=out_channels,
        norm=cfg.MODEL.FPN.NORM,
        top_block=LastLevelP6P7_P5(out_channels, out_channels),
        fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
    )
@BACKBONE_REGISTRY.register()
def build_res2net_bifpn_backbone(cfg, input_shape: ShapeSpec):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_res2net_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
backbone = BiFPN(
cfg=cfg,
bottom_up=bottom_up,
in_features=in_features,
out_channels=cfg.MODEL.BIFPN.OUT_CHANNELS,
norm=cfg.MODEL.BIFPN.NORM,
num_levels=cfg.MODEL.BIFPN.NUM_LEVELS,
num_bifpn=cfg.MODEL.BIFPN.NUM_BIFPN,
separable_conv=cfg.MODEL.BIFPN.SEPARABLE_CONV,
)
return backbone | xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/modeling/debug.py | Python | import cv2
import numpy as np
import torch
import torch.nn.functional as F
# Fixed random palette of 1300 bright RGB colors (values in [153, 255)),
# shaped (1300, 1, 1, 3) so one color can broadcast over an (H, W) heatmap.
COLORS = ((np.random.rand(1300, 3) * 0.4 + 0.6) * 255).astype(
    np.uint8).reshape(1300, 1, 1, 3)
def _get_color_image(heatmap):
heatmap = heatmap.reshape(
heatmap.shape[0], heatmap.shape[1], heatmap.shape[2], 1)
if heatmap.shape[0] == 1:
color_map = (heatmap * np.ones((1, 1, 1, 3), np.uint8) * 255).max(
axis=0).astype(np.uint8) # H, W, 3
else:
color_map = (heatmap * COLORS[:heatmap.shape[0]]).max(axis=0).astype(np.uint8) # H, W, 3
return color_map
def _blend_image(image, color_map, a=0.7):
    """Alpha-blend `color_map` (resized to `image`'s size) over `image`.

    Returns a uint8 image; `a` is the weight of the color map.
    """
    resized = cv2.resize(color_map, (image.shape[1], image.shape[0]))
    blended = image * (1 - a) + resized * a
    return np.clip(blended, 0, 255).astype(np.uint8)
def _blend_image_heatmaps(image, color_maps, a=0.7):
    """Blend several heatmap renderings over `image` with weight `a`.

    Each color map is resized to the image size; maps are merged with an
    element-wise max before the final alpha blend.
    """
    merged = np.zeros((image.shape[0], image.shape[1], 3), np.float32)
    for cm in color_maps:
        resized = cv2.resize(cm, (image.shape[1], image.shape[0]))
        merged = np.maximum(merged, resized)
    return np.clip(image * (1 - a) + merged * a, 0, 255).astype(np.uint8)
def _decompose_level(x, shapes_per_level, N):
'''
x: LNHiWi x C
'''
x = x.view(x.shape[0], -1)
ret = []
st = 0
for l in range(len(shapes_per_level)):
ret.append([])
h = shapes_per_level[l][0].int().item()
w = shapes_per_level[l][1].int().item()
for i in range(N):
ret[l].append(x[st + h * w * i:st + h * w * (i + 1)].view(
h, w, -1).permute(2, 0, 1))
st += h * w * N
return ret
def _imagelist_to_tensor(images):
images = [x for x in images]
image_sizes = [x.shape[-2:] for x in images]
h = max([size[0] for size in image_sizes])
w = max([size[1] for size in image_sizes])
S = 32
h, w = ((h - 1) // S + 1) * S, ((w - 1) // S + 1) * S
images = [F.pad(x, (0, w - x.shape[2], 0, h - x.shape[1], 0, 0)) \
for x in images]
images = torch.stack(images)
return images
def _ind2il(ind, shapes_per_level, N):
r = ind
l = 0
S = 0
while r - S >= N * shapes_per_level[l][0] * shapes_per_level[l][1]:
S += N * shapes_per_level[l][0] * shapes_per_level[l][1]
l += 1
i = (r - S) // (shapes_per_level[l][0] * shapes_per_level[l][1])
return i, l
def debug_train(
    images, gt_instances, flattened_hms, reg_targets, labels, pos_inds,
    shapes_per_level, locations, strides):
    '''
    Visualize training targets in OpenCV windows (blocks on cv2.waitKey).

    images: N x 3 x H x W
    flattened_hms: LNHiWi x C
    shapes_per_level: L x 2 [(H_i, W_i)]
    locations: LNHiWi x 2
    '''
    # locations that carry a valid (strictly positive) regression target
    reg_inds = torch.nonzero(
        reg_targets.max(dim=1)[0] > 0).squeeze(1)
    N = len(images)
    images = _imagelist_to_tensor(images)
    # `locations` comes per-level for one image; tile each level N times to
    # line up with the level-major, image-major layout of the flat targets
    repeated_locations = [torch.cat([loc] * N, dim=0) \
        for loc in locations]
    locations = torch.cat(repeated_locations, dim=0)
    gt_hms = _decompose_level(flattened_hms, shapes_per_level, N)
    masks = flattened_hms.new_zeros((flattened_hms.shape[0], 1))
    masks[pos_inds] = 1
    masks = _decompose_level(masks, shapes_per_level, N)  # (not visualized below)
    for i in range(len(images)):
        image = images[i].detach().cpu().numpy().transpose(1, 2, 0)
        color_maps = []
        # per-level GT heatmaps rendered to color images
        for l in range(len(gt_hms)):
            color_map = _get_color_image(
                gt_hms[l][i].detach().cpu().numpy())
            color_maps.append(color_map)
            cv2.imshow('gthm_{}'.format(l), color_map)
        blend = _blend_image_heatmaps(image.copy(), color_maps)
        if gt_instances is not None:
            # ground-truth boxes in red
            bboxes = gt_instances[i].gt_boxes.tensor
            for j in range(len(bboxes)):
                bbox = bboxes[j]
                cv2.rectangle(
                    blend,
                    (int(bbox[0]), int(bbox[1])),
                    (int(bbox[2]), int(bbox[3])),
                    (0, 0, 255), 3, cv2.LINE_AA)
        # positive locations as yellow markers, sized by FPN level
        for j in range(len(pos_inds)):
            image_id, l = _ind2il(pos_inds[j], shapes_per_level, N)
            if image_id != i:
                continue
            loc = locations[pos_inds[j]]
            cv2.drawMarker(
                blend, (int(loc[0]), int(loc[1])), (0, 255, 255),
                markerSize=(l + 1) * 16)
        # decoded regression targets as blue boxes with center dots
        for j in range(len(reg_inds)):
            image_id, l = _ind2il(reg_inds[j], shapes_per_level, N)
            if image_id != i:
                continue
            ltrb = reg_targets[reg_inds[j]]
            # NOTE(review): in-place scale on what looks like a view of
            # reg_targets — presumably fine for debug-only code, but it
            # mutates the caller's tensor; confirm.
            ltrb *= strides[l]
            loc = locations[reg_inds[j]]
            bbox = [(loc[0] - ltrb[0]), (loc[1] - ltrb[1]),
                    (loc[0] + ltrb[2]), (loc[1] + ltrb[3])]
            cv2.rectangle(
                blend,
                (int(bbox[0]), int(bbox[1])),
                (int(bbox[2]), int(bbox[3])),
                (255, 0, 0), 1, cv2.LINE_AA)
            cv2.circle(blend, (int(loc[0]), int(loc[1])), 2, (255, 0, 0), -1)
        cv2.imshow('blend', blend)
        cv2.waitKey()
def debug_test(
    images, logits_pred, reg_pred, agn_hm_pred=[], preds=[],
    vis_thresh=0.3, debug_show_name=False, mult_agn=False):
    '''
    Visualize test-time heatmaps and detections in OpenCV windows
    (blocks on cv2.waitKey).

    images: N x 3 x H x W
    class_target: LNHiWi x C
    cat_agn_heatmap: LNHiWi
    shapes_per_level: L x 2 [(H_i, W_i)]
    '''
    N = len(images)
    for i in range(len(images)):
        image = images[i].detach().cpu().numpy().transpose(1, 2, 0)
        result = image.copy().astype(np.uint8)
        pred_image = image.copy().astype(np.uint8)
        color_maps = []
        L = len(logits_pred)
        for l in range(L):
            # stride between image size and this level's heatmap size
            # (level 0 is checked for None: either all levels carry class
            # logits or none do)
            if logits_pred[0] is not None:
                stride = min(image.shape[0], image.shape[1]) / min(
                    logits_pred[l][i].shape[1], logits_pred[l][i].shape[2])
            else:
                stride = min(image.shape[0], image.shape[1]) / min(
                    agn_hm_pred[l][i].shape[1], agn_hm_pred[l][i].shape[2])
            stride = stride if stride < 60 else 64 if stride < 100 else 128
            if logits_pred[0] is not None:
                if mult_agn:
                    # rescale class heatmap by the agnostic heatmap (in place)
                    logits_pred[l][i] = logits_pred[l][i] * agn_hm_pred[l][i]
                color_map = _get_color_image(
                    logits_pred[l][i].detach().cpu().numpy())
                color_maps.append(color_map)
                cv2.imshow('predhm_{}'.format(l), color_map)
            if debug_show_name:
                from detectron2.data.datasets.lvis_v1_categories import LVIS_CATEGORIES
                cat2name = [x['name'] for x in LVIS_CATEGORIES]
            # NOTE(review): this drawing loop runs once per level l, redrawing
            # the same boxes onto pred_image each time — presumably harmless
            # but redundant; confirm before relying on it.
            for j in range(len(preds[i].scores) if preds is not None else 0):
                if preds[i].scores[j] > vis_thresh:
                    bbox = preds[i].proposal_boxes[j] \
                        if preds[i].has('proposal_boxes') else \
                        preds[i].pred_boxes[j]
                    bbox = bbox.tensor[0].detach().cpu().numpy().astype(np.int32)
                    cat = int(preds[i].pred_classes[j]) \
                        if preds[i].has('pred_classes') else 0
                    cl = COLORS[cat, 0, 0]
                    cv2.rectangle(
                        pred_image, (int(bbox[0]), int(bbox[1])),
                        (int(bbox[2]), int(bbox[3])),
                        (int(cl[0]), int(cl[1]), int(cl[2])), 2, cv2.LINE_AA)
                    if debug_show_name:
                        txt = '{}{:.1f}'.format(
                            cat2name[cat] if cat > 0 else '',
                            preds[i].scores[j])
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        cat_size = cv2.getTextSize(txt, font, 0.5, 2)[0]
                        cv2.rectangle(
                            pred_image,
                            (int(bbox[0]), int(bbox[1] - cat_size[1] - 2)),
                            (int(bbox[0] + cat_size[0]), int(bbox[1] - 2)),
                            (int(cl[0]), int(cl[1]), int(cl[2])), -1)
                        cv2.putText(
                            pred_image, txt, (int(bbox[0]), int(bbox[1] - 2)),
                            font, 0.5, (0, 0, 0), thickness=1, lineType=cv2.LINE_AA)
            if agn_hm_pred[l] is not None:
                # class-agnostic heatmap rendered in white
                agn_hm_ = agn_hm_pred[l][i, 0, :, :, None].detach().cpu().numpy()
                agn_hm_ = (agn_hm_ * np.array([255, 255, 255]).reshape(
                    1, 1, 3)).astype(np.uint8)
                cv2.imshow('agn_hm_{}'.format(l), agn_hm_)
        blend = _blend_image_heatmaps(image.copy(), color_maps)
        cv2.imshow('blend', blend)
        cv2.imshow('preds', pred_image)
        cv2.waitKey()
# Running counter used by debug_second_stage to name saved debug images.
# (The previous module-level `global cnt` statement was a no-op: `global`
# only has an effect inside a function body.)
cnt = 0
def debug_second_stage(images, instances, proposals=None, vis_thresh=0.3,
    save_debug=False, debug_show_name=False):
    """Visualize second-stage boxes (GT or predictions) and, optionally,
    first-stage proposals; shows OpenCV windows and can save proposal
    images to output/save_debug/."""
    images = _imagelist_to_tensor(images)
    if debug_show_name:
        from detectron2.data.datasets.lvis_v1_categories import LVIS_CATEGORIES
        cat2name = [x['name'] for x in LVIS_CATEGORIES]
    for i in range(len(images)):
        image = images[i].detach().cpu().numpy().transpose(1, 2, 0).astype(np.uint8).copy()
        # draw GT boxes when available (score 1 for all), else predictions
        if instances[i].has('gt_boxes'):
            bboxes = instances[i].gt_boxes.tensor.cpu().numpy()
            scores = np.ones(bboxes.shape[0])
            cats = instances[i].gt_classes.cpu().numpy()
        else:
            bboxes = instances[i].pred_boxes.tensor.cpu().numpy()
            scores = instances[i].scores.cpu().numpy()
            cats = instances[i].pred_classes.cpu().numpy()
        for j in range(len(bboxes)):
            if scores[j] > vis_thresh:
                bbox = bboxes[j]
                cl = COLORS[cats[j], 0, 0]
                cl = (int(cl[0]), int(cl[1]), int(cl[2]))
                cv2.rectangle(
                    image,
                    (int(bbox[0]), int(bbox[1])),
                    (int(bbox[2]), int(bbox[3])),
                    cl, 2, cv2.LINE_AA)
                if debug_show_name:
                    cat = cats[j]
                    txt = '{}{:.1f}'.format(
                        cat2name[cat] if cat > 0 else '',
                        scores[j])
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cat_size = cv2.getTextSize(txt, font, 0.5, 2)[0]
                    # filled label background, then the label text
                    cv2.rectangle(
                        image,
                        (int(bbox[0]), int(bbox[1] - cat_size[1] - 2)),
                        (int(bbox[0] + cat_size[0]), int(bbox[1] - 2)),
                        (int(cl[0]), int(cl[1]), int(cl[2])), -1)
                    cv2.putText(
                        image, txt, (int(bbox[0]), int(bbox[1] - 2)),
                        font, 0.5, (0, 0, 0), thickness=1, lineType=cv2.LINE_AA)
        if proposals is not None:
            # first-stage proposals on a fresh copy of the image
            proposal_image = images[i].detach().cpu().numpy().transpose(1, 2, 0).astype(np.uint8).copy()
            bboxes = proposals[i].proposal_boxes.tensor.cpu().numpy()
            if proposals[i].has('scores'):
                scores = proposals[i].scores.cpu().numpy()
            else:
                scores = proposals[i].objectness_logits.sigmoid().cpu().numpy()
            for j in range(len(bboxes)):
                if scores[j] > vis_thresh:
                    bbox = bboxes[j]
                    cl = (209, 159, 83)
                    cv2.rectangle(
                        proposal_image,
                        (int(bbox[0]), int(bbox[1])),
                        (int(bbox[2]), int(bbox[3])),
                        cl, 2, cv2.LINE_AA)
        cv2.imshow('image', image)
        if proposals is not None:
            cv2.imshow('proposals', proposal_image)
            if save_debug:
                # module-level counter keeps saved filenames unique
                global cnt
                cnt += 1
                cv2.imwrite('output/save_debug/{}.jpg'.format(cnt), proposal_image)
        cv2.waitKey()
# centernet/modeling/dense_heads/centernet.py
import math
import json
import copy
from typing import List, Dict
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.modeling.proposal_generator.build import PROPOSAL_GENERATOR_REGISTRY
from detectron2.layers import ShapeSpec, cat
from detectron2.structures import Instances, Boxes
from detectron2.modeling import detector_postprocess
from detectron2.utils.comm import get_world_size
from detectron2.config import configurable
from ..layers.heatmap_focal_loss import heatmap_focal_loss_jit
from ..layers.heatmap_focal_loss import binary_heatmap_focal_loss_jit
from ..layers.iou_loss import IOULoss
from ..layers.ml_nms import ml_nms
from ..debug import debug_train, debug_test
from .utils import reduce_sum, _transpose
from .centernet_head import CenterNetHead
# Public API of this module.
__all__ = ["CenterNet"]
INF = 100000000  # sentinel "infinity" used when assigning regression targets
@PROPOSAL_GENERATOR_REGISTRY.register()
class CenterNet(nn.Module):
    @configurable
    def __init__(self,
        # input_shape: Dict[str, ShapeSpec],
        in_channels=256,
        *,
        num_classes=80,
        in_features=("p3", "p4", "p5", "p6", "p7"),
        strides=(8, 16, 32, 64, 128),
        score_thresh=0.05,
        hm_min_overlap=0.8,
        loc_loss_type='giou',
        min_radius=4,
        hm_focal_alpha=0.25,
        hm_focal_beta=4,
        loss_gamma=2.0,
        reg_weight=2.0,
        not_norm_reg=True,
        with_agn_hm=False,
        only_proposal=False,
        as_proposal=False,
        not_nms=False,
        pos_weight=1.,
        neg_weight=1.,
        sigmoid_clamp=1e-4,
        ignore_high_fp=-1.,
        center_nms=False,
        sizes_of_interest=[[0,80],[64,160],[128,320],[256,640],[512,10000000]],
        more_pos=False,
        more_pos_thresh=0.2,
        more_pos_topk=9,
        pre_nms_topk_train=1000,
        pre_nms_topk_test=1000,
        post_nms_topk_train=100,
        post_nms_topk_test=100,
        nms_thresh_train=0.6,
        nms_thresh_test=0.6,
        no_reduce=False,
        not_clamp_box=False,
        debug=False,
        vis_thresh=0.5,
        pixel_mean=[103.530,116.280,123.675],
        pixel_std=[1.0,1.0,1.0],
        device='cuda',
        centernet_head=None,
    ):
        """
        CenterNet dense head / proposal generator.

        Most keyword arguments are stored verbatim on ``self`` and read by
        the other methods of this class; they mirror the cfg keys listed in
        ``from_config``. Key groups:
        - in_features/strides: which FPN maps to use and their strides.
        - hm_* / loss_gamma / pos_weight / neg_weight / sigmoid_clamp /
          ignore_high_fp: heatmap focal-loss parameters.
        - reg_weight / not_norm_reg / loc_loss_type: box-regression loss.
        - only_proposal / as_proposal / with_agn_hm: whether outputs are
          class-agnostic proposals and whether an agnostic heatmap exists.
        - *_nms_* / score_thresh: inference-time filtering.
        - more_pos*: optional extra positive-sample mining.
        - debug / vis_thresh / pixel_mean / pixel_std / device:
          visualization-only settings.

        NOTE(review): ``sizes_of_interest``, ``pixel_mean`` and
        ``pixel_std`` use mutable (list) defaults; they appear to be read
        without mutation here, but confirm before reusing instances.
        """
        super().__init__()
        self.num_classes = num_classes
        self.in_features = in_features
        self.strides = strides
        self.score_thresh = score_thresh
        self.min_radius = min_radius
        self.hm_focal_alpha = hm_focal_alpha
        self.hm_focal_beta = hm_focal_beta
        self.loss_gamma = loss_gamma
        self.reg_weight = reg_weight
        self.not_norm_reg = not_norm_reg
        self.with_agn_hm = with_agn_hm
        self.only_proposal = only_proposal
        self.as_proposal = as_proposal
        self.not_nms = not_nms
        self.pos_weight = pos_weight
        self.neg_weight = neg_weight
        self.sigmoid_clamp = sigmoid_clamp
        self.ignore_high_fp = ignore_high_fp
        self.center_nms = center_nms
        self.sizes_of_interest = sizes_of_interest
        self.more_pos = more_pos
        self.more_pos_thresh = more_pos_thresh
        self.more_pos_topk = more_pos_topk
        self.pre_nms_topk_train = pre_nms_topk_train
        self.pre_nms_topk_test = pre_nms_topk_test
        self.post_nms_topk_train = post_nms_topk_train
        self.post_nms_topk_test = post_nms_topk_test
        self.nms_thresh_train = nms_thresh_train
        self.nms_thresh_test = nms_thresh_test
        self.no_reduce = no_reduce
        self.not_clamp_box = not_clamp_box
        self.debug = debug
        self.vis_thresh = vis_thresh
        # center-NMS (3x3 max-pool suppression) replaces regular NMS
        if self.center_nms:
            self.not_nms = True
        self.iou_loss = IOULoss(loc_loss_type)
        # proposal-only mode requires the class-agnostic heatmap branch
        assert (not self.only_proposal) or self.with_agn_hm
        # delta for rendering heatmap
        self.delta = (1 - hm_min_overlap) / (1 + hm_min_overlap)
        if centernet_head is None:
            self.centernet_head = CenterNetHead(
                in_channels=in_channels,
                num_levels=len(in_features),
                with_agn_hm=with_agn_hm,
                only_proposal=only_proposal)
        else:
            self.centernet_head = centernet_head
        if self.debug:
            # denormalizer maps network inputs back to raw pixel values for
            # visualization only
            pixel_mean = torch.Tensor(pixel_mean).to(
                torch.device(device)).view(3, 1, 1)
            pixel_std = torch.Tensor(pixel_std).to(
                torch.device(device)).view(3, 1, 1)
            self.denormalizer = lambda x: x * pixel_std + pixel_mean
    @classmethod
    def from_config(cls, cfg, input_shape):
        """Build the ``__init__`` kwargs from a detectron2 CfgNode and the
        per-feature input shapes (used via the @configurable mechanism)."""
        ret = {
            # 'input_shape': input_shape,
            'in_channels': input_shape[
                cfg.MODEL.CENTERNET.IN_FEATURES[0]].channels,
            'num_classes': cfg.MODEL.CENTERNET.NUM_CLASSES,
            'in_features': cfg.MODEL.CENTERNET.IN_FEATURES,
            'strides': cfg.MODEL.CENTERNET.FPN_STRIDES,
            'score_thresh': cfg.MODEL.CENTERNET.INFERENCE_TH,
            'loc_loss_type': cfg.MODEL.CENTERNET.LOC_LOSS_TYPE,
            'hm_min_overlap': cfg.MODEL.CENTERNET.HM_MIN_OVERLAP,
            'min_radius': cfg.MODEL.CENTERNET.MIN_RADIUS,
            'hm_focal_alpha': cfg.MODEL.CENTERNET.HM_FOCAL_ALPHA,
            'hm_focal_beta': cfg.MODEL.CENTERNET.HM_FOCAL_BETA,
            'loss_gamma': cfg.MODEL.CENTERNET.LOSS_GAMMA,
            'reg_weight': cfg.MODEL.CENTERNET.REG_WEIGHT,
            'not_norm_reg': cfg.MODEL.CENTERNET.NOT_NORM_REG,
            'with_agn_hm': cfg.MODEL.CENTERNET.WITH_AGN_HM,
            'only_proposal': cfg.MODEL.CENTERNET.ONLY_PROPOSAL,
            'as_proposal': cfg.MODEL.CENTERNET.AS_PROPOSAL,
            'not_nms': cfg.MODEL.CENTERNET.NOT_NMS,
            'pos_weight': cfg.MODEL.CENTERNET.POS_WEIGHT,
            'neg_weight': cfg.MODEL.CENTERNET.NEG_WEIGHT,
            'sigmoid_clamp': cfg.MODEL.CENTERNET.SIGMOID_CLAMP,
            'ignore_high_fp': cfg.MODEL.CENTERNET.IGNORE_HIGH_FP,
            'center_nms': cfg.MODEL.CENTERNET.CENTER_NMS,
            'sizes_of_interest': cfg.MODEL.CENTERNET.SOI,
            'more_pos': cfg.MODEL.CENTERNET.MORE_POS,
            'more_pos_thresh': cfg.MODEL.CENTERNET.MORE_POS_THRESH,
            'more_pos_topk': cfg.MODEL.CENTERNET.MORE_POS_TOPK,
            'pre_nms_topk_train': cfg.MODEL.CENTERNET.PRE_NMS_TOPK_TRAIN,
            'pre_nms_topk_test': cfg.MODEL.CENTERNET.PRE_NMS_TOPK_TEST,
            'post_nms_topk_train': cfg.MODEL.CENTERNET.POST_NMS_TOPK_TRAIN,
            'post_nms_topk_test': cfg.MODEL.CENTERNET.POST_NMS_TOPK_TEST,
            'nms_thresh_train': cfg.MODEL.CENTERNET.NMS_TH_TRAIN,
            'nms_thresh_test': cfg.MODEL.CENTERNET.NMS_TH_TEST,
            'no_reduce': cfg.MODEL.CENTERNET.NO_REDUCE,
            'not_clamp_box': cfg.INPUT.NOT_CLAMP_BOX,
            'debug': cfg.DEBUG,
            'vis_thresh': cfg.VIS_THRESH,
            'pixel_mean': cfg.MODEL.PIXEL_MEAN,
            'pixel_std': cfg.MODEL.PIXEL_STD,
            'device': cfg.MODEL.DEVICE,
            # head is built here so that it sees the full per-feature shapes
            'centernet_head': CenterNetHead(
                cfg, [input_shape[f] for f in cfg.MODEL.CENTERNET.IN_FEATURES]),
        }
        return ret
    def forward(self, images, features_dict, gt_instances):
        """Run the head over the selected FPN features.

        At inference time, returns ``self.inference(...)``; at training
        time, returns ``(proposals, losses)`` where ``proposals`` is None
        unless only_proposal/as_proposal mode is on.
        """
        features = [features_dict[f] for f in self.in_features]
        clss_per_level, reg_pred_per_level, agn_hm_pred_per_level = \
            self.centernet_head(features)
        grids = self.compute_grids(features)
        # (H_l, W_l) of every level, as a tensor on the right device
        shapes_per_level = grids[0].new_tensor(
            [(x.shape[2], x.shape[3]) for x in reg_pred_per_level])
        if not self.training:
            return self.inference(
                images, clss_per_level, reg_pred_per_level,
                agn_hm_pred_per_level, grids)
        else:
            pos_inds, labels, reg_targets, flattened_hms = \
                self._get_ground_truth(
                    grids, shapes_per_level, gt_instances)
            # logits_pred: M x F, reg_pred: M x 4, agn_hm_pred: M
            logits_pred, reg_pred, agn_hm_pred = self._flatten_outputs(
                clss_per_level, reg_pred_per_level, agn_hm_pred_per_level)
            if self.more_pos:
                # add more pixels as positive if \
                # 1. they are within the center3x3 region of an object
                # 2. their regression losses are small (<self.more_pos_thresh)
                pos_inds, labels = self._add_more_pos(
                    reg_pred, gt_instances, shapes_per_level)
            losses = self.losses(
                pos_inds, labels, reg_targets, flattened_hms,
                logits_pred, reg_pred, agn_hm_pred)
            proposals = None
            if self.only_proposal:
                agn_hm_pred_per_level = [x.sigmoid() for x in agn_hm_pred_per_level]
                proposals = self.predict_instances(
                    grids, agn_hm_pred_per_level, reg_pred_per_level,
                    images.image_sizes, [None for _ in agn_hm_pred_per_level])
            elif self.as_proposal: # category specific bbox as agnostic proposals
                clss_per_level = [x.sigmoid() for x in clss_per_level]
                proposals = self.predict_instances(
                    grids, clss_per_level, reg_pred_per_level,
                    images.image_sizes, agn_hm_pred_per_level)
            if self.only_proposal or self.as_proposal:
                # rename fields to the detectron2 proposal convention
                for p in range(len(proposals)):
                    proposals[p].proposal_boxes = proposals[p].get('pred_boxes')
                    proposals[p].objectness_logits = proposals[p].get('scores')
                    proposals[p].remove('pred_boxes')
                    proposals[p].remove('scores')
                    proposals[p].remove('pred_classes')
            if self.debug:
                debug_train(
                    [self.denormalizer(x) for x in images],
                    gt_instances, flattened_hms, reg_targets,
                    labels, pos_inds, shapes_per_level, grids, self.strides)
            return proposals, losses
    def losses(
        self, pos_inds, labels, reg_targets, flattened_hms,
        logits_pred, reg_pred, agn_hm_pred):
        '''
        Compute the CenterNet losses: per-class heatmap focal loss
        (unless only_proposal), IoU-based box regression loss, and the
        class-agnostic heatmap focal loss (when with_agn_hm).

        Inputs:
            pos_inds: N
            labels: N
            reg_targets: M x 4
            flattened_hms: M x C
            logits_pred: M x C
            reg_pred: M x 4
            agn_hm_pred: M x 1 or None
            N: number of positive locations in all images
            M: number of pixels from all FPN levels
            C: number of classes
        '''
        # regression predictions must be finite before computing IoU losses
        assert (torch.isfinite(reg_pred).all().item())
        num_pos_local = pos_inds.numel()
        num_gpus = get_world_size()
        # normalize by the average number of positives per GPU
        if self.no_reduce:
            total_num_pos = num_pos_local * num_gpus
        else:
            total_num_pos = reduce_sum(
                pos_inds.new_tensor([num_pos_local])).item()
        num_pos_avg = max(total_num_pos / num_gpus, 1.0)
        losses = {}
        if not self.only_proposal:
            pos_loss, neg_loss = heatmap_focal_loss_jit(
                logits_pred.float(), flattened_hms.float(), pos_inds, labels,
                alpha=self.hm_focal_alpha,
                beta=self.hm_focal_beta,
                gamma=self.loss_gamma,
                reduction='sum',
                sigmoid_clamp=self.sigmoid_clamp,
                ignore_high_fp=self.ignore_high_fp,
            )
            pos_loss = self.pos_weight * pos_loss / num_pos_avg
            neg_loss = self.neg_weight * neg_loss / num_pos_avg
            losses['loss_centernet_pos'] = pos_loss
            losses['loss_centernet_neg'] = neg_loss
        # locations with an assigned object (targets are -INF elsewhere)
        reg_inds = torch.nonzero(reg_targets.max(dim=1)[0] >= 0).squeeze(1)
        reg_pred = reg_pred[reg_inds]
        reg_targets_pos = reg_targets[reg_inds]
        # weight each location by its heatmap response, unless not_norm_reg
        reg_weight_map = flattened_hms.max(dim=1)[0]
        reg_weight_map = reg_weight_map[reg_inds]
        reg_weight_map = reg_weight_map * 0 + 1 \
            if self.not_norm_reg else reg_weight_map
        if self.no_reduce:
            reg_norm = max(reg_weight_map.sum(), 1)
        else:
            reg_norm = max(reduce_sum(reg_weight_map.sum()).item() / num_gpus, 1)
        reg_loss = self.reg_weight * self.iou_loss(
            reg_pred, reg_targets_pos, reg_weight_map,
            reduction='sum') / reg_norm
        losses['loss_centernet_loc'] = reg_loss
        if self.with_agn_hm:
            cat_agn_heatmap = flattened_hms.max(dim=1)[0] # M
            agn_pos_loss, agn_neg_loss = binary_heatmap_focal_loss_jit(
                agn_hm_pred.float(), cat_agn_heatmap.float(), pos_inds,
                alpha=self.hm_focal_alpha,
                beta=self.hm_focal_beta,
                gamma=self.loss_gamma,
                sigmoid_clamp=self.sigmoid_clamp,
                ignore_high_fp=self.ignore_high_fp,
            )
            agn_pos_loss = self.pos_weight * agn_pos_loss / num_pos_avg
            agn_neg_loss = self.neg_weight * agn_neg_loss / num_pos_avg
            losses['loss_centernet_agn_pos'] = agn_pos_loss
            losses['loss_centernet_agn_neg'] = agn_neg_loss
        if self.debug:
            print('losses', losses)
            print('total_num_pos', total_num_pos)
        return losses
def compute_grids(self, features):
grids = []
for level, feature in enumerate(features):
h, w = feature.size()[-2:]
shifts_x = torch.arange(
0, w * self.strides[level],
step=self.strides[level],
dtype=torch.float32, device=feature.device)
shifts_y = torch.arange(
0, h * self.strides[level],
step=self.strides[level],
dtype=torch.float32, device=feature.device)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
grids_per_level = torch.stack((shift_x, shift_y), dim=1) + \
self.strides[level] // 2
grids.append(grids_per_level)
return grids
    def _get_ground_truth(self, grids, shapes_per_level, gt_instances):
        '''
        Build training targets (positive indices, labels, regression
        targets and rendered heatmaps) for all images and FPN levels.

        Input:
            grids: list of tensors [(hl x wl, 2)]_l
            shapes_per_level: list of tuples L x 2:
            gt_instances: gt instances
        Return:
            pos_inds: N
            labels: N
            reg_targets: M x 4
            flattened_hms: M x C or M x 1
            N: number of objects in all images
            M: number of pixels from all FPN levels
        '''
        # get positive pixel index
        if not self.more_pos:
            pos_inds, labels = self._get_label_inds(
                gt_instances, shapes_per_level)
        else:
            # positives are mined later by _add_more_pos
            pos_inds, labels = None, None
        heatmap_channels = self.num_classes
        L = len(grids)
        num_loc_list = [len(loc) for loc in grids]
        # per-location stride and size-of-interest range, flattened over levels
        strides = torch.cat([
            shapes_per_level.new_ones(num_loc_list[l]) * self.strides[l] \
            for l in range(L)]).float() # M
        reg_size_ranges = torch.cat([
            shapes_per_level.new_tensor(self.sizes_of_interest[l]).float().view(
            1, 2).expand(num_loc_list[l], 2) for l in range(L)]) # M x 2
        grids = torch.cat(grids, dim=0) # M x 2
        M = grids.shape[0]
        reg_targets = []
        flattened_hms = []
        for i in range(len(gt_instances)): # images
            boxes = gt_instances[i].gt_boxes.tensor # N x 4
            area = gt_instances[i].gt_boxes.area() # N
            gt_classes = gt_instances[i].gt_classes # N in [0, self.num_classes]
            N = boxes.shape[0]
            if N == 0:
                # no objects: all targets invalid (-INF), heatmaps all zero
                reg_targets.append(grids.new_zeros((M, 4)) - INF)
                flattened_hms.append(
                    grids.new_zeros((
                        M, 1 if self.only_proposal else heatmap_channels)))
                continue
            # signed distances from each grid point to each box's four sides
            l = grids[:, 0].view(M, 1) - boxes[:, 0].view(1, N) # M x N
            t = grids[:, 1].view(M, 1) - boxes[:, 1].view(1, N) # M x N
            r = boxes[:, 2].view(1, N) - grids[:, 0].view(M, 1) # M x N
            b = boxes[:, 3].view(1, N) - grids[:, 1].view(M, 1) # M x N
            reg_target = torch.stack([l, t, r, b], dim=2) # M x N x 4
            centers = ((boxes[:, [0, 1]] + boxes[:, [2, 3]]) / 2) # N x 2
            centers_expanded = centers.view(1, N, 2).expand(M, N, 2) # M x N x 2
            strides_expanded = strides.view(M, 1, 1).expand(M, N, 2)
            # object centers snapped onto each location's feature grid
            centers_discret = ((centers_expanded / strides_expanded).int() * \
                strides_expanded).float() + strides_expanded / 2 # M x N x 2
            is_peak = (((grids.view(M, 1, 2).expand(M, N, 2) - \
                centers_discret) ** 2).sum(dim=2) == 0) # M x N
            is_in_boxes = reg_target.min(dim=2)[0] > 0 # M x N
            is_center3x3 = self.get_center3x3(
                grids, centers, strides) & is_in_boxes # M x N
            is_cared_in_the_level = self.assign_reg_fpn(
                reg_target, reg_size_ranges) # M x N
            reg_mask = is_center3x3 & is_cared_in_the_level # M x N
            dist2 = ((grids.view(M, 1, 2).expand(M, N, 2) - \
                centers_expanded) ** 2).sum(dim=2) # M x N
            dist2[is_peak] = 0
            # squared Gaussian radius from box area, floored at min_radius
            radius2 = self.delta ** 2 * 2 * area # N
            radius2 = torch.clamp(
                radius2, min=self.min_radius ** 2)
            weighted_dist2 = dist2 / radius2.view(1, N).expand(M, N) # M x N
            reg_target = self._get_reg_targets(
                reg_target, weighted_dist2.clone(), reg_mask, area) # M x 4
            if self.only_proposal:
                flattened_hm = self._create_agn_heatmaps_from_dist(
                    weighted_dist2.clone()) # M x 1
            else:
                flattened_hm = self._create_heatmaps_from_dist(
                    weighted_dist2.clone(), gt_classes,
                    channels=heatmap_channels) # M x C
            reg_targets.append(reg_target)
            flattened_hms.append(flattened_hm)
        # transpose im first training_targets to level first ones
        reg_targets = _transpose(reg_targets, num_loc_list)
        flattened_hms = _transpose(flattened_hms, num_loc_list)
        # regression targets are learned in stride-normalized units
        for l in range(len(reg_targets)):
            reg_targets[l] = reg_targets[l] / float(self.strides[l])
        reg_targets = cat([x for x in reg_targets], dim=0) # MB x 4
        flattened_hms = cat([x for x in flattened_hms], dim=0) # MB x C
        return pos_inds, labels, reg_targets, flattened_hms
    def _get_label_inds(self, gt_instances, shapes_per_level):
        '''
        Flat indices (into the level-major, image-major flattened feature
        space) of the discretized object centers, with their class labels.

        Inputs:
            gt_instances: [n_i], sum n_i = N
            shapes_per_level: L x 2 [(h_l, w_l)]_L
        Returns:
            pos_inds: N'
            labels: N'
        '''
        pos_inds = []
        labels = []
        L = len(self.strides)
        B = len(gt_instances)
        shapes_per_level = shapes_per_level.long()
        loc_per_level = (shapes_per_level[:, 0] * shapes_per_level[:, 1]).long() # L
        # starting offset of each level in the flattened (L-major) layout
        level_bases = []
        s = 0
        for l in range(L):
            level_bases.append(s)
            s = s + B * loc_per_level[l]
        level_bases = shapes_per_level.new_tensor(level_bases).long() # L
        strides_default = shapes_per_level.new_tensor(self.strides).float() # L
        for im_i in range(B):
            targets_per_im = gt_instances[im_i]
            bboxes = targets_per_im.gt_boxes.tensor # n x 4
            n = bboxes.shape[0]
            centers = ((bboxes[:, [0, 1]] + bboxes[:, [2, 3]]) / 2) # n x 2
            centers = centers.view(n, 1, 2).expand(n, L, 2).contiguous()
            if self.not_clamp_box:
                # keep centers of unclamped boxes inside the image bounds
                h, w = gt_instances[im_i]._image_size
                centers[:, :, 0].clamp_(min=0).clamp_(max=w-1)
                centers[:, :, 1].clamp_(min=0).clamp_(max=h-1)
            strides = strides_default.view(1, L, 1).expand(n, L, 2)
            centers_inds = (centers / strides).long() # n x L x 2
            Ws = shapes_per_level[:, 1].view(1, L).expand(n, L)
            # flat index = level base + image offset + row-major cell index
            pos_ind = level_bases.view(1, L).expand(n, L) + \
                im_i * loc_per_level.view(1, L).expand(n, L) + \
                centers_inds[:, :, 1] * Ws + \
                centers_inds[:, :, 0] # n x L
            # keep only the level(s) whose size range covers each box
            is_cared_in_the_level = self.assign_fpn_level(bboxes)
            pos_ind = pos_ind[is_cared_in_the_level].view(-1)
            label = targets_per_im.gt_classes.view(
                n, 1).expand(n, L)[is_cared_in_the_level].view(-1)
            pos_inds.append(pos_ind) # n'
            labels.append(label) # n'
        pos_inds = torch.cat(pos_inds, dim=0).long()
        labels = torch.cat(labels, dim=0)
        return pos_inds, labels # N, N
def assign_fpn_level(self, boxes):
'''
Inputs:
boxes: n x 4
size_ranges: L x 2
Return:
is_cared_in_the_level: n x L
'''
size_ranges = boxes.new_tensor(
self.sizes_of_interest).view(len(self.sizes_of_interest), 2) # L x 2
crit = ((boxes[:, 2:] - boxes[:, :2]) **2).sum(dim=1) ** 0.5 / 2 # n
n, L = crit.shape[0], size_ranges.shape[0]
crit = crit.view(n, 1).expand(n, L)
size_ranges_expand = size_ranges.view(1, L, 2).expand(n, L, 2)
is_cared_in_the_level = (crit >= size_ranges_expand[:, :, 0]) & \
(crit <= size_ranges_expand[:, :, 1])
return is_cared_in_the_level
def assign_reg_fpn(self, reg_targets_per_im, size_ranges):
'''
TODO (Xingyi): merge it with assign_fpn_level
Inputs:
reg_targets_per_im: M x N x 4
size_ranges: M x 2
'''
crit = ((reg_targets_per_im[:, :, :2] + \
reg_targets_per_im[:, :, 2:])**2).sum(dim=2) ** 0.5 / 2 # M x N
is_cared_in_the_level = (crit >= size_ranges[:, [0]]) & \
(crit <= size_ranges[:, [1]])
return is_cared_in_the_level
def _get_reg_targets(self, reg_targets, dist, mask, area):
'''
reg_targets (M x N x 4): long tensor
dist (M x N)
is_*: M x N
'''
dist[mask == 0] = INF * 1.0
min_dist, min_inds = dist.min(dim=1) # M
reg_targets_per_im = reg_targets[
range(len(reg_targets)), min_inds] # M x N x 4 --> M x 4
reg_targets_per_im[min_dist == INF] = - INF
return reg_targets_per_im
def _create_heatmaps_from_dist(self, dist, labels, channels):
'''
dist: M x N
labels: N
return:
heatmaps: M x C
'''
heatmaps = dist.new_zeros((dist.shape[0], channels))
for c in range(channels):
inds = (labels == c) # N
if inds.int().sum() == 0:
continue
heatmaps[:, c] = torch.exp(-dist[:, inds].min(dim=1)[0])
zeros = heatmaps[:, c] < 1e-4
heatmaps[zeros, c] = 0
return heatmaps
def _create_agn_heatmaps_from_dist(self, dist):
'''
TODO (Xingyi): merge it with _create_heatmaps_from_dist
dist: M x N
return:
heatmaps: M x 1
'''
heatmaps = dist.new_zeros((dist.shape[0], 1))
heatmaps[:, 0] = torch.exp(-dist.min(dim=1)[0])
zeros = heatmaps < 1e-4
heatmaps[zeros] = 0
return heatmaps
def _flatten_outputs(self, clss, reg_pred, agn_hm_pred):
# Reshape: (N, F, Hl, Wl) -> (N, Hl, Wl, F) -> (sum_l N*Hl*Wl, F)
clss = cat([x.permute(0, 2, 3, 1).reshape(-1, x.shape[1]) \
for x in clss], dim=0) if clss[0] is not None else None
reg_pred = cat(
[x.permute(0, 2, 3, 1).reshape(-1, 4) for x in reg_pred], dim=0)
agn_hm_pred = cat([x.permute(0, 2, 3, 1).reshape(-1) \
for x in agn_hm_pred], dim=0) if self.with_agn_hm else None
return clss, reg_pred, agn_hm_pred
def get_center3x3(self, locations, centers, strides):
'''
Inputs:
locations: M x 2
centers: N x 2
strides: M
'''
M, N = locations.shape[0], centers.shape[0]
locations_expanded = locations.view(M, 1, 2).expand(M, N, 2) # M x N x 2
centers_expanded = centers.view(1, N, 2).expand(M, N, 2) # M x N x 2
strides_expanded = strides.view(M, 1, 1).expand(M, N, 2) # M x N
centers_discret = ((centers_expanded / strides_expanded).int() * \
strides_expanded).float() + strides_expanded / 2 # M x N x 2
dist_x = (locations_expanded[:, :, 0] - centers_discret[:, :, 0]).abs()
dist_y = (locations_expanded[:, :, 1] - centers_discret[:, :, 1]).abs()
return (dist_x <= strides_expanded[:, :, 0]) & \
(dist_y <= strides_expanded[:, :, 0])
    @torch.no_grad()
    def inference(self, images, clss_per_level, reg_pred_per_level,
        agn_hm_pred_per_level, grids):
        """Decode predictions into per-image Instances; returns
        ``(proposals, {})`` (the empty dict is the losses placeholder)."""
        logits_pred = [x.sigmoid() if x is not None else None \
            for x in clss_per_level]
        agn_hm_pred_per_level = [x.sigmoid() if x is not None else None \
            for x in agn_hm_pred_per_level]
        if self.only_proposal:
            proposals = self.predict_instances(
                grids, agn_hm_pred_per_level, reg_pred_per_level,
                images.image_sizes, [None for _ in agn_hm_pred_per_level])
        else:
            proposals = self.predict_instances(
                grids, logits_pred, reg_pred_per_level,
                images.image_sizes, agn_hm_pred_per_level)
        if self.as_proposal or self.only_proposal:
            # rename fields to the detectron2 proposal convention
            # NOTE(review): unlike forward(), 'scores' and 'pred_classes'
            # are kept here — presumably for downstream consumers; confirm.
            for p in range(len(proposals)):
                proposals[p].proposal_boxes = proposals[p].get('pred_boxes')
                proposals[p].objectness_logits = proposals[p].get('scores')
                proposals[p].remove('pred_boxes')
        if self.debug:
            debug_test(
                [self.denormalizer(x) for x in images],
                logits_pred, reg_pred_per_level,
                agn_hm_pred_per_level, preds=proposals,
                vis_thresh=self.vis_thresh,
                debug_show_name=False)
        return proposals, {}
@torch.no_grad()
def predict_instances(
self, grids, logits_pred, reg_pred, image_sizes, agn_hm_pred,
is_proposal=False):
sampled_boxes = []
for l in range(len(grids)):
sampled_boxes.append(self.predict_single_level(
grids[l], logits_pred[l], reg_pred[l] * self.strides[l],
image_sizes, agn_hm_pred[l], l, is_proposal=is_proposal))
boxlists = list(zip(*sampled_boxes))
boxlists = [Instances.cat(boxlist) for boxlist in boxlists]
boxlists = self.nms_and_topK(
boxlists, nms=not self.not_nms)
return boxlists
    @torch.no_grad()
    def predict_single_level(
        self, grids, heatmap, reg_pred, image_sizes, agn_hm, level,
        is_proposal=False):
        """Decode one FPN level's heatmap + regression into a list of
        per-image Instances (boxes, scores, classes), keeping at most
        pre_nms_topk candidates above score_thresh per image."""
        N, C, H, W = heatmap.shape
        # put in the same format as grids
        if self.center_nms:
            # keep only local 3x3 maxima of the heatmap
            heatmap_nms = nn.functional.max_pool2d(
                heatmap, (3, 3), stride=1, padding=1)
            heatmap = heatmap * (heatmap_nms == heatmap).float()
        heatmap = heatmap.permute(0, 2, 3, 1) # N x H x W x C
        heatmap = heatmap.reshape(N, -1, C) # N x HW x C
        box_regression = reg_pred.view(N, 4, H, W).permute(0, 2, 3, 1) # N x H x W x 4
        box_regression = box_regression.reshape(N, -1, 4)
        candidate_inds = heatmap > self.score_thresh # 0.05
        pre_nms_top_n = candidate_inds.view(N, -1).sum(1) # N
        pre_nms_topk = self.pre_nms_topk_train if self.training else self.pre_nms_topk_test
        pre_nms_top_n = pre_nms_top_n.clamp(max=pre_nms_topk) # N
        if agn_hm is not None:
            # scale class scores by the class-agnostic heatmap
            agn_hm = agn_hm.view(N, 1, H, W).permute(0, 2, 3, 1)
            agn_hm = agn_hm.reshape(N, -1)
            heatmap = heatmap * agn_hm[:, :, None]
        results = []
        for i in range(N):
            per_box_cls = heatmap[i] # HW x C
            per_candidate_inds = candidate_inds[i] # n
            per_box_cls = per_box_cls[per_candidate_inds] # n
            # nonzero() gives (location, class) pairs of candidates
            per_candidate_nonzeros = per_candidate_inds.nonzero() # n
            per_box_loc = per_candidate_nonzeros[:, 0] # n
            per_class = per_candidate_nonzeros[:, 1] # n
            per_box_regression = box_regression[i] # HW x 4
            per_box_regression = per_box_regression[per_box_loc] # n x 4
            per_grids = grids[per_box_loc] # n x 2
            per_pre_nms_top_n = pre_nms_top_n[i] # 1
            if per_candidate_inds.sum().item() > per_pre_nms_top_n.item():
                per_box_cls, top_k_indices = \
                    per_box_cls.topk(per_pre_nms_top_n, sorted=False)
                per_class = per_class[top_k_indices]
                per_box_regression = per_box_regression[top_k_indices]
                per_grids = per_grids[top_k_indices]
            # (l, t, r, b) offsets around the grid point -> (x1, y1, x2, y2)
            detections = torch.stack([
                per_grids[:, 0] - per_box_regression[:, 0],
                per_grids[:, 1] - per_box_regression[:, 1],
                per_grids[:, 0] + per_box_regression[:, 2],
                per_grids[:, 1] + per_box_regression[:, 3],
            ], dim=1) # n x 4
            # avoid invalid boxes in RoI heads
            detections[:, 2] = torch.max(detections[:, 2], detections[:, 0] + 0.01)
            detections[:, 3] = torch.max(detections[:, 3], detections[:, 1] + 0.01)
            boxlist = Instances(image_sizes[i])
            # sqrt rebalances scores that were multiplied by the agnostic map
            boxlist.scores = torch.sqrt(per_box_cls) \
                if self.with_agn_hm else per_box_cls # n
            boxlist.pred_boxes = Boxes(detections)
            boxlist.pred_classes = per_class
            results.append(boxlist)
        return results
@torch.no_grad()
def nms_and_topK(self, boxlists, nms=True):
num_images = len(boxlists)
results = []
for i in range(num_images):
nms_thresh = self.nms_thresh_train if self.training else \
self.nms_thresh_test
result = ml_nms(boxlists[i], nms_thresh) if nms else boxlists[i]
if self.debug:
print('#proposals before nms', len(boxlists[i]))
print('#proposals after nms', len(result))
num_dets = len(result)
post_nms_topk = self.post_nms_topk_train if self.training else \
self.post_nms_topk_test
if num_dets > post_nms_topk:
cls_scores = result.scores
image_thresh, _ = torch.kthvalue(
cls_scores.float().cpu(),
num_dets - post_nms_topk + 1
)
keep = cls_scores >= image_thresh.item()
keep = torch.nonzero(keep).squeeze(1)
result = result[keep]
if self.debug:
print('#proposals after filter', len(result))
results.append(result)
return results
    @torch.no_grad()
    def _add_more_pos(self, reg_pred, gt_instances, shapes_per_level):
        """Mine extra positive locations: within each object's center-3x3
        region, take up to more_pos_topk locations whose current regression
        loss is below more_pos_thresh. Returns (pos_inds, labels)."""
        labels, level_masks, c33_inds, c33_masks, c33_regs = \
            self._get_c33_inds(gt_instances, shapes_per_level)
        N, L, K = labels.shape[0], len(self.strides), 9
        # invalid slots point at index 0; they are masked out via INF below
        c33_inds[c33_masks == 0] = 0
        reg_pred_c33 = reg_pred[c33_inds].detach() # N x L x K
        invalid_reg = c33_masks == 0
        c33_regs_expand = c33_regs.view(N * L * K, 4).clamp(min=0)
        if N > 0:
            with torch.no_grad():
                c33_reg_loss = self.iou_loss(
                    reg_pred_c33.view(N * L * K, 4),
                    c33_regs_expand, None,
                    reduction='none').view(N, L, K).detach() # N x L x K
        else:
            c33_reg_loss = reg_pred_c33.new_zeros((N, L, K)).detach()
        c33_reg_loss[invalid_reg] = INF # N x L x K
        # index 4 of the 3x3 window is the true center: always zero loss
        c33_reg_loss.view(N * L, K)[level_masks.view(N * L), 4] = 0 # real center
        c33_reg_loss = c33_reg_loss.view(N, L * K)
        if N == 0:
            loss_thresh = c33_reg_loss.new_ones((N)).float()
        else:
            # per-object threshold: loss of the more_pos_topk-th best slot
            loss_thresh = torch.kthvalue(
                c33_reg_loss, self.more_pos_topk, dim=1)[0] # N
        loss_thresh[loss_thresh > self.more_pos_thresh] = self.more_pos_thresh # N
        new_pos = c33_reg_loss.view(N, L, K) < \
            loss_thresh.view(N, 1, 1).expand(N, L, K)
        pos_inds = c33_inds[new_pos].view(-1) # P
        labels = labels.view(N, 1, 1).expand(N, L, K)[new_pos].view(-1)
        return pos_inds, labels
    @torch.no_grad()
    def _get_c33_inds(self, gt_instances, shapes_per_level):
        '''
        TODO (Xingyi): The current implementation is ugly. Refactor.
        Get the center (and the 3x3 region near center) locations of each objects
        Inputs:
            gt_instances: [n_i], sum n_i = N
            shapes_per_level: L x 2 [(h_l, w_l)]_L
        Returns:
            labels: (N,) gt class per object
            level_masks: (N, L) object-in-level mask
            c33_inds: (N, L, 9) flat prediction indices of the 3x3 region
            c33_masks: (N, L, 9) validity of each 3x3 cell
            c33_regs: (N, L, 9, 4) stride-normalized regression targets
        '''
        labels = []
        level_masks = []
        c33_inds = []
        c33_masks = []
        c33_regs = []
        L = len(self.strides)
        B = len(gt_instances)
        shapes_per_level = shapes_per_level.long()
        loc_per_level = (shapes_per_level[:, 0] * shapes_per_level[:, 1]).long() # L
        # flat-index base of each level in the concatenated prediction tensor
        level_bases = []
        s = 0
        for l in range(L):
            level_bases.append(s)
            s = s + B * loc_per_level[l]
        level_bases = shapes_per_level.new_tensor(level_bases).long() # L
        strides_default = shapes_per_level.new_tensor(self.strides).float() # L
        K = 9
        # x/y offsets of the 3x3 neighborhood, row-major; index 4 = center
        dx = shapes_per_level.new_tensor([-1, 0, 1, -1, 0, 1, -1, 0, 1]).long()
        dy = shapes_per_level.new_tensor([-1, -1, -1, 0, 0, 0, 1, 1, 1]).long()
        for im_i in range(B):
            targets_per_im = gt_instances[im_i]
            bboxes = targets_per_im.gt_boxes.tensor # n x 4
            n = bboxes.shape[0]
            if n == 0:
                continue
            centers = ((bboxes[:, [0, 1]] + bboxes[:, [2, 3]]) / 2) # n x 2
            centers = centers.view(n, 1, 2).expand(n, L, 2)
            strides = strides_default.view(1, L, 1).expand(n, L, 2) #
            # feature-map cell containing the box center, per level
            centers_inds = (centers / strides).long() # n x L x 2
            # image-space coordinate of that cell's center
            center_grids = centers_inds * strides + strides // 2 # n x L x 2
            l = center_grids[:, :, 0] - bboxes[:, 0].view(n, 1).expand(n, L)
            t = center_grids[:, :, 1] - bboxes[:, 1].view(n, 1).expand(n, L)
            r = bboxes[:, 2].view(n, 1).expand(n, L) - center_grids[:, :, 0]
            b = bboxes[:, 3].view(n, 1).expand(n, L) - center_grids[:, :, 1] # n x L
            reg = torch.stack([l, t, r, b], dim=2) # n x L x 4
            # normalize targets by the level stride
            reg = reg / strides_default.view(1, L, 1).expand(n, L, 4).float()
            Ws = shapes_per_level[:, 1].view(1, L).expand(n, L)
            Hs = shapes_per_level[:, 0].view(1, L).expand(n, L)
            expand_Ws = Ws.view(n, L, 1).expand(n, L, K)
            expand_Hs = Hs.view(n, L, 1).expand(n, L, K)
            label = targets_per_im.gt_classes.view(n).clone()
            # center cell must lie inside the box and on an assigned level
            mask = reg.min(dim=2)[0] >= 0 # n x L
            mask = mask & self.assign_fpn_level(bboxes)
            labels.append(label) # n
            level_masks.append(mask) # n x L
            Dy = dy.view(1, 1, K).expand(n, L, K)
            Dx = dx.view(1, 1, K).expand(n, L, K)
            # flat index = level base + image offset + row * W + col
            c33_ind = level_bases.view(1, L, 1).expand(n, L, K) + \
                im_i * loc_per_level.view(1, L, 1).expand(n, L, K) + \
                (centers_inds[:, :, 1:2].expand(n, L, K) + Dy) * expand_Ws + \
                (centers_inds[:, :, 0:1].expand(n, L, K) + Dx) # n x L x K
            # keep only 3x3 cells that fall inside the feature map
            c33_mask = \
                ((centers_inds[:, :, 1:2].expand(n, L, K) + dy) < expand_Hs) & \
                ((centers_inds[:, :, 1:2].expand(n, L, K) + dy) >= 0) & \
                ((centers_inds[:, :, 0:1].expand(n, L, K) + dx) < expand_Ws) & \
                ((centers_inds[:, :, 0:1].expand(n, L, K) + dx) >= 0)
            # TODO (Xingyi): think about better way to implement this
            # Currently it hard codes the 3x3 region
            c33_reg = reg.view(n, L, 1, 4).expand(n, L, K, 4).clone()
            # shift the (l, t, r, b) targets by one cell for off-center taps:
            # columns 0/3/6 are one cell left, 2/5/8 one cell right,
            # rows 0/1/2 one cell up, 6/7/8 one cell down
            c33_reg[:, :, [0, 3, 6], 0] -= 1
            c33_reg[:, :, [0, 3, 6], 2] += 1
            c33_reg[:, :, [2, 5, 8], 0] += 1
            c33_reg[:, :, [2, 5, 8], 2] -= 1
            c33_reg[:, :, [0, 1, 2], 1] -= 1
            c33_reg[:, :, [0, 1, 2], 3] += 1
            c33_reg[:, :, [6, 7, 8], 1] += 1
            c33_reg[:, :, [6, 7, 8], 3] -= 1
            # off-center taps must also still lie inside the box
            c33_mask = c33_mask & (c33_reg.min(dim=3)[0] >= 0) # n x L x K
            c33_inds.append(c33_ind)
            c33_masks.append(c33_mask)
            c33_regs.append(c33_reg)
        if len(level_masks) > 0:
            labels = torch.cat(labels, dim=0)
            level_masks = torch.cat(level_masks, dim=0)
            c33_inds = torch.cat(c33_inds, dim=0).long()
            c33_regs = torch.cat(c33_regs, dim=0)
            c33_masks = torch.cat(c33_masks, dim=0)
        else:
            # no ground truth in the whole batch: return empty tensors
            labels = shapes_per_level.new_zeros((0)).long()
            level_masks = shapes_per_level.new_zeros((0, L)).bool()
            c33_inds = shapes_per_level.new_zeros((0, L, K)).long()
            c33_regs = shapes_per_level.new_zeros((0, L, K, 4)).float()
            c33_masks = shapes_per_level.new_zeros((0, L, K)).bool()
        return labels, level_masks, c33_inds, c33_masks, c33_regs # N x L, N x L x K
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/modeling/dense_heads/centernet_head.py | Python | import math
from typing import List
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.layers import ShapeSpec, get_norm
from detectron2.config import configurable
from ..layers.deform_conv import DFConv2d
__all__ = ["CenterNetHead"]
class Scale(nn.Module):
    """Multiplies its input by a single learnable scalar parameter."""

    def __init__(self, init_value=1.0):
        super(Scale, self).__init__()
        # one-element float32 parameter initialized to init_value
        self.scale = nn.Parameter(torch.FloatTensor([init_value]))

    def forward(self, input):
        scaled = input * self.scale
        return scaled
class CenterNetHead(nn.Module):
    """Shared CenterNet prediction head applied to every FPN level.

    Per level it produces: per-class logits (unless ``only_proposal``),
    a 4-channel box regression map (ReLU'd and scaled by a per-level
    learnable Scale), and an optional class-agnostic heatmap.
    """
    @configurable
    def __init__(self,
        # input_shape: List[ShapeSpec],
        in_channels,
        num_levels,
        *,
        num_classes=80,
        with_agn_hm=False,
        only_proposal=False,
        norm='GN',
        num_cls_convs=4,
        num_box_convs=4,
        num_share_convs=0,
        use_deformable=False,
        prior_prob=0.01):
        """
        Args:
            in_channels (int): channel count of every input level.
            num_levels (int): number of FPN levels (one Scale each).
            num_classes (int): number of classification categories.
            with_agn_hm (bool): add a class-agnostic heatmap branch.
            only_proposal (bool): skip the per-class logits branch.
            norm (str): norm layer for the conv towers ('' disables).
            num_cls_convs / num_box_convs / num_share_convs (int): tower depths.
            use_deformable (bool): use DFConv2d for the last conv of a tower.
            prior_prob (float): prior foreground probability used to
                initialize the logit biases (focal-loss style).
        """
        super().__init__()
        self.num_classes = num_classes
        self.with_agn_hm = with_agn_hm
        self.only_proposal = only_proposal
        self.out_kernel = 3
        # (num_convs, use_deformable) per tower; the cls tower is empty in
        # proposal-only mode.
        head_configs = {
            "cls": (num_cls_convs if not self.only_proposal else 0, \
                use_deformable),
            "bbox": (num_box_convs, use_deformable),
            "share": (num_share_convs, use_deformable)}
        channels = {
            'cls': in_channels,
            'bbox': in_channels,
            'share': in_channels,
        }
        for head in head_configs:
            tower = []
            num_convs, use_deformable = head_configs[head]
            channel = channels[head]
            for i in range(num_convs):
                # deformable conv only on the last layer of the tower
                if use_deformable and i == num_convs - 1:
                    conv_func = DFConv2d
                else:
                    conv_func = nn.Conv2d
                tower.append(conv_func(
                    in_channels if i == 0 else channel,
                    channel,
                    kernel_size=3, stride=1,
                    padding=1, bias=True
                ))
                if norm == 'GN' and channel % 32 != 0:
                    # NOTE(review): falls back to 25 groups when channels are
                    # not divisible by 32 — assumes channel % 25 == 0; confirm
                    # for non-standard channel counts.
                    tower.append(nn.GroupNorm(25, channel))
                elif norm != '':
                    tower.append(get_norm(norm, channel))
                tower.append(nn.ReLU())
            self.add_module('{}_tower'.format(head),
                nn.Sequential(*tower))
        self.bbox_pred = nn.Conv2d(
            in_channels, 4, kernel_size=self.out_kernel,
            stride=1, padding=self.out_kernel // 2
        )
        self.scales = nn.ModuleList(
            [Scale(init_value=1.0) for _ in range(num_levels)])
        for modules in [
            self.cls_tower, self.bbox_tower,
            self.share_tower,
            self.bbox_pred,
        ]:
            for l in modules.modules():
                if isinstance(l, nn.Conv2d):
                    torch.nn.init.normal_(l.weight, std=0.01)
                    torch.nn.init.constant_(l.bias, 0)
        # bias the regression output towards non-degenerate boxes at init
        torch.nn.init.constant_(self.bbox_pred.bias, 8.)
        # focal-loss style bias so initial sigmoid scores equal prior_prob
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        if self.with_agn_hm:
            self.agn_hm = nn.Conv2d(
                in_channels, 1, kernel_size=self.out_kernel,
                stride=1, padding=self.out_kernel // 2
            )
            torch.nn.init.constant_(self.agn_hm.bias, bias_value)
            torch.nn.init.normal_(self.agn_hm.weight, std=0.01)
        if not self.only_proposal:
            cls_kernel_size = self.out_kernel
            self.cls_logits = nn.Conv2d(
                in_channels, self.num_classes,
                kernel_size=cls_kernel_size,
                stride=1,
                padding=cls_kernel_size // 2,
            )
            torch.nn.init.constant_(self.cls_logits.bias, bias_value)
            torch.nn.init.normal_(self.cls_logits.weight, std=0.01)

    @classmethod
    def from_config(cls, cfg, input_shape):
        """Build constructor kwargs from a detectron2 config and FPN shapes."""
        ret = {
            # 'input_shape': input_shape,
            'in_channels': [s.channels for s in input_shape][0],
            'num_levels': len(input_shape),
            'num_classes': cfg.MODEL.CENTERNET.NUM_CLASSES,
            'with_agn_hm': cfg.MODEL.CENTERNET.WITH_AGN_HM,
            'only_proposal': cfg.MODEL.CENTERNET.ONLY_PROPOSAL,
            'norm': cfg.MODEL.CENTERNET.NORM,
            'num_cls_convs': cfg.MODEL.CENTERNET.NUM_CLS_CONVS,
            'num_box_convs': cfg.MODEL.CENTERNET.NUM_BOX_CONVS,
            'num_share_convs': cfg.MODEL.CENTERNET.NUM_SHARE_CONVS,
            'use_deformable': cfg.MODEL.CENTERNET.USE_DEFORMABLE,
            'prior_prob': cfg.MODEL.CENTERNET.PRIOR_PROB,
        }
        return ret

    def forward(self, x):
        """Run the head on each level's features.

        Args:
            x: list of L feature maps, each (N, in_channels, Hl, Wl).
        Returns:
            (clss, bbox_reg, agn_hms): three lists of length L; `clss` /
            `agn_hms` entries are None when the branch is disabled.
        """
        clss = []
        bbox_reg = []
        agn_hms = []
        for l, feature in enumerate(x):
            feature = self.share_tower(feature)
            cls_tower = self.cls_tower(feature)
            bbox_tower = self.bbox_tower(feature)
            if not self.only_proposal:
                clss.append(self.cls_logits(cls_tower))
            else:
                clss.append(None)
            if self.with_agn_hm:
                agn_hms.append(self.agn_hm(bbox_tower))
            else:
                agn_hms.append(None)
            reg = self.bbox_pred(bbox_tower)
            reg = self.scales[l](reg)
            # distances to box sides must be non-negative
            bbox_reg.append(F.relu(reg))
        return clss, bbox_reg, agn_hms
centernet/modeling/dense_heads/utils.py | Python | import cv2
import torch
from torch import nn
from detectron2.utils.comm import get_world_size
from detectron2.structures import pairwise_iou, Boxes
# from .data import CenterNetCrop
import torch.nn.functional as F
import numpy as np
from detectron2.structures import Boxes, ImageList, Instances
__all__ = ['reduce_sum', '_transpose']
INF = 1000000000
def _transpose(training_targets, num_loc_list):
'''
This function is used to transpose image first training targets to
level first ones
:return: level first training targets
'''
for im_i in range(len(training_targets)):
training_targets[im_i] = torch.split(
training_targets[im_i], num_loc_list, dim=0)
targets_level_first = []
for targets_per_level in zip(*training_targets):
targets_level_first.append(
torch.cat(targets_per_level, dim=0))
return targets_level_first
def reduce_sum(tensor):
    """All-reduce (sum) `tensor` across distributed workers.

    Returns the input unchanged when not running distributed (world size < 2).
    The tensor is cloned first so the caller's copy is never modified.
    """
    world_size = get_world_size()
    if world_size < 2:
        return tensor
    tensor = tensor.clone()
    torch.distributed.all_reduce(tensor, op=torch.distributed.ReduceOp.SUM)
    return tensor
centernet/modeling/layers/deform_conv.py | Python | import torch
from torch import nn
from detectron2.layers import Conv2d
class _NewEmptyTensorOp(torch.autograd.Function):
@staticmethod
def forward(ctx, x, new_shape):
ctx.shape = x.shape
return x.new_empty(new_shape)
@staticmethod
def backward(ctx, grad):
shape = ctx.shape
return _NewEmptyTensorOp.apply(grad, shape), None
class DFConv2d(nn.Module):
    """Deformable convolutional layer.

    Wraps detectron2's (Modulated)DeformConv with an internal conv that
    predicts the sampling offsets (and modulation masks when
    `with_modulated_dcn`). The offset predictor is zero-initialized, so the
    layer initially samples on the regular grid like a plain convolution.
    """
    def __init__(
        self,
        in_channels,
        out_channels,
        with_modulated_dcn=True,
        kernel_size=3,
        stride=1,
        groups=1,
        dilation=1,
        deformable_groups=1,
        bias=False,
        padding=None
    ):
        super(DFConv2d, self).__init__()
        # The `padding` argument is ignored: it is always recomputed from
        # kernel_size and dilation below (kept for interface compatibility).
        if isinstance(kernel_size, (list, tuple)):
            assert isinstance(stride, (list, tuple))
            assert isinstance(dilation, (list, tuple))
            assert len(kernel_size) == 2
            assert len(stride) == 2
            assert len(dilation) == 2
            padding = (
                dilation[0] * (kernel_size[0] - 1) // 2,
                dilation[1] * (kernel_size[1] - 1) // 2
            )
            offset_base_channels = kernel_size[0] * kernel_size[1]
        else:
            padding = dilation * (kernel_size - 1) // 2
            offset_base_channels = kernel_size * kernel_size
        # deferred imports keep detectron2's compiled ops optional at import time
        if with_modulated_dcn:
            from detectron2.layers.deform_conv import ModulatedDeformConv
            offset_channels = offset_base_channels * 3  # default: 27 (x, y, mask per tap)
            conv_block = ModulatedDeformConv
        else:
            from detectron2.layers.deform_conv import DeformConv
            offset_channels = offset_base_channels * 2  # default: 18 (x, y per tap)
            conv_block = DeformConv
        self.offset = Conv2d(
            in_channels,
            deformable_groups * offset_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=1,
            dilation=dilation
        )
        # zero init: offsets (and masks) start neutral
        nn.init.constant_(self.offset.weight, 0)
        nn.init.constant_(self.offset.bias, 0)
        self.conv = conv_block(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            deformable_groups=deformable_groups,
            bias=bias
        )
        self.with_modulated_dcn = with_modulated_dcn
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        # channel index separating x/y offsets from modulation masks
        self.offset_split = offset_base_channels * deformable_groups * 2

    def forward(self, x, return_offset=False):
        """Apply the deformable conv; optionally also return the raw
        offset(+mask) prediction."""
        if x.numel() > 0:
            if not self.with_modulated_dcn:
                offset_mask = self.offset(x)
                x = self.conv(x, offset_mask)
            else:
                offset_mask = self.offset(x)
                offset = offset_mask[:, :self.offset_split, :, :]
                mask = offset_mask[:, self.offset_split:, :, :].sigmoid()
                x = self.conv(x, offset, mask)
            if return_offset:
                return x, offset_mask
            return x
        # Empty input: compute the output shape analytically and return an
        # empty tensor that still participates in autograd.
        # NOTE(review): this path zips padding/dilation/kernel_size/stride and
        # therefore assumes they are 2-element sequences; with scalar ctor
        # arguments it would raise — confirm callers pass tuples whenever
        # empty inputs can occur.
        output_shape = [
            (i + 2 * p - (di * (k - 1) + 1)) // d + 1
            for i, p, di, k, d in zip(
                x.shape[-2:],
                self.padding,
                self.dilation,
                self.kernel_size,
                self.stride
            )
        ]
        output_shape = [x.shape[0], self.conv.weight.shape[0]] + output_shape
        return _NewEmptyTensorOp.apply(x, output_shape)
centernet/modeling/layers/heatmap_focal_loss.py | Python | import torch
from torch.nn import functional as F
# TODO: merge these two function
def heatmap_focal_loss(
    inputs,
    targets,
    pos_inds,
    labels,
    alpha: float = -1,
    beta: float = 4,
    gamma: float = 2,
    reduction: str = 'sum',
    sigmoid_clamp: float = 1e-4,
    ignore_high_fp: float = -1.,
):
    """
    Penalty-reduced focal loss on a class-wise heatmap, in the style of
    RetinaNet's focal loss: https://arxiv.org/abs/1708.02002.
    Args:
        inputs: (sum_l N*Hl*Wl, C) raw logits; sigmoid is applied IN PLACE.
        targets: (sum_l N*Hl*Wl, C) gaussian-splatted targets in [0, 1].
        pos_inds: N flat indices of positive locations.
        labels: N class index of each positive.
    Returns:
        (pos_loss, neg_loss) tuple with the reduction option applied.
    """
    prob = torch.clamp(inputs.sigmoid_(), min=sigmoid_clamp, max=1 - sigmoid_clamp)
    # down-weight negatives that are close to a positive center
    negative_weights = torch.pow(1 - targets, beta)
    prob_at_pos = prob[pos_inds]  # N x C
    pos_prob = prob_at_pos.gather(1, labels.unsqueeze(1))
    pos_term = torch.pow(1 - pos_prob, gamma) * torch.log(pos_prob)
    neg_term = torch.pow(prob, gamma) * torch.log(1 - prob) * negative_weights
    if ignore_high_fp > 0:
        # drop gradient on confidently-wrong negatives
        neg_term = neg_term * (prob < ignore_high_fp).float()
    if reduction == "sum":
        pos_term = pos_term.sum()
        neg_term = neg_term.sum()
    if alpha >= 0:
        pos_term = pos_term * alpha
        neg_term = neg_term * (1 - alpha)
    return -pos_term, -neg_term
# TorchScript-compiled entry point used on the training hot path; swap in
# the plain-Python line below when debugging.
heatmap_focal_loss_jit = torch.jit.script(heatmap_focal_loss)
# heatmap_focal_loss_jit = heatmap_focal_loss
def binary_heatmap_focal_loss(
    inputs,
    targets,
    pos_inds,
    alpha: float = -1,
    beta: float = 4,
    gamma: float = 2,
    sigmoid_clamp: float = 1e-4,
    ignore_high_fp: float = -1.,
):
    """
    Class-agnostic (single-channel) variant of the heatmap focal loss.
    Args:
        inputs: (sum_l N*Hl*Wl,) raw logits; sigmoid is applied IN PLACE.
        targets: (sum_l N*Hl*Wl,) gaussian targets in [0, 1].
        pos_inds: N flat indices of positive locations.
    Returns:
        (pos_loss, neg_loss), both summed scalars.
    """
    prob = torch.clamp(inputs.sigmoid_(), min=sigmoid_clamp, max=1 - sigmoid_clamp)
    # down-weight negatives that are close to a positive center
    negative_weights = torch.pow(1 - targets, beta)
    pos_prob = prob[pos_inds]  # N
    pos_term = torch.pow(1 - pos_prob, gamma) * torch.log(pos_prob)
    neg_term = torch.pow(prob, gamma) * torch.log(1 - prob) * negative_weights
    if ignore_high_fp > 0:
        # drop gradient on confidently-wrong negatives
        neg_term = neg_term * (prob < ignore_high_fp).float()
    pos_loss = - pos_term.sum()
    neg_loss = - neg_term.sum()
    if alpha >= 0:
        pos_loss = alpha * pos_loss
        neg_loss = (1 - alpha) * neg_loss
    return pos_loss, neg_loss
binary_heatmap_focal_loss_jit = torch.jit.script(binary_heatmap_focal_loss) | xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/modeling/layers/iou_loss.py | Python | import torch
from torch import nn
class IOULoss(nn.Module):
    """IoU-based regression loss over (l, t, r, b) distance encodings.

    Variants selected via `loc_loss_type`: 'iou' (-log IoU),
    'linear_iou' (1 - IoU), 'giou' (1 - GIoU).
    """

    def __init__(self, loc_loss_type='iou'):
        super(IOULoss, self).__init__()
        self.loc_loss_type = loc_loss_type

    def forward(self, pred, target, weight=None, reduction='sum'):
        """
        Args:
            pred, target: (N, 4) distances (left, top, right, bottom) from a
                location to the four box sides; entries assumed >= 0.
            weight: optional per-element weights multiplied into the losses.
            reduction: 'sum' | 'batch' | 'none'.
        """
        pred_left = pred[:, 0]
        pred_top = pred[:, 1]
        pred_right = pred[:, 2]
        pred_bottom = pred[:, 3]

        target_left = target[:, 0]
        target_top = target[:, 1]
        target_right = target[:, 2]
        target_bottom = target[:, 3]

        target_area = (target_left + target_right) * \
                      (target_top + target_bottom)
        pred_area = (pred_left + pred_right) * \
                    (pred_top + pred_bottom)

        w_intersect = torch.min(pred_left, target_left) + \
                      torch.min(pred_right, target_right)
        h_intersect = torch.min(pred_bottom, target_bottom) + \
                      torch.min(pred_top, target_top)
        # extents of the smallest enclosing box (for GIoU)
        g_w_intersect = torch.max(pred_left, target_left) + \
                        torch.max(pred_right, target_right)
        g_h_intersect = torch.max(pred_bottom, target_bottom) + \
                        torch.max(pred_top, target_top)
        ac_union = g_w_intersect * g_h_intersect

        area_intersect = w_intersect * h_intersect
        area_union = target_area + pred_area - area_intersect
        # +1 smoothing keeps the ratio finite for degenerate boxes
        ious = (area_intersect + 1.0) / (area_union + 1.0)
        gious = ious - (ac_union - area_union) / ac_union

        if self.loc_loss_type == 'iou':
            losses = -torch.log(ious)
        elif self.loc_loss_type == 'linear_iou':
            losses = 1 - ious
        elif self.loc_loss_type == 'giou':
            losses = 1 - gious
        else:
            raise NotImplementedError

        if weight is not None:
            losses = losses * weight

        if reduction == 'sum':
            return losses.sum()
        elif reduction == 'batch':
            # NOTE(review): `losses` is 1-D here, so sum(dim=[1]) raises for
            # this path — looks unused/broken; confirm before relying on it.
            return losses.sum(dim=[1])
        elif reduction == 'none':
            return losses
        else:
            raise NotImplementedError
def giou_loss(
    boxes1: torch.Tensor,
    boxes2: torch.Tensor,
    reduction: str = "none",
    eps: float = 1e-7,
) -> torch.Tensor:
    """
    Generalized Intersection over Union Loss (Hamid Rezatofighi et. al)
    https://arxiv.org/abs/1902.09630
    Gradient-friendly IoU loss with an additional penalty that is non-zero when the
    boxes do not overlap and scales with the size of their smallest enclosing box.
    This loss is symmetric, so the boxes1 and boxes2 arguments are interchangeable.
    Args:
        boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,).
        reduction: 'none' | 'mean' | 'sum'
            'none': No reduction will be applied to the output.
            'mean': The output will be averaged.
            'sum': The output will be summed.
        eps (float): small number to prevent division by zero
    """
    x1, y1, x2, y2 = boxes1.unbind(dim=-1)
    x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)
    # NOTE: asserts are stripped under `python -O`; inputs are assumed valid.
    assert (x2 >= x1).all(), "bad box: x1 larger than x2"
    assert (y2 >= y1).all(), "bad box: y1 larger than y2"

    # Intersection keypoints
    xkis1 = torch.max(x1, x1g)
    ykis1 = torch.max(y1, y1g)
    xkis2 = torch.min(x2, x2g)
    ykis2 = torch.min(y2, y2g)

    intsctk = torch.zeros_like(x1)
    mask = (ykis2 > ykis1) & (xkis2 > xkis1)
    intsctk[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
    unionk = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsctk
    iouk = intsctk / (unionk + eps)

    # smallest enclosing box
    xc1 = torch.min(x1, x1g)
    yc1 = torch.min(y1, y1g)
    xc2 = torch.max(x2, x2g)
    yc2 = torch.max(y2, y2g)
    area_c = (xc2 - xc1) * (yc2 - yc1)
    # GIoU = IoU - (enclosing area not covered by the union) / enclosing area
    miouk = iouk - ((area_c - unionk) / (area_c + eps))

    loss = 1 - miouk
    if reduction == "mean":
        loss = loss.mean()
    elif reduction == "sum":
        loss = loss.sum()
    return loss
centernet/modeling/layers/ml_nms.py | Python | from detectron2.layers import batched_nms
def ml_nms(boxlist, nms_thresh, max_proposals=-1,
           score_field="scores", label_field="labels"):
    """
    Run class-aware (multi-label) non-maximum suppression on an Instances
    object, suppressing only within each class.

    Arguments:
        boxlist: Instances holding either `pred_boxes`/`pred_classes`
            (detections) or `proposal_boxes` (class-agnostic proposals).
        nms_thresh (float): IoU threshold; <= 0 disables NMS entirely.
        max_proposals (int): if > 0, then only the top max_proposals are kept
            after non-maximum suppression.
        score_field (str): kept for interface compatibility; scores are read
            from `boxlist.scores`.
    """
    if nms_thresh <= 0:
        return boxlist
    if boxlist.has('pred_boxes'):
        candidate_boxes = boxlist.pred_boxes.tensor
        candidate_labels = boxlist.pred_classes
    else:
        # proposals carry no class labels: treat everything as one class
        candidate_boxes = boxlist.proposal_boxes.tensor
        candidate_labels = boxlist.proposal_boxes.tensor.new_zeros(
            len(boxlist.proposal_boxes.tensor))
    kept_idx = batched_nms(
        candidate_boxes, boxlist.scores, candidate_labels, nms_thresh)
    if max_proposals > 0:
        kept_idx = kept_idx[:max_proposals]
    return boxlist[kept_idx]
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/modeling/meta_arch/centernet_detector.py | Python | import math
import json
import numpy as np
import torch
from torch import nn
from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from detectron2.modeling import build_backbone, build_proposal_generator
from detectron2.modeling import detector_postprocess
from detectron2.structures import ImageList
@META_ARCH_REGISTRY.register()
class CenterNetDetector(nn.Module):
    """Single-stage CenterNet meta-architecture: backbone + dense head.

    `self.proposal_generator` is the CenterNet dense head; at train time its
    losses are the detector's only losses, and at test time its outputs are
    returned as the final detections.
    """
    def __init__(self, cfg):
        super().__init__()
        self.mean, self.std = cfg.MODEL.PIXEL_MEAN, cfg.MODEL.PIXEL_STD
        # buffers so the normalization constants follow the module's device
        self.register_buffer("pixel_mean", torch.Tensor(cfg.MODEL.PIXEL_MEAN).view(-1, 1, 1))
        self.register_buffer("pixel_std", torch.Tensor(cfg.MODEL.PIXEL_STD).view(-1, 1, 1))
        self.backbone = build_backbone(cfg)
        self.proposal_generator = build_proposal_generator(
            cfg, self.backbone.output_shape()) # TODO: change to a more precise name
    def forward(self, batched_inputs):
        """Training: return the dense head's loss dict; eval delegates to
        `inference`."""
        if not self.training:
            return self.inference(batched_inputs)
        images = self.preprocess_image(batched_inputs)
        features = self.backbone(images.tensor)
        gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
        _, proposal_losses = self.proposal_generator(
            images, features, gt_instances)
        return proposal_losses
    @property
    def device(self):
        # device is tracked via the registered buffer
        return self.pixel_mean.device
    @torch.no_grad()
    def inference(self, batched_inputs, do_postprocess=True):
        """Run detection; when `do_postprocess`, rescale each result to the
        original image size and wrap it as {"instances": ...}."""
        images = self.preprocess_image(batched_inputs)
        inp = images.tensor
        features = self.backbone(inp)
        proposals, _ = self.proposal_generator(images, features, None)
        processed_results = []
        for results_per_image, input_per_image, image_size in zip(
            proposals, batched_inputs, images.image_sizes):
            if do_postprocess:
                height = input_per_image.get("height", image_size[0])
                width = input_per_image.get("width", image_size[1])
                r = detector_postprocess(results_per_image, height, width)
                processed_results.append({"instances": r})
            else:
                r = results_per_image
                processed_results.append(r)
        return processed_results
    def preprocess_image(self, batched_inputs):
        """
        Normalize, pad and batch the input images.
        """
        images = [x["image"].to(self.device) for x in batched_inputs]
        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
        images = ImageList.from_tensors(images, self.backbone.size_divisibility)
        return images
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/modeling/roi_heads/custom_fast_rcnn.py | Python | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Part of the code is from https://github.com/tztztztztz/eql.detectron2/blob/master/projects/EQL/eql/fast_rcnn.py
import logging
import math
import json
from typing import Dict, Union
import torch
from fvcore.nn import giou_loss, smooth_l1_loss
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.layers import Linear, ShapeSpec, batched_nms, cat, nonzero_tuple
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.structures import Boxes, Instances
from detectron2.utils.events import get_event_storage
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
from detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference
from detectron2.modeling.roi_heads.fast_rcnn import _log_classification_stats
from detectron2.utils.comm import get_world_size
from .fed_loss import load_class_freq, get_fed_loss_inds
__all__ = ["CustomFastRCNNOutputLayers"]
class CustomFastRCNNOutputLayers(FastRCNNOutputLayers):
    """FastRCNN output layers with sigmoid-CE and federated-loss options.

    Extends detectron2's FastRCNNOutputLayers with:
      * optional sigmoid (binary) cross-entropy classification with
        focal-style bias initialization,
      * optional federated loss (per-batch class subsampling by frequency),
      * optional multiplication by proposal scores at inference.
    """
    def __init__(
        self,
        cfg,
        input_shape: ShapeSpec,
        **kwargs
    ):
        super().__init__(cfg, input_shape, **kwargs)
        self.use_sigmoid_ce = cfg.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE
        if self.use_sigmoid_ce:
            # bias init so the initial sigmoid scores equal PRIOR_PROB
            prior_prob = cfg.MODEL.ROI_BOX_HEAD.PRIOR_PROB
            bias_value = -math.log((1 - prior_prob) / prior_prob)
            nn.init.constant_(self.cls_score.bias, bias_value)
        self.cfg = cfg
        self.use_fed_loss = cfg.MODEL.ROI_BOX_HEAD.USE_FED_LOSS
        if self.use_fed_loss:
            self.fed_loss_num_cat = cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_NUM_CAT
            # class-frequency-based sampling weights for the federated loss
            self.register_buffer(
                'freq_weight',
                load_class_freq(
                    cfg.MODEL.ROI_BOX_HEAD.CAT_FREQ_PATH,
                    cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT,
                )
            )
    def losses(self, predictions, proposals):
        """
        Compute classification + box regression losses; dispatches between
        sigmoid and softmax cross-entropy per config.
        """
        scores, proposal_deltas = predictions
        gt_classes = (
            cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)
        )
        num_classes = self.num_classes
        _log_classification_stats(scores, gt_classes)
        if len(proposals):
            proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4
            assert not proposal_boxes.requires_grad, "Proposals should not require gradients!"
            # fall back to the proposal box itself when no gt box is matched
            gt_boxes = cat(
                [(p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes).tensor for p in proposals],
                dim=0,
            )
        else:
            proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)
        if self.use_sigmoid_ce:
            loss_cls = self.sigmoid_cross_entropy_loss(scores, gt_classes)
        else:
            loss_cls = self.softmax_cross_entropy_loss(scores, gt_classes)
        return {
            "loss_cls": loss_cls,
            "loss_box_reg": self.box_reg_loss(
                proposal_boxes, gt_boxes, proposal_deltas, gt_classes)
        }
    def sigmoid_cross_entropy_loss(self, pred_class_logits, gt_classes):
        """Binary CE over the C foreground columns; the last logit column is
        the background class and is dropped from the loss."""
        if pred_class_logits.numel() == 0:
            return pred_class_logits.new_zeros([1])[0] # This is more robust than .sum() * 0.
        B = pred_class_logits.shape[0]
        C = pred_class_logits.shape[1] - 1
        target = pred_class_logits.new_zeros(B, C + 1)
        target[range(len(gt_classes)), gt_classes] = 1 # B x (C + 1)
        target = target[:, :C] # B x C
        weight = 1
        if self.use_fed_loss and (self.freq_weight is not None): # fedloss
            # only classes sampled for this batch contribute to the loss
            appeared = get_fed_loss_inds(
                gt_classes,
                num_sample_cats=self.fed_loss_num_cat,
                C=C,
                weight=self.freq_weight)
            appeared_mask = appeared.new_zeros(C + 1)
            appeared_mask[appeared] = 1 # C + 1
            appeared_mask = appeared_mask[:C]
            fed_w = appeared_mask.view(1, C).expand(B, C)
            weight = weight * fed_w.float()
        cls_loss = F.binary_cross_entropy_with_logits(
            pred_class_logits[:, :-1], target, reduction='none') # B x C
        loss = torch.sum(cls_loss * weight) / B
        return loss
    def softmax_cross_entropy_loss(self, pred_class_logits, gt_classes):
        """Softmax CE with empty-input handling and optional federated
        class weighting (background class always kept)."""
        if pred_class_logits.numel() == 0:
            return pred_class_logits.new_zeros([1])[0]
        if self.use_fed_loss and (self.freq_weight is not None):
            C = pred_class_logits.shape[1] - 1
            appeared = get_fed_loss_inds(
                gt_classes,
                num_sample_cats=self.fed_loss_num_cat,
                C=C,
                weight=self.freq_weight)
            appeared_mask = appeared.new_zeros(C + 1).float()
            appeared_mask[appeared] = 1. # C + 1
            appeared_mask[C] = 1.
            loss = F.cross_entropy(
                pred_class_logits, gt_classes,
                weight=appeared_mask, reduction="mean")
        else:
            loss = F.cross_entropy(
                pred_class_logits, gt_classes, reduction="mean")
        return loss
    def inference(self, predictions, proposals):
        """
        Standard fast-rcnn inference; optionally folds the proposal score
        into the class score via a geometric mean (MULT_PROPOSAL_SCORE).
        """
        boxes = self.predict_boxes(predictions, proposals)
        scores = self.predict_probs(predictions, proposals)
        if self.cfg.MODEL.ROI_BOX_HEAD.MULT_PROPOSAL_SCORE:
            proposal_scores = [p.get('objectness_logits') for p in proposals]
            scores = [(s * ps[:, None]) ** 0.5 \
                for s, ps in zip(scores, proposal_scores)]
        image_shapes = [x.image_size for x in proposals]
        return fast_rcnn_inference(
            boxes,
            scores,
            image_shapes,
            self.test_score_thresh,
            self.test_nms_thresh,
            self.test_topk_per_image,
        )
    def predict_probs(self, predictions, proposals):
        """
        Per-image class probabilities; sigmoid or softmax to match the
        training loss.
        """
        scores, _ = predictions
        num_inst_per_image = [len(p) for p in proposals]
        if self.use_sigmoid_ce:
            probs = scores.sigmoid()
        else:
            probs = F.softmax(scores, dim=-1)
        return probs.split(num_inst_per_image, dim=0)
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/modeling/roi_heads/custom_roi_heads.py | Python | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
import json
import math
import torch
from torch import nn
from torch.autograd.function import Function
from typing import Dict, List, Optional, Tuple, Union
from detectron2.layers import ShapeSpec
from detectron2.structures import Boxes, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference
from detectron2.modeling.roi_heads.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.modeling.roi_heads.cascade_rcnn import CascadeROIHeads
from detectron2.modeling.roi_heads.box_head import build_box_head
from .custom_fast_rcnn import CustomFastRCNNOutputLayers
@ROI_HEADS_REGISTRY.register()
class CustomROIHeads(StandardROIHeads):
    """StandardROIHeads using CustomFastRCNNOutputLayers, with debug hooks."""
    @classmethod
    def _init_box_head(self, cfg, input_shape):
        # NOTE(review): declared @classmethod with a `self` first argument —
        # the debug attributes assigned below land on the CLASS object, not
        # on instances; confirm this is intended.
        ret = super()._init_box_head(cfg, input_shape)
        # swap the default predictor for the custom one
        del ret['box_predictor']
        ret['box_predictor'] = CustomFastRCNNOutputLayers(
            cfg, ret['box_head'].output_shape)
        self.debug = cfg.DEBUG
        if self.debug:
            self.debug_show_name = cfg.DEBUG_SHOW_NAME
            self.save_debug = cfg.SAVE_DEBUG
            self.vis_thresh = cfg.VIS_THRESH
            self.pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(
                torch.device(cfg.MODEL.DEVICE)).view(3, 1, 1)
            self.pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(
                torch.device(cfg.MODEL.DEVICE)).view(3, 1, 1)
        return ret
    def forward(self, images, features, proposals, targets=None):
        """
        Same flow as StandardROIHeads.forward, but keeps `images` around for
        the debug visualization path.
        """
        if not self.debug:
            del images
        if self.training:
            assert targets
            proposals = self.label_and_sample_proposals(proposals, targets)
        del targets
        if self.training:
            losses = self._forward_box(features, proposals)
            losses.update(self._forward_mask(features, proposals))
            losses.update(self._forward_keypoint(features, proposals))
            return proposals, losses
        else:
            pred_instances = self._forward_box(features, proposals)
            pred_instances = self.forward_with_given_boxes(features, pred_instances)
            if self.debug:
                from ..debug import debug_second_stage
                # un-normalize back to raw pixel values for display
                denormalizer = lambda x: x * self.pixel_std + self.pixel_mean
                debug_second_stage(
                    [denormalizer(images[0].clone())],
                    pred_instances, proposals=proposals,
                    debug_show_name=self.debug_show_name)
            return pred_instances, {}
@ROI_HEADS_REGISTRY.register()
class CustomCascadeROIHeads(CascadeROIHeads):
    @classmethod
    def _init_box_head(self, cfg, input_shape):
        # NOTE(review): declared @classmethod with a `self` first argument —
        # attributes assigned below land on the CLASS, not instances; confirm.
        self.mult_proposal_score = cfg.MODEL.ROI_BOX_HEAD.MULT_PROPOSAL_SCORE
        ret = super()._init_box_head(cfg, input_shape)
        # replace the default stage predictors with CustomFastRCNNOutputLayers,
        # one per cascade stage with its own box2box regression weights
        del ret['box_predictors']
        cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS
        box_predictors = []
        for box_head, bbox_reg_weights in zip(ret['box_heads'], cascade_bbox_reg_weights):
            box_predictors.append(
                CustomFastRCNNOutputLayers(
                    cfg, box_head.output_shape,
                    box2box_transform=Box2BoxTransform(weights=bbox_reg_weights)
                ))
        ret['box_predictors'] = box_predictors
        self.debug = cfg.DEBUG
        if self.debug:
            self.debug_show_name = cfg.DEBUG_SHOW_NAME
            self.save_debug = cfg.SAVE_DEBUG
            self.vis_thresh = cfg.VIS_THRESH
            self.pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(
                torch.device(cfg.MODEL.DEVICE)).view(3, 1, 1)
            self.pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(
                torch.device(cfg.MODEL.DEVICE)).view(3, 1, 1)
        return ret
def _forward_box(self, features, proposals, targets=None):
"""
Add mult proposal scores at testing
"""
if (not self.training) and self.mult_proposal_score:
if len(proposals) > 0 and proposals[0].has('scores'):
proposal_scores = [
p.get('scores') for p in proposals]
else:
proposal_scores = [
p.get('objectness_logits') for p in proposals]
features = [features[f] for f in self.box_in_features]
head_outputs = [] # (predictor, predictions, proposals)
prev_pred_boxes = None
image_sizes = [x.image_size for x in proposals]
for k in range(self.num_cascade_stages):
if k > 0:
proposals = self._create_proposals_from_boxes(prev_pred_boxes, image_sizes)
if self.training:
proposals = self._match_and_label_boxes(proposals, k, targets)
predictions = self._run_stage(features, proposals, k)
prev_pred_boxes = self.box_predictor[k].predict_boxes(predictions, proposals)
head_outputs.append((self.box_predictor[k], predictions, proposals))
if self.training:
losses = {}
storage = get_event_storage()
for stage, (predictor, predictions, proposals) in enumerate(head_outputs):
with storage.name_scope("stage{}".format(stage)):
stage_losses = predictor.losses(predictions, proposals)
losses.update({k + "_stage{}".format(stage): v for k, v in stage_losses.items()})
return losses
else:
# Each is a list[Tensor] of length #image. Each tensor is Ri x (K+1)
scores_per_stage = [h[0].predict_probs(h[1], h[2]) for h in head_outputs]
scores = [
sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages)
for scores_per_image in zip(*scores_per_stage)
]
if self.mult_proposal_score:
scores = [(s * ps[:, None]) ** 0.5 \
for s, ps in zip(scores, proposal_scores)]
predictor, predictions, proposals = head_outputs[-1]
boxes = predictor.predict_boxes(predictions, proposals)
pred_instances, _ = fast_rcnn_inference(
boxes,
scores,
image_sizes,
predictor.test_score_thresh,
predictor.test_nms_thresh,
predictor.test_topk_per_image,
)
return pred_instances
def forward(self, images, features, proposals, targets=None):
'''
enable debug
'''
if not self.debug:
del images
if self.training:
proposals = self.label_and_sample_proposals(proposals, targets)
if self.training:
losses = self._forward_box(features, proposals, targets)
losses.update(self._forward_mask(features, proposals))
losses.update(self._forward_keypoint(features, proposals))
return proposals, losses
else:
# import pdb; pdb.set_trace()
pred_instances = self._forward_box(features, proposals)
pred_instances = self.forward_with_given_boxes(features, pred_instances)
if self.debug:
from ..debug import debug_second_stage
denormalizer = lambda x: x * self.pixel_std + self.pixel_mean
debug_second_stage(
[denormalizer(x.clone()) for x in images],
pred_instances, proposals=proposals,
save_debug=self.save_debug,
debug_show_name=self.debug_show_name,
vis_thresh=self.vis_thresh)
return pred_instances, {}
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
centernet/modeling/roi_heads/fed_loss.py | Python | import torch
import json
import numpy as np
from torch.nn import functional as F
def load_class_freq(
    path='datasets/lvis/lvis_v1_train_cat_info.json',
    freq_weight=0.5):
    """
    Load per-category image counts and turn them into federated-loss weights.

    Args:
        path (str): json file containing a list of dicts with at least the
            keys 'id' and 'image_count'.
        freq_weight (float): exponent applied to the image counts.

    Returns:
        Tensor: `image_count ** freq_weight`, ordered by ascending category id.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original `json.load(open(path))` leaked the handle).
    with open(path, 'r') as f:
        cat_info = json.load(f)
    cat_info = torch.tensor(
        [c['image_count'] for c in sorted(cat_info, key=lambda x: x['id'])])
    freq_weight = cat_info.float() ** freq_weight
    return freq_weight
def get_fed_loss_inds(
    gt_classes, num_sample_cats=50, C=1203, \
    weight=None, fed_cls_inds=-1):
    """
    Select the class indices that participate in the federated loss.

    Classes present in `gt_classes` are always kept; when fewer than
    `num_sample_cats` distinct classes appear, extra negative classes are
    drawn without replacement, proportionally to `weight` when given.
    """
    appeared = torch.unique(gt_classes)  # C'
    num_appeared = len(appeared)
    # Slot C (the last one) acts as background and must never be drawn.
    sampling_prob = appeared.new_ones(C + 1).float()
    sampling_prob[-1] = 0
    if num_appeared < num_sample_cats:
        if weight is not None:
            sampling_prob[:C] = weight.float().clone()
        # Classes already present must not be drawn again.
        sampling_prob[appeared] = 0
        if fed_cls_inds > 0:
            # Restrict sampling to the first `fed_cls_inds` classes.
            sampling_prob[fed_cls_inds:] = 0
        extra = torch.multinomial(
            sampling_prob, num_sample_cats - num_appeared,
            replacement=False)
        appeared = torch.cat([appeared, extra])
    return appeared
demo.py | Python | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import glob
import multiprocessing as mp
import os
import time
import cv2
import tqdm
from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from predictor import VisualizationDemo
from centernet.config import add_centernet_config
# constants
WINDOW_NAME = "CenterNet2 detections"
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode, Visualizer
from detectron2.data import MetadataCatalog
def setup_cfg(args):
    """Build and freeze a detectron2 config from the parsed CLI arguments."""
    cfg = get_cfg()
    add_centernet_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # Propagate the single CLI confidence threshold to every builtin
    # test-time threshold the supported meta-architectures read.
    thresh = args.confidence_threshold
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = thresh
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = thresh
    if cfg.MODEL.META_ARCHITECTURE in ['ProposalNetwork', 'CenterNetDetector']:
        cfg.MODEL.CENTERNET.INFERENCE_TH = thresh
        cfg.MODEL.CENTERNET.NMS_TH = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = thresh
    cfg.freeze()
    return cfg
def get_parser():
    """Build the command-line interface of the demo script."""
    p = argparse.ArgumentParser(description="Detectron2 demo for builtin models")
    p.add_argument(
        "--config-file",
        default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
        metavar="FILE",
        help="path to config file",
    )
    # Exactly one of --webcam / --video-input / --input is expected.
    p.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
    p.add_argument("--video-input", help="Path to video file.")
    p.add_argument("--input", nargs="+", help="A list of space separated input images")
    p.add_argument(
        "--output",
        help=(
            "A file or directory to save output visualizations. "
            "If not given, will show output in an OpenCV window."
        ),
    )
    p.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.3,
        help="Minimum score for instance predictions to be shown",
    )
    # Everything after --opts is forwarded verbatim to cfg.merge_from_list.
    p.add_argument(
        "--opts",
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return p
if __name__ == "__main__":
    # "spawn" is required because CUDA contexts cannot be forked.
    mp.set_start_method("spawn", force=True)
    args = get_parser().parse_args()
    logger = setup_logger()
    logger.info("Arguments: " + str(args))

    cfg = setup_cfg(args)

    demo = VisualizationDemo(cfg)
    output_file = None
    if args.input:
        if len(args.input) == 1:
            args.input = glob.glob(os.path.expanduser(args.input[0]))
            # NOTE(review): the single input is then treated as a directory and
            # joined with plain `+` — callers must pass a trailing slash;
            # consider os.path.join. Confirm intended usage.
            files = os.listdir(args.input[0])
            args.input = [args.input[0] + x for x in files]
            assert args.input, "The input path(s) was not found"
        # A VideoVisualizer is reused across images so that colors stay stable.
        visualizer = VideoVisualizer(
            MetadataCatalog.get(
                cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
            ),
            instance_mode=ColorMode.IMAGE)
        for path in tqdm.tqdm(args.input, disable=not args.output):
            # use PIL, to be consistent with evaluation
            img = read_image(path, format="BGR")
            start_time = time.time()
            predictions, visualized_output = demo.run_on_image(
                img, visualizer=visualizer)
            if 'instances' in predictions:
                logger.info(
                    "{}: detected {} instances in {:.2f}s".format(
                        path, len(predictions["instances"]), time.time() - start_time
                    )
                )
            else:
                logger.info(
                    "{}: detected {} instances in {:.2f}s".format(
                        path, len(predictions["proposals"]), time.time() - start_time
                    )
                )

            if args.output:
                if os.path.isdir(args.output):
                    assert os.path.isdir(args.output), args.output
                    out_filename = os.path.join(args.output, os.path.basename(path))
                    visualized_output.save(out_filename)
                else:
                    # assert len(args.input) == 1, "Please specify a directory with args.output"
                    # out_filename = args.output
                    # Non-directory output: stream all visualizations into one video.
                    if output_file is None:
                        width = visualized_output.get_image().shape[1]
                        height = visualized_output.get_image().shape[0]
                        frames_per_second = 15
                        output_file = cv2.VideoWriter(
                            filename=args.output,
                            # some installation of opencv may not support x264 (due to its license),
                            # you can try other format (e.g. MPEG)
                            fourcc=cv2.VideoWriter_fourcc(*"x264"),
                            fps=float(frames_per_second),
                            frameSize=(width, height),
                            isColor=True,
                        )
                    # Visualizer yields RGB; OpenCV expects BGR.
                    output_file.write(visualized_output.get_image()[:, :, ::-1])
            else:
                # cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
                cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
                if cv2.waitKey(1 ) == 27:
                    break  # esc to quit
    elif args.webcam:
        assert args.input is None, "Cannot have both --input and --webcam!"
        cam = cv2.VideoCapture(0)
        for vis in tqdm.tqdm(demo.run_on_video(cam)):
            cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
            cv2.imshow(WINDOW_NAME, vis)
            if cv2.waitKey(1) == 27:
                break  # esc to quit
        cv2.destroyAllWindows()
    elif args.video_input:
        video = cv2.VideoCapture(args.video_input)
        width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
        # NOTE(review): output fps is hard-coded to 15 instead of the source
        # fps (commented out) — playback speed will differ from the input.
        frames_per_second = 15 # video.get(cv2.CAP_PROP_FPS)
        num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        basename = os.path.basename(args.video_input)

        if args.output:
            if os.path.isdir(args.output):
                output_fname = os.path.join(args.output, basename)
                output_fname = os.path.splitext(output_fname)[0] + ".mkv"
            else:
                output_fname = args.output
            # assert not os.path.isfile(output_fname), output_fname
            output_file = cv2.VideoWriter(
                filename=output_fname,
                # some installation of opencv may not support x264 (due to its license),
                # you can try other format (e.g. MPEG)
                fourcc=cv2.VideoWriter_fourcc(*"x264"),
                fps=float(frames_per_second),
                frameSize=(width, height),
                isColor=True,
            )
        assert os.path.isfile(args.video_input)
        for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):
            if args.output:
                output_file.write(vis_frame)
            else:
                cv2.namedWindow(basename, cv2.WINDOW_NORMAL)
                cv2.imshow(basename, vis_frame)
                if cv2.waitKey(1) == 27:
                    break  # esc to quit
        video.release()
        if args.output:
            output_file.release()
        else:
            cv2.destroyAllWindows()
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
predictor.py | Python | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import atexit
import bisect
import multiprocessing as mp
from collections import deque
import cv2
import torch
from detectron2.data import MetadataCatalog
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode, Visualizer
class VisualizationDemo(object):
    """
    Runs a detectron2 predictor on images or video frames and renders the
    predictions, optionally using multiple worker processes (AsyncPredictor).
    """

    def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
        """
        Args:
            cfg (CfgNode):
            instance_mode (ColorMode):
            parallel (bool): whether to run the model in different processes from visualization.
                Useful since the visualization logic can be slow.
        """
        self.metadata = MetadataCatalog.get(
            cfg.DATASETS.TRAIN[0] if len(cfg.DATASETS.TRAIN) else "__unused"
        )
        self.cpu_device = torch.device("cpu")
        self.instance_mode = instance_mode

        self.parallel = parallel
        if parallel:
            # One worker process per visible GPU.
            num_gpu = torch.cuda.device_count()
            self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
        else:
            self.predictor = DefaultPredictor(cfg)

    def run_on_image(self, image, visualizer=None):
        """
        Args:
            image (np.ndarray): an image of shape (H, W, C) (in BGR order).
                This is the format used by OpenCV.
            visualizer: optional VideoVisualizer; when given, it is reused so
                that instance colors stay consistent across calls.

        Returns:
            predictions (dict): the output of the model.
            vis_output (VisImage): the visualized image output.
        """
        vis_output = None
        predictions = self.predictor(image)
        # Convert image from OpenCV BGR format to Matplotlib RGB format.
        image = image[:, :, ::-1]
        # A caller-supplied visualizer uses the VideoVisualizer API
        # (draw_instance_predictions takes the frame as first argument).
        use_video_vis = True
        if visualizer is None:
            use_video_vis = False
            visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
        if "panoptic_seg" in predictions:
            panoptic_seg, segments_info = predictions["panoptic_seg"]
            vis_output = visualizer.draw_panoptic_seg_predictions(
                panoptic_seg.to(self.cpu_device), segments_info
            )
        else:
            if "sem_seg" in predictions:
                vis_output = visualizer.draw_sem_seg(
                    predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
                )
            if "instances" in predictions:
                instances = predictions["instances"].to(self.cpu_device)
                if use_video_vis:
                    vis_output = visualizer.draw_instance_predictions(
                        image, predictions=instances)
                else:
                    vis_output = visualizer.draw_instance_predictions(predictions=instances)
            elif "proposals" in predictions:
                # Render raw proposals as unlabeled instances (class -1).
                instances = predictions["proposals"].to(self.cpu_device)
                instances.pred_boxes = instances.proposal_boxes
                instances.scores = instances.objectness_logits
                instances.pred_classes[:] = -1
                if use_video_vis:
                    vis_output = visualizer.draw_instance_predictions(
                        image, predictions=instances)
                else:
                    vis_output = visualizer.draw_instance_predictions(predictions=instances)
        return predictions, vis_output

    def _frame_from_video(self, video):
        # Yield frames until the stream is closed or a read fails.
        while video.isOpened():
            success, frame = video.read()
            if success:
                yield frame
            else:
                break

    def run_on_video(self, video):
        """
        Visualizes predictions on frames of the input video.

        Args:
            video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be
                either a webcam or a video file.

        Yields:
            ndarray: BGR visualizations of each video frame.
        """
        video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)

        def process_predictions(frame, predictions):
            # Render one frame; returns a BGR ndarray ready for cv2 display.
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            if "panoptic_seg" in predictions:
                panoptic_seg, segments_info = predictions["panoptic_seg"]
                vis_frame = video_visualizer.draw_panoptic_seg_predictions(
                    frame, panoptic_seg.to(self.cpu_device), segments_info
                )
            elif "instances" in predictions:
                predictions = predictions["instances"].to(self.cpu_device)
                vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)
            elif "sem_seg" in predictions:
                vis_frame = video_visualizer.draw_sem_seg(
                    frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
                )
            elif "proposals" in predictions:
                # Render raw proposals as unlabeled instances (class -1).
                predictions = predictions["proposals"].to(self.cpu_device)
                predictions.pred_boxes = predictions.proposal_boxes
                predictions.scores = predictions.objectness_logits
                predictions.pred_classes[:] = -1
                vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)

            # Converts Matplotlib RGB format to OpenCV BGR format
            vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)
            return vis_frame

        frame_gen = self._frame_from_video(video)
        if self.parallel:
            # Pipeline: keep up to `buffer_size` frames in flight in the
            # async predictor before collecting results in order.
            buffer_size = self.predictor.default_buffer_size

            frame_data = deque()

            for cnt, frame in enumerate(frame_gen):
                frame_data.append(frame)
                self.predictor.put(frame)

                if cnt >= buffer_size:
                    frame = frame_data.popleft()
                    predictions = self.predictor.get()
                    yield process_predictions(frame, predictions)

            # Drain the remaining in-flight frames.
            while len(frame_data):
                frame = frame_data.popleft()
                predictions = self.predictor.get()
                yield process_predictions(frame, predictions)
        else:
            for frame in frame_gen:
                yield process_predictions(frame, self.predictor(frame))
class AsyncPredictor:
    """
    A predictor that runs the model asynchronously, possibly on >1 GPUs.
    Because rendering the visualization takes considerably amount of time,
    this helps improve throughput when rendering videos.
    """

    class _StopToken:
        # Sentinel placed on the task queue to tell a worker to exit.
        pass

    class _PredictWorker(mp.Process):
        """One worker process owning a DefaultPredictor on a fixed device."""

        def __init__(self, cfg, task_queue, result_queue):
            self.cfg = cfg
            self.task_queue = task_queue
            self.result_queue = result_queue
            super().__init__()

        def run(self):
            # The predictor (and its CUDA context) is created inside the
            # child process, after the "spawn" start.
            predictor = DefaultPredictor(self.cfg)

            while True:
                task = self.task_queue.get()
                if isinstance(task, AsyncPredictor._StopToken):
                    break
                idx, data = task
                result = predictor(data)
                # Results carry their submission index so the parent can
                # reorder them.
                self.result_queue.put((idx, result))

    def __init__(self, cfg, num_gpus: int = 1):
        """
        Args:
            cfg (CfgNode):
            num_gpus (int): if 0, will run on CPU
        """
        num_workers = max(num_gpus, 1)
        self.task_queue = mp.Queue(maxsize=num_workers * 3)
        self.result_queue = mp.Queue(maxsize=num_workers * 3)
        self.procs = []
        for gpuid in range(max(num_gpus, 1)):
            # Give each worker its own config clone pinned to one device.
            cfg = cfg.clone()
            cfg.defrost()
            cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu"
            self.procs.append(
                AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)
            )

        # put_idx/get_idx track submission order; result_rank/result_data
        # buffer out-of-order results until their turn comes.
        self.put_idx = 0
        self.get_idx = 0
        self.result_rank = []
        self.result_data = []

        for p in self.procs:
            p.start()
        atexit.register(self.shutdown)

    def put(self, image):
        self.put_idx += 1
        self.task_queue.put((self.put_idx, image))

    def get(self):
        self.get_idx += 1  # the index needed for this request
        # Fast path: the next result in order is already buffered.
        if len(self.result_rank) and self.result_rank[0] == self.get_idx:
            res = self.result_data[0]
            del self.result_data[0], self.result_rank[0]
            return res

        while True:
            # make sure the results are returned in the correct order
            idx, res = self.result_queue.get()
            if idx == self.get_idx:
                return res
            # Out-of-order result: keep the buffer sorted by index.
            insert = bisect.bisect(self.result_rank, idx)
            self.result_rank.insert(insert, idx)
            self.result_data.insert(insert, res)

    def __len__(self):
        # Number of submitted tasks whose results were not retrieved yet.
        return self.put_idx - self.get_idx

    def __call__(self, image):
        # Synchronous convenience wrapper: submit one image, wait for it.
        self.put(image)
        return self.get()

    def shutdown(self):
        # One stop token per worker; each worker consumes exactly one.
        for _ in self.procs:
            self.task_queue.put(AsyncPredictor._StopToken())

    @property
    def default_buffer_size(self):
        # How many frames callers should keep in flight.
        return len(self.procs) * 5
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
tools/analyze_model.py | Python | # -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
from collections import Counter
import tqdm
from fvcore.nn import flop_count_table # can also try flop_count_str
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import CfgNode, LazyConfig, get_cfg, instantiate
from detectron2.data import build_detection_test_loader
from detectron2.engine import default_argument_parser
from detectron2.modeling import build_model
from detectron2.utils.analysis import (
FlopCountAnalysis,
activation_count_operators,
parameter_count_table,
)
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("detectron2")
def setup(args):
    """Load a yaml (CfgNode) or python (LazyConfig) config and init logging."""
    if not args.config_file.endswith(".yaml"):
        # New-style lazy python config.
        cfg = LazyConfig.load(args.config_file)
        cfg = LazyConfig.apply_overrides(cfg, args.opts)
    else:
        cfg = get_cfg()
        cfg.merge_from_file(args.config_file)
        # Keep data loading in-process for deterministic analysis.
        cfg.DATALOADER.NUM_WORKERS = 0
        cfg.merge_from_list(args.opts)
        cfg.freeze()
    setup_logger(name="fvcore")
    setup_logger()
    return cfg
def do_flop(cfg):
    """Count FLOPs over `args.num_inputs` samples and log per-op averages."""
    if isinstance(cfg, CfgNode):
        data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    else:
        data_loader = instantiate(cfg.dataloader.test)
        model = instantiate(cfg.model)
        model.to(cfg.train.device)
        DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
    model.eval()

    op_counts = Counter()
    per_input_flops = []
    for idx, data in zip(tqdm.trange(args.num_inputs), data_loader):  # noqa
        flops = FlopCountAnalysis(model, data)
        if idx > 0:
            # The warnings are informative once; silence them afterwards.
            flops.unsupported_ops_warnings(False).uncalled_modules_warnings(False)
        op_counts += flops.by_operator()
        per_input_flops.append(flops.total())

    logger.info("Flops table computed from only one input sample:\n" + flop_count_table(flops))
    logger.info(
        "Average GFlops for each type of operators:\n"
        + str([(k, v / (idx + 1) / 1e9) for k, v in op_counts.items()])
    )
    logger.info(
        "Total GFlops: {:.1f}±{:.1f}".format(np.mean(per_input_flops) / 1e9, np.std(per_input_flops) / 1e9)
    )
def do_activation(cfg):
    """Count activations over `args.num_inputs` samples and log averages."""
    if isinstance(cfg, CfgNode):
        data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    else:
        data_loader = instantiate(cfg.dataloader.test)
        model = instantiate(cfg.model)
        model.to(cfg.train.device)
        DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
    model.eval()

    counts = Counter()
    total_activations = []
    for idx, data in zip(tqdm.trange(args.num_inputs), data_loader):  # noqa
        count = activation_count_operators(model, data)
        counts += count
        total_activations.append(sum(count.values()))
    logger.info(
        "(Million) Activations for Each Type of Operators:\n"
        # Fix: average over the number of processed inputs. `idx` is 0-based,
        # so `idx + 1` inputs were seen; the previous `v / idx` was off by one
        # and raised ZeroDivisionError with --num-inputs 1. This also matches
        # do_flop above.
        + str([(k, v / (idx + 1)) for k, v in counts.items()])
    )
    logger.info(
        "Total (Million) Activations: {}±{}".format(
            np.mean(total_activations), np.std(total_activations)
        )
    )
def do_parameter(cfg):
    """Log a parameter-count table of the model, grouped up to depth 5."""
    model = build_model(cfg) if isinstance(cfg, CfgNode) else instantiate(cfg.model)
    logger.info("Parameter Count:\n" + parameter_count_table(model, max_depth=5))
def do_structure(cfg):
    """Log the model's module hierarchy (its repr)."""
    model = build_model(cfg) if isinstance(cfg, CfgNode) else instantiate(cfg.model)
    logger.info("Model Structure:\n" + str(model))
if __name__ == "__main__":
    parser = default_argument_parser(
        epilog="""
Examples:

To show parameters of a model:
$ ./analyze_model.py --tasks parameter \\
    --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml

Flops and activations are data-dependent, therefore inputs and model weights
are needed to count them:

$ ./analyze_model.py --num-inputs 100 --tasks flop \\
    --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \\
    MODEL.WEIGHTS /path/to/model.pkl
"""
    )
    # Several tasks may be requested at once; each runs on the same config.
    parser.add_argument(
        "--tasks",
        choices=["flop", "activation", "parameter", "structure"],
        required=True,
        nargs="+",
    )
    parser.add_argument(
        "-n",
        "--num-inputs",
        default=100,
        type=int,
        help="number of inputs used to compute statistics for flops/activations, "
        "both are data dependent.",
    )
    args = parser.parse_args()
    # This tool is single-process analysis only.
    assert not args.eval_only
    assert args.num_gpus == 1

    cfg = setup(args)

    # Dispatch each requested task to its handler.
    for task in args.tasks:
        {
            "flop": do_flop,
            "activation": do_activation,
            "parameter": do_parameter,
            "structure": do_structure,
        }[task](cfg)
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
tools/benchmark.py | Python | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
A script to benchmark builtin models.
Note: this script has an extra dependency of psutil.
"""
import itertools
import logging
import psutil
import torch
import tqdm
from fvcore.common.timer import Timer
from torch.nn.parallel import DistributedDataParallel
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, get_cfg, instantiate
from detectron2.data import (
DatasetFromList,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.data.benchmark import DataLoaderBenchmark
from detectron2.engine import AMPTrainer, SimpleTrainer, default_argument_parser, hooks, launch
from detectron2.modeling import build_model
from detectron2.solver import build_optimizer
from detectron2.utils import comm
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.events import CommonMetricPrinter
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("detectron2")
def setup(args):
    """Load a yaml (CfgNode) or python (LazyConfig) config for benchmarking."""
    if not args.config_file.endswith(".yaml"):
        cfg = LazyConfig.load(args.config_file)
        cfg = LazyConfig.apply_overrides(cfg, args.opts)
    else:
        cfg = get_cfg()
        cfg.merge_from_file(args.config_file)
        cfg.SOLVER.BASE_LR = 0.001  # Avoid NaNs. Not useful in this script anyway.
        cfg.merge_from_list(args.opts)
        cfg.freeze()
    setup_logger(distributed_rank=comm.get_rank())
    return cfg
def create_data_benchmark(cfg, args):
    """Build a DataLoaderBenchmark from either a lazy (.py) or yaml config."""
    if args.config_file.endswith(".py"):
        # Re-target the training dataloader config at the benchmark wrapper.
        dl_cfg = cfg.dataloader.train
        dl_cfg._target_ = DataLoaderBenchmark
        return instantiate(dl_cfg)
    # Yaml path: reuse the train-loader kwargs, minus the unsupported option.
    kwargs = build_detection_train_loader.from_config(cfg)
    kwargs.pop("aspect_ratio_grouping", None)
    kwargs["_target_"] = DataLoaderBenchmark
    return instantiate(kwargs)
def RAM_msg():
    """Return a human-readable "used/total" system RAM string in GB."""
    vram = psutil.virtual_memory()
    used_gb = (vram.total - vram.available) / 1024 ** 3
    total_gb = vram.total / 1024 ** 3
    return "RAM Usage: {:.2f}/{:.2f} GB".format(used_gb, total_gb)
def benchmark_data(args):
    """Benchmark the training dataloader, logging RAM usage between rounds."""
    cfg = setup(args)
    logger.info("After spawning " + RAM_msg())
    benchmark = create_data_benchmark(cfg, args)
    benchmark.benchmark_distributed(250, 10)
    # test for a few more rounds
    for round_idx in range(10):
        logger.info(f"Iteration {round_idx} " + RAM_msg())
        benchmark.benchmark_distributed(250, 1)
def benchmark_data_advanced(args):
    """Benchmark the dataloader stage by stage to locate the bottleneck."""
    cfg = setup(args)
    benchmark = create_data_benchmark(cfg, args)
    is_main_process = comm.get_rank() == 0
    if is_main_process:
        # Per-stage breakdown: raw dataset, mapper, worker pool, IPC.
        benchmark.benchmark_dataset(100)
        benchmark.benchmark_mapper(100)
        benchmark.benchmark_workers(100, warmup=10)
        benchmark.benchmark_IPC(100, warmup=10)
    if comm.get_world_size() > 1:
        # Run twice: the second pass measures steady-state throughput.
        benchmark.benchmark_distributed(100)
        logger.info("Rerun ...")
        benchmark.benchmark_distributed(100)
def benchmark_train(args):
    """Benchmark training speed: replay 100 cached batches for 400 iters."""
    cfg = setup(args)
    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))
    if comm.get_world_size() > 1:
        model = DistributedDataParallel(
            model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
        )
    optimizer = build_optimizer(cfg, model)
    checkpointer = DetectionCheckpointer(model, optimizer=optimizer)
    checkpointer.load(cfg.MODEL.WEIGHTS)

    cfg.defrost()
    cfg.DATALOADER.NUM_WORKERS = 2
    data_loader = build_detection_train_loader(cfg)
    # Cache 100 real batches and replay them, excluding data-loading cost.
    dummy_data = list(itertools.islice(data_loader, 100))

    def infinite_batches():
        data = DatasetFromList(dummy_data, copy=False, serialize=False)
        while True:
            yield from data

    max_iter = 400
    trainer_cls = AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer
    trainer = trainer_cls(model, infinite_batches(), optimizer)
    trainer.register_hooks(
        [
            hooks.IterationTimer(),
            hooks.PeriodicWriter([CommonMetricPrinter(max_iter)]),
            # Profile exactly the last iteration.
            hooks.TorchProfiler(
                lambda trainer: trainer.iter == max_iter - 1, cfg.OUTPUT_DIR, save_tensorboard=True
            ),
        ]
    )
    trainer.train(1, max_iter)
@torch.no_grad()
def benchmark_eval(args):
    """Benchmark inference speed on 100 cached inputs (300 timed iters)."""
    cfg = setup(args)
    if args.config_file.endswith(".yaml"):
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)

        cfg.defrost()
        cfg.DATALOADER.NUM_WORKERS = 0
        data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
    else:
        model = instantiate(cfg.model)
        model.to(cfg.train.device)
        DetectionCheckpointer(model).load(cfg.train.init_checkpoint)

        cfg.dataloader.num_workers = 0
        data_loader = instantiate(cfg.dataloader.test)

    model.eval()
    logger.info("Model:\n{}".format(model))
    # Cache 100 real inputs so data loading is excluded from the timing.
    dummy_data = DatasetFromList(list(itertools.islice(data_loader, 100)), copy=False)

    def repeat_inputs():
        while True:
            yield from dummy_data

    for warmup_idx in range(5):  # warmup
        model(dummy_data[warmup_idx])

    max_iter = 300
    timer = Timer()
    with tqdm.tqdm(total=max_iter) as pbar:
        for step, inputs in enumerate(repeat_inputs()):
            if step == max_iter:
                break
            model(inputs)
            pbar.update()
    logger.info("{} iters in {} seconds.".format(max_iter, timer.seconds()))
if __name__ == "__main__":
    parser = default_argument_parser()
    parser.add_argument("--task", choices=["train", "eval", "data", "data_advanced"], required=True)
    args = parser.parse_args()
    assert not args.eval_only

    logger.info("Environment info:\n" + collect_env_info())
    # Both data tasks start by reporting the baseline RAM footprint.
    if "data" in args.task:
        print("Initial " + RAM_msg())
    if args.task == "data":
        f = benchmark_data
    if args.task == "data_advanced":
        f = benchmark_data_advanced
    elif args.task == "train":
        """
        Note: training speed may not be representative.
        The training cost of a R-CNN model varies with the content of the data
        and the quality of the model.
        """
        f = benchmark_train
    elif args.task == "eval":
        f = benchmark_eval
        # only benchmark single-GPU inference.
        assert args.num_gpus == 1 and args.num_machines == 1
    # Run the selected benchmark, possibly across GPUs/machines.
    launch(f, args.num_gpus, args.num_machines, args.machine_rank, args.dist_url, args=(args,))
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
tools/convert-torchvision-to-d2.py | Python | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import pickle as pkl
import sys
import torch
"""
Usage:
# download one of the ResNet{18,34,50,101,152} models from torchvision:
wget https://download.pytorch.org/models/resnet50-19c8e357.pth -O r50.pth
# run the conversion
./convert-torchvision-to-d2.py r50.pth r50.pkl
# Then, use r50.pkl with the following changes in config:
MODEL:
WEIGHTS: "/path/to/r50.pkl"
PIXEL_MEAN: [123.675, 116.280, 103.530]
PIXEL_STD: [58.395, 57.120, 57.375]
RESNETS:
DEPTH: 50
STRIDE_IN_1X1: False
INPUT:
FORMAT: "RGB"
These models typically produce slightly worse results than the
pre-trained ResNets we use in official configs, which are the
original ResNet models released by MSRA.
"""
if __name__ == "__main__":
    # Usage: ./convert-torchvision-to-d2.py <torchvision.pth> <output.pkl>
    # Renamed from `input`: do not shadow the builtin.
    input_path = sys.argv[1]

    obj = torch.load(input_path, map_location="cpu")

    # Map torchvision parameter names onto detectron2's ResNet naming scheme.
    newmodel = {}
    for k in list(obj.keys()):
        old_k = k
        if "layer" not in k:
            # conv1/bn1 (and fc) outside layer1-4 belong to the stem in d2.
            k = "stem." + k
        for t in [1, 2, 3, 4]:
            # torchvision layerN -> detectron2 res{N+1}
            k = k.replace("layer{}".format(t), "res{}".format(t + 1))
        for t in [1, 2, 3]:
            # batch-norm modules live under the matching conv as ".norm"
            k = k.replace("bn{}".format(t), "conv{}.norm".format(t))
        k = k.replace("downsample.0", "shortcut")
        k = k.replace("downsample.1", "shortcut.norm")
        print(old_k, "->", k)
        newmodel[k] = obj.pop(old_k).detach().numpy()

    res = {"model": newmodel, "__author__": "torchvision", "matching_heuristics": True}

    with open(sys.argv[2], "wb") as f:
        pkl.dump(res, f)
    if obj:
        print("Unconverted keys:", obj.keys())
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
tools/deploy/export_model.py | Python | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
from typing import Dict, List, Tuple
import torch
from torch import Tensor, nn
import detectron2.data.transforms as T
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import build_detection_test_loader, detection_utils
from detectron2.evaluation import COCOEvaluator, inference_on_dataset, print_csv_format
from detectron2.export import TracingAdapter, dump_torchscript_IR, scripting_with_instances
from detectron2.modeling import GeneralizedRCNN, RetinaNet, build_model
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.projects.point_rend import add_pointrend_config
from detectron2.structures import Boxes
from detectron2.utils.env import TORCH_VERSION
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
def setup_cfg(args):
    """Build and freeze the detectron2 config used for model export."""
    config = get_cfg()
    # cuda context is initialized before creating dataloader, so we don't fork anymore
    config.DATALOADER.NUM_WORKERS = 0
    add_pointrend_config(config)
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    return config
def export_caffe2_tracing(cfg, torch_model, inputs):
    """Export the model via the Caffe2 tracer in the format `args.format`."""
    from detectron2.export import Caffe2Tracer

    tracer = Caffe2Tracer(cfg, torch_model, inputs)
    fmt = args.format
    if fmt == "caffe2":
        caffe2_model = tracer.export_caffe2()
        caffe2_model.save_protobuf(args.output)
        # draw the caffe2 graph
        caffe2_model.save_graph(os.path.join(args.output, "model.svg"), inputs=inputs)
        return caffe2_model
    if fmt == "onnx":
        import onnx

        onnx_model = tracer.export_onnx()
        onnx.save(onnx_model, os.path.join(args.output, "model.onnx"))
    elif fmt == "torchscript":
        ts_model = tracer.export_torchscript()
        with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
            torch.jit.save(ts_model, f)
        dump_torchscript_IR(ts_model, args.output)
# experimental. API not yet final
def export_scripting(torch_model):
    """
    Export via torch.jit.script, wrapping the model so it returns plain
    dicts of Instances fields (scriptable) instead of Instances objects.
    """
    assert TORCH_VERSION >= (1, 8)
    # Field name -> type mapping that scripting_with_instances needs to
    # script the Instances container.
    fields = {
        "proposal_boxes": Boxes,
        "objectness_logits": Tensor,
        "pred_boxes": Boxes,
        "scores": Tensor,
        "pred_classes": Tensor,
        "pred_masks": Tensor,
        "pred_keypoints": torch.Tensor,
        "pred_keypoint_heatmaps": torch.Tensor,
    }
    assert args.format == "torchscript", "Scripting only supports torchscript format."

    class ScriptableAdapterBase(nn.Module):
        # Use this adapter to workaround https://github.com/pytorch/pytorch/issues/46944
        # by not retuning instances but dicts. Otherwise the exported model is not deployable
        def __init__(self):
            super().__init__()
            self.model = torch_model
            self.eval()

    if isinstance(torch_model, GeneralizedRCNN):

        class ScriptableAdapter(ScriptableAdapterBase):
            # GeneralizedRCNN exposes `inference`; skip postprocess so the
            # raw per-ROI outputs are returned.
            def forward(self, inputs: Tuple[Dict[str, torch.Tensor]]) -> List[Dict[str, Tensor]]:
                instances = self.model.inference(inputs, do_postprocess=False)
                return [i.get_fields() for i in instances]

    else:

        class ScriptableAdapter(ScriptableAdapterBase):
            # Other meta-architectures are called directly.
            def forward(self, inputs: Tuple[Dict[str, torch.Tensor]]) -> List[Dict[str, Tensor]]:
                instances = self.model(inputs)
                return [i.get_fields() for i in instances]

    ts_model = scripting_with_instances(ScriptableAdapter(), fields)
    with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
        torch.jit.save(ts_model, f)
    dump_torchscript_IR(ts_model, args.output)
    # TODO inference in Python now missing postprocessing glue code
    return None
# experimental. API not yet final
def export_tracing(torch_model, inputs):
    """
    Export via tracing (torchscript or onnx). Returns an evaluation wrapper
    for torchscript exports of GeneralizedRCNN/RetinaNet, else None.
    """
    assert TORCH_VERSION >= (1, 8)
    image = inputs[0]["image"]
    inputs = [{"image": image}]  # remove other unused keys

    if isinstance(torch_model, GeneralizedRCNN):

        def inference(model, inputs):
            # use do_postprocess=False so it returns ROI mask
            inst = model.inference(inputs, do_postprocess=False)[0]
            return [{"instances": inst}]

    else:
        inference = None  # assume that we just call the model directly

    # TracingAdapter flattens dict/Instances in- and outputs into tensors.
    traceable_model = TracingAdapter(torch_model, inputs, inference)

    if args.format == "torchscript":
        ts_model = torch.jit.trace(traceable_model, (image,))
        with PathManager.open(os.path.join(args.output, "model.ts"), "wb") as f:
            torch.jit.save(ts_model, f)
        dump_torchscript_IR(ts_model, args.output)
    elif args.format == "onnx":
        with PathManager.open(os.path.join(args.output, "model.onnx"), "wb") as f:
            torch.onnx.export(traceable_model, (image,), f, opset_version=11)
    logger.info("Inputs schema: " + str(traceable_model.inputs_schema))
    logger.info("Outputs schema: " + str(traceable_model.outputs_schema))

    # Only torchscript exports of these two architectures can be evaluated
    # in Python below.
    if args.format != "torchscript":
        return None
    if not isinstance(torch_model, (GeneralizedRCNN, RetinaNet)):
        return None

    def eval_wrapper(inputs):
        """
        The exported model does not contain the final resize step, which is typically
        unused in deployment but needed for evaluation. We add it manually here.
        """
        input = inputs[0]
        instances = traceable_model.outputs_schema(ts_model(input["image"]))[0]["instances"]
        postprocessed = detector_postprocess(instances, input["height"], input["width"])
        return [{"instances": postprocessed}]

    return eval_wrapper
def get_sample_inputs(args):
    """Build one batch of sample inputs for export.

    Without ``--sample-image``, the first batch of the test dataloader is
    returned. Otherwise the given image is preprocessed exactly the way
    DefaultPredictor does and wrapped into detectron2's input format.
    """
    if args.sample_image is None:
        # No image given: take the first batch from the test dataset.
        loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
        return next(iter(loader))

    # Load the sample image in the format the model was trained with.
    raw = detection_utils.read_image(args.sample_image, format=cfg.INPUT.FORMAT)
    h, w = raw.shape[:2]
    # Same resize augmentation as DefaultPredictor.
    resize = T.ResizeShortestEdge(
        [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
    )
    resized = resize.get_transform(raw).apply_image(raw)
    # HWC uint8 image -> CHW float32 tensor.
    tensor = torch.as_tensor(resized.astype("float32").transpose(2, 0, 1))
    return [{"image": tensor, "height": h, "width": w}]
if __name__ == "__main__":
    # Command-line driver: parse args, build the model, export it with the
    # chosen method/format, and optionally evaluate the exported model.
    parser = argparse.ArgumentParser(description="Export a model for deployment.")
    parser.add_argument(
        "--format",
        choices=["caffe2", "onnx", "torchscript"],
        help="output format",
        default="torchscript",
    )
    parser.add_argument(
        "--export-method",
        choices=["caffe2_tracing", "tracing", "scripting"],
        help="Method to export models",
        default="tracing",
    )
    parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
    parser.add_argument("--sample-image", default=None, type=str, help="sample image for input")
    parser.add_argument("--run-eval", action="store_true")
    parser.add_argument("--output", help="output directory for the converted model")
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()
    logger = setup_logger()
    logger.info("Command line arguments: " + str(args))
    PathManager.mkdirs(args.output)
    # Disable respecialization on new shapes. Otherwise --run-eval will be slow
    torch._C._jit_set_bailout_depth(1)

    cfg = setup_cfg(args)

    # create a torch model
    torch_model = build_model(cfg)
    DetectionCheckpointer(torch_model).resume_or_load(cfg.MODEL.WEIGHTS)
    torch_model.eval()

    # get sample data
    sample_inputs = get_sample_inputs(args)

    # convert and save model
    if args.export_method == "caffe2_tracing":
        exported_model = export_caffe2_tracing(cfg, torch_model, sample_inputs)
    elif args.export_method == "scripting":
        exported_model = export_scripting(torch_model)
    elif args.export_method == "tracing":
        exported_model = export_tracing(torch_model, sample_inputs)

    # run evaluation with the converted model
    if args.run_eval:
        # Exporters return None when Python-side inference is unavailable.
        assert exported_model is not None, (
            "Python inference is not yet implemented for "
            f"export_method={args.export_method}, format={args.format}."
        )
        logger.info("Running evaluation ... this takes a long time if you export to CPU.")
        dataset = cfg.DATASETS.TEST[0]
        data_loader = build_detection_test_loader(cfg, dataset)
        # NOTE: hard-coded evaluator. change to the evaluator for your dataset
        evaluator = COCOEvaluator(dataset, output_dir=args.output)
        metrics = inference_on_dataset(exported_model, data_loader, evaluator)
        print_csv_format(metrics)
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
tools/deploy/torchscript_mask_rcnn.cpp | C++ | // Copyright (c) Facebook, Inc. and its affiliates.
// @lint-ignore-every CLANGTIDY
// This is an example code that demonstrates how to run inference
// with a torchscript format Mask R-CNN model exported by ./export_model.py
// using export method=tracing, caffe2_tracing & scripting.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>
#include <c10/cuda/CUDAStream.h>
#include <torch/csrc/autograd/grad_mode.h>
#include <torch/csrc/jit/runtime/graph_executor.h>
#include <torch/script.h>
// only needed for export_method=tracing
#include <torchvision/vision.h> // @oss-only
// @fb-only: #include <torchvision/csrc/vision.h>
using namespace std;
// Build the (image, im_info) input tuple expected by a model exported with
// export_method=caffe2_tracing. The image becomes a float NCHW tensor on
// `device`; im_info is [height, width, scale=1].
c10::IValue get_caffe2_tracing_inputs(cv::Mat& img, c10::Device device) {
  const int height = img.rows;
  const int width = img.cols;
  // FPN models require divisibility of 32.
  // Tracing mode does padding inside the graph, but caffe2_tracing does not.
  assert(height % 32 == 0 && width % 32 == 0);
  const int channels = 3;
  // from_blob does not copy; the Mat must outlive this view (the .to() below
  // materializes a new tensor anyway).
  auto input =
      torch::from_blob(img.data, {1, height, width, channels}, torch::kUInt8);
  // NHWC to NCHW
  input = input.to(device, torch::kFloat).permute({0, 3, 1, 2}).contiguous();
  // clone() so the tensor does not reference the stack array after return.
  std::array<float, 3> im_info_data{height * 1.0f, width * 1.0f, 1.0f};
  auto im_info =
      torch::from_blob(im_info_data.data(), {1, 3}).clone().to(device);
  return std::make_tuple(input, im_info);
}
// Convert an OpenCV BGR image into the single CHW float tensor expected by a
// model exported with export_method=tracing.
c10::IValue get_tracing_inputs(cv::Mat& img, c10::Device device) {
  const int rows = img.rows;
  const int cols = img.cols;
  const int nchannels = 3;
  auto tensor =
      torch::from_blob(img.data, {rows, cols, nchannels}, torch::kUInt8)
          .to(device, torch::kFloat)
          .permute({2, 0, 1}) // HWC -> CHW
          .contiguous();
  return tensor;
}
// Build the Tuple[Dict[str, Tensor]] input expected by a scripted model
// (see ScriptableAdapter in export_model.py).
c10::IValue get_scripting_inputs(cv::Mat& img, c10::Device device) {
  const int rows = img.rows;
  const int cols = img.cols;
  const int nchannels = 3;
  auto tensor =
      torch::from_blob(img.data, {rows, cols, nchannels}, torch::kUInt8)
          .to(device, torch::kFloat)
          .permute({2, 0, 1}) // HWC -> CHW
          .contiguous();
  c10::Dict<std::string, torch::Tensor> entry;
  entry.insert("image", tensor);
  return std::make_tuple(entry);
}
// Given an image, create inputs in the format required by a model exported
// with `export_method` ("tracing", "caffe2_tracing" or "scripting").
// Fix: take the string by const reference (no copy) and report the bad value
// before aborting instead of dying silently.
c10::IValue
get_inputs(const std::string& export_method, cv::Mat& img, c10::Device device) {
  if (export_method == "tracing")
    return get_tracing_inputs(img, device);
  if (export_method == "caffe2_tracing")
    return get_caffe2_tracing_inputs(img, device);
  if (export_method == "scripting")
    return get_scripting_inputs(img, device);
  std::cerr << "Unknown export method: " << export_method << std::endl;
  abort();
}
// Common container for the four output tensors of a Mask R-CNN model,
// regardless of which export method produced them.
struct MaskRCNNOutputs {
  at::Tensor pred_boxes, pred_classes, pred_masks, scores;
  // Number of detected instances (first dimension of pred_boxes).
  int num_instances() const {
    return pred_boxes.sizes()[0];
  }
};
MaskRCNNOutputs get_outputs(std::string export_method, c10::IValue outputs) {
  // Given outputs of the model, extract tensors from it to turn into a
  // common MaskRCNNOutputs format. The element order of the output tuple
  // differs per export method, so the indices below are load-bearing.
  if (export_method == "tracing") {
    auto out_tuple = outputs.toTuple()->elements();
    // They are ordered alphabetically by their field name in Instances
    return MaskRCNNOutputs{
        out_tuple[0].toTensor(), // pred_boxes
        out_tuple[1].toTensor(), // pred_classes
        out_tuple[2].toTensor(), // pred_masks
        out_tuple[3].toTensor()}; // scores
  }
  if (export_method == "caffe2_tracing") {
    auto out_tuple = outputs.toTuple()->elements();
    // A legacy order used by caffe2 models
    return MaskRCNNOutputs{
        out_tuple[0].toTensor(), // pred_boxes
        out_tuple[2].toTensor(), // pred_classes
        out_tuple[3].toTensor(), // pred_masks
        out_tuple[1].toTensor()}; // scores
  }
  if (export_method == "scripting") {
    // With the ScriptableAdapter defined in export_model.py, the output is
    // List[Dict[str, Any]].
    auto out_dict = outputs.toList().get(0).toGenericDict();
    return MaskRCNNOutputs{
        out_dict.at("pred_boxes").toTensor(),
        out_dict.at("pred_classes").toTensor(),
        out_dict.at("pred_masks").toTensor(),
        out_dict.at("scores").toTensor()};
  }
  // Unknown export method.
  abort();
}
// Load a torchscript Mask R-CNN, run it on one image, benchmark latency,
// and print the detected instances.
int main(int argc, const char* argv[]) {
  if (argc != 4) {
    cerr << R"xx(
Usage:
   ./torchscript_mask_rcnn model.ts input.jpg EXPORT_METHOD

   EXPORT_METHOD can be "tracing", "caffe2_tracing" or "scripting".
)xx";
    return 1;
  }
  std::string image_file = argv[2];
  std::string export_method = argv[3];
  assert(
      export_method == "caffe2_tracing" || export_method == "tracing" ||
      export_method == "scripting");

  // Keep JIT specialization shallow and disable autograd for inference.
  torch::jit::getBailoutDepth() = 1;
  torch::autograd::AutoGradMode guard(false);
  auto module = torch::jit::load(argv[1]);

  assert(module.buffers().size() > 0);
  // Assume that the entire model is on the same device.
  // We just put input to this device.
  auto device = (*begin(module.buffers())).device();

  cv::Mat input_img = cv::imread(image_file, cv::IMREAD_COLOR);
  // Fix: cv::imread returns an empty Mat on failure instead of throwing;
  // without this check, a bad path crashes later with a confusing error.
  if (input_img.empty()) {
    cerr << "Failed to read image: " << image_file << endl;
    return 1;
  }
  auto inputs = get_inputs(export_method, input_img, device);

  // Run the network once and wait for the GPU to finish.
  auto output = module.forward({inputs});
  if (device.is_cuda())
    c10::cuda::getCurrentCUDAStream().synchronize();

  // run 3 more times to benchmark
  int N_benchmark = 3, N_warmup = 1;
  auto start_time = chrono::high_resolution_clock::now();
  for (int i = 0; i < N_benchmark + N_warmup; ++i) {
    if (i == N_warmup)
      start_time = chrono::high_resolution_clock::now();
    output = module.forward({inputs});
    if (device.is_cuda())
      c10::cuda::getCurrentCUDAStream().synchronize();
  }
  auto end_time = chrono::high_resolution_clock::now();
  auto ms = chrono::duration_cast<chrono::microseconds>(end_time - start_time)
                .count();
  cout << "Latency (should vary with different inputs): "
       << ms * 1.0 / 1e6 / N_benchmark << " seconds" << endl;

  // Parse Mask R-CNN outputs
  auto rcnn_outputs = get_outputs(export_method, output);
  cout << "Number of detected objects: " << rcnn_outputs.num_instances()
       << endl;

  cout << "pred_boxes: " << rcnn_outputs.pred_boxes.toString() << " "
       << rcnn_outputs.pred_boxes.sizes() << endl;
  cout << "scores: " << rcnn_outputs.scores.toString() << " "
       << rcnn_outputs.scores.sizes() << endl;
  cout << "pred_classes: " << rcnn_outputs.pred_classes.toString() << " "
       << rcnn_outputs.pred_classes.sizes() << endl;
  cout << "pred_masks: " << rcnn_outputs.pred_masks.toString() << " "
       << rcnn_outputs.pred_masks.sizes() << endl;

  cout << rcnn_outputs.pred_boxes << endl;
  return 0;
}
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
tools/lazyconfig_train_net.py | Python | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Training script using the new "LazyConfig" python config files.
This scripts reads a given python config file and runs the training or evaluation.
It can be used to train any models or dataset as long as they can be
instantiated by the recursive construction defined in the given config file.
Besides lazy construction of models, dataloader, etc., this scripts expects a
few common configuration parameters currently defined in "configs/common/train.py".
To add more complicated training logic, you can easily add other configs
in the config file and implement a new train_net.py to handle them.
"""
import logging
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import LazyConfig, instantiate
from detectron2.engine import (
AMPTrainer,
SimpleTrainer,
default_argument_parser,
default_setup,
default_writers,
hooks,
launch,
)
from detectron2.engine.defaults import create_ddp_model
from detectron2.evaluation import inference_on_dataset, print_csv_format
from detectron2.utils import comm
logger = logging.getLogger("detectron2")
def do_test(cfg, model):
    """Run inference on the test dataloader if an evaluator is configured."""
    if "evaluator" not in cfg.dataloader:
        # Nothing to evaluate with; mirror the implicit None of a no-op.
        return None
    results = inference_on_dataset(
        model, instantiate(cfg.dataloader.test), instantiate(cfg.dataloader.evaluator)
    )
    print_csv_format(results)
    return results
def do_train(args, cfg):
    """
    Args:
        cfg: an object with the following attributes:
            model: instantiate to a module
            dataloader.{train,test}: instantiate to dataloaders
            dataloader.evaluator: instantiate to evaluator for test set
            optimizer: instantiate to an optimizer
            lr_multiplier: instantiate to a fvcore scheduler
            train: other misc config defined in `configs/common/train.py`, including:
                output_dir (str)
                init_checkpoint (str)
                amp.enabled (bool)
                max_iter (int)
                eval_period, log_period (int)
                device (str)
                checkpointer (dict)
                ddp (dict)
    """
    model = instantiate(cfg.model)
    logger = logging.getLogger("detectron2")
    logger.info("Model:\n{}".format(model))
    model.to(cfg.train.device)

    # The optimizer config needs the model to build parameter groups.
    cfg.optimizer.params.model = model
    optim = instantiate(cfg.optimizer)

    train_loader = instantiate(cfg.dataloader.train)

    model = create_ddp_model(model, **cfg.train.ddp)
    # AMPTrainer adds autocast + gradient scaling on top of SimpleTrainer.
    trainer = (AMPTrainer if cfg.train.amp.enabled else SimpleTrainer)(model, train_loader, optim)
    checkpointer = DetectionCheckpointer(
        model,
        cfg.train.output_dir,
        trainer=trainer,
    )
    # None entries (non-main processes) are ignored by register_hooks;
    # checkpointing and writing only happen on the main process.
    trainer.register_hooks(
        [
            hooks.IterationTimer(),
            hooks.LRScheduler(scheduler=instantiate(cfg.lr_multiplier)),
            hooks.PeriodicCheckpointer(checkpointer, **cfg.train.checkpointer)
            if comm.is_main_process()
            else None,
            hooks.EvalHook(cfg.train.eval_period, lambda: do_test(cfg, model)),
            hooks.PeriodicWriter(
                default_writers(cfg.train.output_dir, cfg.train.max_iter),
                period=cfg.train.log_period,
            )
            if comm.is_main_process()
            else None,
        ]
    )

    checkpointer.resume_or_load(cfg.train.init_checkpoint, resume=args.resume)
    if args.resume and checkpointer.has_checkpoint():
        # The checkpoint stores the training iteration that just finished, thus we start
        # at the next iteration
        start_iter = trainer.iter + 1
    else:
        start_iter = 0
    trainer.train(start_iter, cfg.train.max_iter)
def main(args):
    """Load the lazy config, apply CLI overrides, then train or evaluate."""
    cfg = LazyConfig.load(args.config_file)
    cfg = LazyConfig.apply_overrides(cfg, args.opts)
    default_setup(cfg, args)

    if not args.eval_only:
        do_train(args, cfg)
        return

    # Evaluation-only path: build the model, load the checkpoint, run tests.
    model = instantiate(cfg.model)
    model.to(cfg.train.device)
    model = create_ddp_model(model)
    DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
    print(do_test(cfg, model))
if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    # Spawn one worker per GPU (and per machine) and run `main` in each.
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
tools/lightning_train_net.py | Python | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Lightning Trainer should be considered beta at this point
# We have confirmed that training and validation run correctly and produce correct results
# Depending on how you launch the trainer, there are issues with processes terminating correctly
# This module is still dependent on D2 logging, but could be transferred to use Lightning logging
import logging
import os
import time
import weakref
from collections import OrderedDict
from typing import Any, Dict, List
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import build_detection_test_loader, build_detection_train_loader
from detectron2.engine import (
DefaultTrainer,
SimpleTrainer,
default_argument_parser,
default_setup,
default_writers,
hooks,
)
from detectron2.evaluation import print_csv_format
from detectron2.evaluation.testing import flatten_results_dict
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils.events import EventStorage
from detectron2.utils.logger import setup_logger
import pytorch_lightning as pl # type: ignore
from pytorch_lightning import LightningDataModule, LightningModule
from train_net import build_evaluator
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("detectron2")
class TrainingModule(LightningModule):
    """Wraps a detectron2 model for training with PyTorch Lightning.

    Bridges detectron2's EventStorage / hook based bookkeeping with the
    Lightning training loop. Weight loading uses detectron2's checkpointer;
    resuming a run should use Lightning checkpoints.
    """

    def __init__(self, cfg):
        super().__init__()
        if not logger.isEnabledFor(logging.INFO):  # setup_logger is not called for d2
            setup_logger()
        self.cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
        # Entered lazily in training_step, because setup() may run before the
        # worker processes are spawned.
        self.storage: EventStorage = None
        self.model = build_model(self.cfg)

        self.start_iter = 0
        self.max_iter = cfg.SOLVER.MAX_ITER

    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        # Persist the detectron2 iteration counter alongside Lightning's state.
        checkpoint["iteration"] = self.storage.iter

    def on_load_checkpoint(self, checkpointed_state: Dict[str, Any]) -> None:
        self.start_iter = checkpointed_state["iteration"]
        self.storage.iter = self.start_iter

    def setup(self, stage: str):
        if self.cfg.MODEL.WEIGHTS:
            self.checkpointer = DetectionCheckpointer(
                # Assume you want to save checkpoints together with logs/statistics
                self.model,
                self.cfg.OUTPUT_DIR,
            )
            logger.info(f"Load model weights from checkpoint: {self.cfg.MODEL.WEIGHTS}.")
            # Only load weights, use lightning checkpointing if you want to resume
            self.checkpointer.load(self.cfg.MODEL.WEIGHTS)

        self.iteration_timer = hooks.IterationTimer()
        self.iteration_timer.before_train()
        self.data_start = time.perf_counter()
        self.writers = None

    def training_step(self, batch, batch_idx):
        data_time = time.perf_counter() - self.data_start
        # Need to manually enter/exit since trainer may launch processes
        # This ideally belongs in setup, but setup seems to run before processes are spawned
        if self.storage is None:
            self.storage = EventStorage(0)
            self.storage.__enter__()
            self.iteration_timer.trainer = weakref.proxy(self)
            self.iteration_timer.before_step()
            # Writers only exist on the main process; others get an empty
            # list (fix: was `{}`, which also iterates empty but is a dict).
            self.writers = (
                default_writers(self.cfg.OUTPUT_DIR, self.max_iter)
                if comm.is_main_process()
                else []
            )

        loss_dict = self.model(batch)
        SimpleTrainer.write_metrics(loss_dict, data_time)

        opt = self.optimizers()
        self.storage.put_scalar(
            "lr", opt.param_groups[self._best_param_group_id]["lr"], smoothing_hint=False
        )
        self.iteration_timer.after_step()
        self.storage.step()
        # A little odd to put before step here, but it's the best way to get a proper timing
        self.iteration_timer.before_step()

        if self.storage.iter % 20 == 0:
            for writer in self.writers:
                writer.write()
        return sum(loss_dict.values())

    def training_step_end(self, training_step_outpus):
        # Restart the data-loading timer used in training_step.
        self.data_start = time.perf_counter()
        return training_step_outpus

    def training_epoch_end(self, training_step_outputs):
        self.iteration_timer.after_train()
        if comm.is_main_process():
            self.checkpointer.save("model_final")
        for writer in self.writers:
            writer.write()
            writer.close()
        self.storage.__exit__(None, None, None)

    def _process_dataset_evaluation_results(self) -> OrderedDict:
        # Collect per-dataset results from the evaluators that were filled
        # in validation_step; collapse to a single dict for one dataset.
        results = OrderedDict()
        for idx, dataset_name in enumerate(self.cfg.DATASETS.TEST):
            results[dataset_name] = self._evaluators[idx].evaluate()
            if comm.is_main_process():
                print_csv_format(results[dataset_name])
        if len(results) == 1:
            results = list(results.values())[0]
        return results

    def _reset_dataset_evaluators(self):
        # One evaluator per test dataset, in cfg.DATASETS.TEST order.
        self._evaluators = []
        for dataset_name in self.cfg.DATASETS.TEST:
            evaluator = build_evaluator(self.cfg, dataset_name)
            evaluator.reset()
            self._evaluators.append(evaluator)

    def on_validation_epoch_start(self):
        # Fix: Lightning invokes this hook with no arguments; the previous
        # required `_outputs` parameter raised TypeError at validation start.
        self._reset_dataset_evaluators()

    def validation_epoch_end(self, _outputs):
        # Fix: _process_dataset_evaluation_results() takes no arguments;
        # the previous call passed `_outputs` and raised TypeError.
        results = self._process_dataset_evaluation_results()
        flattened_results = flatten_results_dict(results)
        for k, v in flattened_results.items():
            try:
                v = float(v)
            except Exception as e:
                raise ValueError(
                    "[EvalHook] eval_function should return a nested dict of float. "
                    "Got '{}: {}' instead.".format(k, v)
                ) from e
        self.storage.put_scalars(**flattened_results, smoothing_hint=False)

    def validation_step(self, batch, batch_idx: int, dataloader_idx: int = 0) -> None:
        # Evaluators expect a list of inputs; a single sample is wrapped.
        if not isinstance(batch, List):
            batch = [batch]
        outputs = self.model(batch)
        self._evaluators[dataloader_idx].process(batch, outputs)

    def configure_optimizers(self):
        optimizer = build_optimizer(self.cfg, self.model)
        self._best_param_group_id = hooks.LRScheduler.get_best_param_group_id(optimizer)
        scheduler = build_lr_scheduler(self.cfg, optimizer)
        # Step the scheduler every iteration, matching detectron2 semantics.
        return [optimizer], [{"scheduler": scheduler, "interval": "step"}]
class DataModule(LightningDataModule):
    """LightningDataModule backed by detectron2's dataloader builders."""

    def __init__(self, cfg):
        super().__init__()
        self.cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())

    def train_dataloader(self):
        # Single training loader built from the config.
        return build_detection_train_loader(self.cfg)

    def val_dataloader(self):
        # One test loader per dataset listed in cfg.DATASETS.TEST.
        return [
            build_detection_test_loader(self.cfg, name)
            for name in self.cfg.DATASETS.TEST
        ]
def main(args):
    """Entry point: build the config from args, then train/evaluate."""
    train(setup(args), args)
def train(cfg, args):
    """Configure a Lightning Trainer and run training or evaluation.

    Args:
        cfg: a frozen detectron2 CfgNode.
        args: parsed command-line arguments from default_argument_parser().
    """
    trainer_params = {
        # training loop is bounded by max steps, use a large max_epochs to make
        # sure max_steps is met first
        "max_epochs": 10 ** 8,
        "max_steps": cfg.SOLVER.MAX_ITER,
        # validate every EVAL_PERIOD steps; effectively never when disabled
        "val_check_interval": cfg.TEST.EVAL_PERIOD if cfg.TEST.EVAL_PERIOD > 0 else 10 ** 8,
        "num_nodes": args.num_machines,
        "gpus": args.num_gpus,
        "num_sanity_val_steps": 0,
    }
    if cfg.SOLVER.AMP.ENABLED:
        # Mixed-precision training.
        trainer_params["precision"] = 16

    last_checkpoint = os.path.join(cfg.OUTPUT_DIR, "last.ckpt")
    if args.resume:
        # resume training from checkpoint
        trainer_params["resume_from_checkpoint"] = last_checkpoint
        logger.info(f"Resuming training from checkpoint: {last_checkpoint}.")

    trainer = pl.Trainer(**trainer_params)
    logger.info(f"start to train with {args.num_machines} nodes and {args.num_gpus} GPUs")

    module = TrainingModule(cfg)
    data_module = DataModule(cfg)
    if args.eval_only:
        logger.info("Running inference")
        trainer.validate(module, data_module)
    else:
        logger.info("Running training")
        trainer.fit(module, data_module)
def setup(args):
    """Build and freeze the detectron2 config from args, then run the
    standard default setup (logging, output dir, seeding)."""
    config = get_cfg()
    # Layer config-file values, then command-line overrides, over defaults.
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    default_setup(config, args)
    return config
if __name__ == "__main__":
    parser = default_argument_parser()
    args = parser.parse_args()
    # Fix: logging does not take print-style positional arguments; the old
    # `logger.info("Command Line Args:", args)` had no %-placeholder, so the
    # args were never rendered. Use lazy %-formatting instead.
    logger.info("Command Line Args: %s", args)
    main(args)
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
tools/plain_train_net.py | Python | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Detectron2 training script with a plain training loop.
This script reads a given config file and runs the training or evaluation.
It is an entry point that is able to train standard models in detectron2.
In order to let one script support training of many models,
this script contains logic that are specific to these built-in models and therefore
may not be suitable for your own project.
For example, your research project perhaps only needs a single "evaluator".
Therefore, we recommend you to use detectron2 as a library and take
this file as an example of how to use the library.
You may want to write your own script with your datasets and other customizations.
Compared to "train_net.py", this script supports fewer default features.
It also includes fewer abstraction, therefore is easier to add custom logic.
"""
import logging
import os
from collections import OrderedDict
import torch
from torch.nn.parallel import DistributedDataParallel
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
from detectron2.config import get_cfg
from detectron2.data import (
MetadataCatalog,
build_detection_test_loader,
build_detection_train_loader,
)
from detectron2.engine import default_argument_parser, default_setup, default_writers, launch
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
inference_on_dataset,
print_csv_format,
)
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils.events import EventStorage
logger = logging.getLogger("detectron2")
def get_evaluator(cfg, dataset_name, output_folder=None):
    """
    Create evaluator(s) for a given dataset.
    This uses the special metadata "evaluator_type" associated with each builtin dataset.
    For your own dataset, you can simply create an evaluator manually in your
    script and do not have to worry about the hacky if-else logic here.

    Returns a single evaluator, or a DatasetEvaluators combining several when
    the evaluator_type maps to more than one (e.g. "coco_panoptic_seg").
    Raises NotImplementedError for unknown evaluator types.
    """
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    evaluator_list = []
    evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
    if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
        evaluator_list.append(
            SemSegEvaluator(
                dataset_name,
                distributed=True,
                output_dir=output_folder,
            )
        )
    if evaluator_type in ["coco", "coco_panoptic_seg"]:
        evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
    if evaluator_type == "coco_panoptic_seg":
        evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
    # The cityscapes evaluators shell out to the official scripts and only
    # support single-machine runs.
    if evaluator_type == "cityscapes_instance":
        assert (
            torch.cuda.device_count() > comm.get_rank()
        ), "CityscapesEvaluator currently do not work with multiple machines."
        return CityscapesInstanceEvaluator(dataset_name)
    if evaluator_type == "cityscapes_sem_seg":
        assert (
            torch.cuda.device_count() > comm.get_rank()
        ), "CityscapesEvaluator currently do not work with multiple machines."
        return CityscapesSemSegEvaluator(dataset_name)
    if evaluator_type == "pascal_voc":
        return PascalVOCDetectionEvaluator(dataset_name)
    if evaluator_type == "lvis":
        return LVISEvaluator(dataset_name, cfg, True, output_folder)
    if len(evaluator_list) == 0:
        raise NotImplementedError(
            "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
        )
    if len(evaluator_list) == 1:
        return evaluator_list[0]
    return DatasetEvaluators(evaluator_list)
def do_test(cfg, model):
    """Evaluate `model` on every dataset in cfg.DATASETS.TEST.

    Returns an OrderedDict keyed by dataset name, collapsed to the single
    results dict when exactly one test dataset is configured.
    """
    all_results = OrderedDict()
    for name in cfg.DATASETS.TEST:
        loader = build_detection_test_loader(cfg, name)
        evaluator = get_evaluator(
            cfg, name, os.path.join(cfg.OUTPUT_DIR, "inference", name)
        )
        dataset_results = inference_on_dataset(model, loader, evaluator)
        all_results[name] = dataset_results
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(name))
            print_csv_format(dataset_results)
    if len(all_results) == 1:
        return list(all_results.values())[0]
    return all_results
def do_train(cfg, model, resume=False):
    """Plain training loop.

    Args:
        cfg: a frozen detectron2 CfgNode.
        model: the model to train (possibly DDP-wrapped).
        resume (bool): resume from the last checkpoint in cfg.OUTPUT_DIR
            instead of initializing from cfg.MODEL.WEIGHTS.
    """
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)

    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    # "iteration" in the checkpoint is the iteration that just finished, so
    # training restarts at the next one (or at 0 for a fresh run).
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
    )
    max_iter = cfg.SOLVER.MAX_ITER

    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )

    # Console/JSON/tensorboard writers exist only on the main process.
    writers = default_writers(cfg.OUTPUT_DIR, max_iter) if comm.is_main_process() else []

    # compared to "train_net.py", we do not support accurate timing and
    # precise BN here, because they are not trivial to implement in a small training loop
    data_loader = build_detection_train_loader(cfg)
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            storage.iter = iteration

            loss_dict = model(data)
            losses = sum(loss_dict.values())
            assert torch.isfinite(losses).all(), loss_dict

            # Losses reduced across workers are for logging only; the
            # backward pass uses the local (unreduced) losses.
            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()

            if (
                cfg.TEST.EVAL_PERIOD > 0
                and (iteration + 1) % cfg.TEST.EVAL_PERIOD == 0
                and iteration != max_iter - 1
            ):
                do_test(cfg, model)
                # Compared to "train_net.py", the test results are not dumped to EventStorage
                comm.synchronize()

            # Skip the first few iterations (warm-up) before writing metrics.
            if iteration - start_iter > 5 and (
                (iteration + 1) % 20 == 0 or iteration == max_iter - 1
            ):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)
def setup(args):
    """
    Create configs and perform basic setups.
    """
    config = get_cfg()
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    # if you don't like any of the default setup, write your own setup code
    default_setup(config, args)
    return config
def main(args):
    """Train a model (or evaluate it with --eval-only) and return test results."""
    cfg = setup(args)

    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))
    if args.eval_only:
        # Evaluation only: load weights and run the test sets.
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        return do_test(cfg, model)

    # Wrap in DDP when launched with more than one process.
    if comm.get_world_size() > 1:
        model = DistributedDataParallel(
            model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
        )

    do_train(cfg, model, resume=args.resume)
    return do_test(cfg, model)
if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    # Spawn one worker per GPU (and per machine) and run `main` in each.
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
tools/train_net.py | Python | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
"""
A main training script.
This scripts reads a given config file and runs the training or evaluation.
It is an entry point that is made to train standard models in detectron2.
In order to let one script support training of many models,
this script contains logic that are specific to these built-in models and therefore
may not be suitable for your own project.
For example, your research project perhaps only needs a single "evaluator".
Therefore, we recommend you to use detectron2 as an library and take
this file as an example of how to use the library.
You may want to write your own script with your datasets and other customizations.
"""
import logging
import os
from collections import OrderedDict
import torch
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
verify_results,
)
from detectron2.modeling import GeneralizedRCNNWithTTA
def build_evaluator(cfg, dataset_name, output_folder=None):
    """
    Create evaluator(s) for a given dataset.
    This uses the special metadata "evaluator_type" associated with each builtin dataset.
    For your own dataset, you can simply create an evaluator manually in your
    script and do not have to worry about the hacky if-else logic here.

    Returns a single evaluator, or a DatasetEvaluators combining several when
    the evaluator_type maps to more than one (e.g. "coco_panoptic_seg").
    Raises NotImplementedError for unknown evaluator types.
    """
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    evaluator_list = []
    evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
    if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
        evaluator_list.append(
            SemSegEvaluator(
                dataset_name,
                distributed=True,
                output_dir=output_folder,
            )
        )
    if evaluator_type in ["coco", "coco_panoptic_seg"]:
        evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
    if evaluator_type == "coco_panoptic_seg":
        evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
    # The cityscapes evaluators shell out to the official scripts and only
    # support single-machine runs.
    if evaluator_type == "cityscapes_instance":
        assert (
            torch.cuda.device_count() > comm.get_rank()
        ), "CityscapesEvaluator currently do not work with multiple machines."
        return CityscapesInstanceEvaluator(dataset_name)
    if evaluator_type == "cityscapes_sem_seg":
        assert (
            torch.cuda.device_count() > comm.get_rank()
        ), "CityscapesEvaluator currently do not work with multiple machines."
        return CityscapesSemSegEvaluator(dataset_name)
    elif evaluator_type == "pascal_voc":
        return PascalVOCDetectionEvaluator(dataset_name)
    elif evaluator_type == "lvis":
        return LVISEvaluator(dataset_name, output_dir=output_folder)
    if len(evaluator_list) == 0:
        raise NotImplementedError(
            "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
        )
    elif len(evaluator_list) == 1:
        return evaluator_list[0]
    return DatasetEvaluators(evaluator_list)
class Trainer(DefaultTrainer):
    """
    We use the "DefaultTrainer" which contains pre-defined default logic for
    standard training workflow. They may not work for you, especially if you
    are working on a new research project. In that case you can write your
    own training loop. You can use "tools/plain_train_net.py" as an example.
    """

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        # Delegate to the module-level evaluator factory.
        return build_evaluator(cfg, dataset_name, output_folder)

    @classmethod
    def test_with_TTA(cls, cfg, model):
        logger = logging.getLogger("detectron2.trainer")
        # In the end of training, run an evaluation with TTA
        # Only support some R-CNN models.
        logger.info("Running inference with test-time augmentation ...")
        tta_model = GeneralizedRCNNWithTTA(cfg, model)
        tta_folder = os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
        evaluators = []
        for name in cfg.DATASETS.TEST:
            evaluators.append(cls.build_evaluator(cfg, name, output_folder=tta_folder))
        results = cls.test(cfg, tta_model, evaluators)
        # Suffix every metric group so TTA results do not collide with plain ones.
        return OrderedDict((key + "_TTA", value) for key, value in results.items())
def setup(args):
    """Build an immutable config from CLI arguments and run detectron2's default setup."""
    config = get_cfg()
    # Layer the config file first, then command-line overrides on top.
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    default_setup(config, args)
    return config
def main(args):
    """Per-process entry point: evaluate only, or train (optionally with TTA eval hook)."""
    cfg = setup(args)

    if args.eval_only:
        # Evaluation-only path: build the model, load weights, run the test set.
        model = Trainer.build_model(cfg)
        checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
        res = Trainer.test(cfg, model)
        if cfg.TEST.AUG.ENABLED:
            res.update(Trainer.test_with_TTA(cfg, model))
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    # Training path. If you'd like to do anything fancier than the standard
    # training logic, consider writing your own training loop (see
    # plain_train_net.py) or subclassing the trainer.
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        # Evaluate with test-time augmentation at the end of training.
        trainer.register_hooks(
            [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
        )
    return trainer.train()
if __name__ == "__main__":
    # Parse the standard detectron2 arguments (config file, GPUs, machines, etc.).
    args = default_argument_parser().parse_args()
    print("Command Line Args:", args)
    # Spawn one worker per GPU (per machine) and run `main` in each.
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
tools/visualize_data.py | Python | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import os
from itertools import chain
import cv2
import tqdm
from detectron2.config import get_cfg
from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_train_loader
from detectron2.data import detection_utils as utils
from detectron2.data.build import filter_images_with_few_keypoints
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
def setup(args):
    """Build a frozen config for visualization; data loading stays in-process."""
    config = get_cfg()
    if args.config_file:
        config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    # Visualization iterates samples directly; worker subprocesses only get in the way.
    config.DATALOADER.NUM_WORKERS = 0
    config.freeze()
    return config
def parse_args(in_args=None):
    """Parse command-line options for the ground-truth visualization script.

    Args:
        in_args: optional list of argument strings; defaults to sys.argv[1:].
    """
    parser = argparse.ArgumentParser(description="Visualize ground-truth data")
    parser.add_argument(
        "--source",
        required=True,
        choices=["annotation", "dataloader"],
        help="visualize the annotations or the data loader (with pre-processing)",
    )
    parser.add_argument("--config-file", metavar="FILE", help="path to config file")
    parser.add_argument("--output-dir", default="./", help="path to output directory")
    parser.add_argument("--show", action="store_true", help="show output in a window")
    # Everything left over is forwarded as config overrides.
    parser.add_argument(
        "opts",
        default=None,
        nargs=argparse.REMAINDER,
        help="Modify config options using the command-line",
    )
    return parser.parse_args(in_args)
if __name__ == "__main__":
    # Entry point: visualize ground-truth either straight from the dataset dicts
    # ("annotation") or after train-time pre-processing ("dataloader").
    args = parse_args()
    logger = setup_logger()
    logger.info("Arguments: " + str(args))
    cfg = setup(args)
    dirname = args.output_dir
    os.makedirs(dirname, exist_ok=True)
    # Metadata (class names, colors) comes from the first training dataset.
    metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
    def output(vis, fname):
        # Show in a window (blocks until a key press) or save to the output dir.
        if args.show:
            print(fname)
            cv2.imshow("window", vis.get_image()[:, :, ::-1])  # RGB -> BGR for cv2
            cv2.waitKey()
        else:
            filepath = os.path.join(dirname, fname)
            print("Saving to {} ...".format(filepath))
            vis.save(filepath)
    scale = 1.0
    if args.source == "dataloader":
        train_data_loader = build_detection_train_loader(cfg)
        for batch in train_data_loader:
            for per_image in batch:
                # Pytorch tensor is in (C, H, W) format
                img = per_image["image"].permute(1, 2, 0).cpu().detach().numpy()
                img = utils.convert_image_to_rgb(img, cfg.INPUT.FORMAT)
                visualizer = Visualizer(img, metadata=metadata, scale=scale)
                target_fields = per_image["instances"].get_fields()
                labels = [metadata.thing_classes[i] for i in target_fields["gt_classes"]]
                vis = visualizer.overlay_instances(
                    labels=labels,
                    boxes=target_fields.get("gt_boxes", None),
                    masks=target_fields.get("gt_masks", None),
                    keypoints=target_fields.get("gt_keypoints", None),
                )
                output(vis, str(per_image["image_id"]) + ".jpg")
    else:
        # "annotation" source: draw the raw dataset dicts without augmentation.
        dicts = list(chain.from_iterable([DatasetCatalog.get(k) for k in cfg.DATASETS.TRAIN]))
        if cfg.MODEL.KEYPOINT_ON:
            # Drop images with no usable keypoint annotations.
            dicts = filter_images_with_few_keypoints(dicts, 1)
        for dic in tqdm.tqdm(dicts):
            img = utils.read_image(dic["file_name"], "RGB")
            visualizer = Visualizer(img, metadata=metadata, scale=scale)
            vis = visualizer.draw_dataset_dict(dic)
            output(vis, os.path.basename(dic["file_name"]))
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
tools/visualize_json_results.py | Python | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import json
import numpy as np
import os
from collections import defaultdict
import cv2
import tqdm
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import Boxes, BoxMode, Instances
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
def create_instances(predictions, image_size):
    """Build a detectron2 `Instances` from raw COCO-style prediction dicts.

    Keeps only predictions scoring above the module-level ``args.conf_threshold``
    and translates category ids via the module-level ``dataset_id_map``.
    """
    ret = Instances(image_size)
    scores = np.asarray([p["score"] for p in predictions])
    keep = (scores > args.conf_threshold).nonzero()[0]
    ret.scores = scores[keep]
    # Predictions come as XYWH; detectron2 boxes are XYXY.
    boxes_xywh = np.asarray([predictions[i]["bbox"] for i in keep]).reshape(-1, 4)
    ret.pred_boxes = Boxes(BoxMode.convert(boxes_xywh, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS))
    ret.pred_classes = np.asarray(
        [dataset_id_map(predictions[i]["category_id"]) for i in keep]
    )
    try:
        # Segmentations are optional in the prediction file.
        ret.pred_masks = [predictions[i]["segmentation"] for i in keep]
    except KeyError:
        pass
    return ret
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="A script that visualizes the json predictions from COCO or LVIS dataset."
    )
    parser.add_argument("--input", required=True, help="JSON file produced by the model")
    parser.add_argument("--output", required=True, help="output directory")
    parser.add_argument("--dataset", help="name of the dataset", default="coco_2017_val")
    parser.add_argument("--conf-threshold", default=0.5, type=float, help="confidence threshold")
    args = parser.parse_args()
    logger = setup_logger()
    with PathManager.open(args.input, "r") as f:
        predictions = json.load(f)
    # Group the flat prediction list by image id for per-image visualization.
    pred_by_image = defaultdict(list)
    for p in predictions:
        pred_by_image[p["image_id"]].append(p)
    dicts = list(DatasetCatalog.get(args.dataset))
    metadata = MetadataCatalog.get(args.dataset)
    # Pick the dataset-id -> contiguous-id mapping appropriate for the dataset.
    if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):
        def dataset_id_map(ds_id):
            return metadata.thing_dataset_id_to_contiguous_id[ds_id]
    elif "lvis" in args.dataset:
        # LVIS results are in the same format as COCO results, but have a different
        # mapping from dataset category id to contiguous category id in [0, #categories - 1]
        def dataset_id_map(ds_id):
            return ds_id - 1
    else:
        raise ValueError("Unsupported dataset: {}".format(args.dataset))
    os.makedirs(args.output, exist_ok=True)
    for dic in tqdm.tqdm(dicts):
        # cv2 loads BGR; reverse channels to RGB for the Visualizer.
        img = cv2.imread(dic["file_name"], cv2.IMREAD_COLOR)[:, :, ::-1]
        basename = os.path.basename(dic["file_name"])
        predictions = create_instances(pred_by_image[dic["image_id"]], img.shape[:2])
        vis = Visualizer(img, metadata)
        vis_pred = vis.draw_instance_predictions(predictions).get_image()
        vis = Visualizer(img, metadata)
        vis_gt = vis.draw_dataset_dict(dic).get_image()
        # Side-by-side: predictions on the left, ground truth on the right.
        concat = np.concatenate((vis_pred, vis_gt), axis=1)
        cv2.imwrite(os.path.join(args.output, basename), concat[:, :, ::-1])
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
train_net.py | Python | import logging
import os
from collections import OrderedDict
import torch
from torch.nn.parallel import DistributedDataParallel
import time
import datetime
import json
from fvcore.common.timer import Timer
import detectron2.utils.comm as comm
from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
from detectron2.config import get_cfg
from detectron2.data import (
MetadataCatalog,
build_detection_test_loader,
)
from detectron2.engine import default_argument_parser, default_setup, launch
from detectron2.evaluation import (
COCOEvaluator,
LVISEvaluator,
inference_on_dataset,
print_csv_format,
)
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils.events import (
CommonMetricPrinter,
EventStorage,
JSONWriter,
TensorboardXWriter,
)
from detectron2.modeling.test_time_augmentation import GeneralizedRCNNWithTTA
from detectron2.data.dataset_mapper import DatasetMapper
from detectron2.data.build import build_detection_train_loader
from centernet.config import add_centernet_config
from centernet.data.custom_build_augmentation import build_custom_augmentation
logger = logging.getLogger("detectron2")
def do_test(cfg, model):
    """Run inference + evaluation on every dataset in cfg.DATASETS.TEST.

    Returns an OrderedDict mapping dataset name -> results; when there is
    exactly one test dataset, its results dict is returned directly.
    """
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        # Use the custom test-time mapper unless the default input type is requested.
        mapper = None if cfg.INPUT.TEST_INPUT_TYPE == 'default' else \
            DatasetMapper(
                cfg, False, augmentations=build_custom_augmentation(cfg, False))
        data_loader = build_detection_test_loader(cfg, dataset_name, mapper=mapper)
        output_folder = os.path.join(
            cfg.OUTPUT_DIR, "inference_{}".format(dataset_name))
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        if evaluator_type == "lvis":
            evaluator = LVISEvaluator(dataset_name, cfg, True, output_folder)
        elif evaluator_type == 'coco':
            evaluator = COCOEvaluator(dataset_name, cfg, True, output_folder)
        else:
            # Fail loudly: the previous `assert 0, evaluator_type` is stripped
            # under `python -O`, which would leave `evaluator` unbound.
            raise NotImplementedError(
                "no evaluator for dataset {} with type {}".format(
                    dataset_name, evaluator_type))
        results[dataset_name] = inference_on_dataset(
            model, data_loader, evaluator)
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(
                dataset_name))
            print_csv_format(results[dataset_name])
    # Single test dataset: unwrap for convenience.
    if len(results) == 1:
        results = list(results.values())[0]
    return results
def do_train(cfg, model, resume=False):
    """Plain training loop: optimize `model` for cfg.SOLVER.MAX_ITER (or
    cfg.SOLVER.TRAIN_ITER when >= 0) iterations, with periodic checkpointing,
    metric logging and evaluation."""
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)
    checkpointer = DetectionCheckpointer(
        model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler
    )
    # Resume from the last checkpoint (or just load initial weights); the
    # stored iteration defaults to -1, hence the +1.
    start_iter = (
        checkpointer.resume_or_load(
            cfg.MODEL.WEIGHTS, resume=resume,
        ).get("iteration", -1) + 1
    )
    if cfg.SOLVER.RESET_ITER:
        logger.info('Reset loaded iteration. Start training from iteration 0.')
        start_iter = 0
    # TRAIN_ITER < 0 means "train for the schedule's full MAX_ITER".
    max_iter = cfg.SOLVER.MAX_ITER if cfg.SOLVER.TRAIN_ITER < 0 else cfg.SOLVER.TRAIN_ITER
    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )
    # Only the main process writes console/JSON/tensorboard metrics.
    writers = (
        [
            CommonMetricPrinter(max_iter),
            JSONWriter(os.path.join(cfg.OUTPUT_DIR, "metrics.json")),
            TensorboardXWriter(cfg.OUTPUT_DIR),
        ]
        if comm.is_main_process()
        else []
    )
    # Optional custom train-time augmentation pipeline.
    mapper = DatasetMapper(cfg, True) if cfg.INPUT.CUSTOM_AUG == '' else \
        DatasetMapper(cfg, True, augmentations=build_custom_augmentation(cfg, True))
    if cfg.DATALOADER.SAMPLER_TRAIN in ['TrainingSampler', 'RepeatFactorTrainingSampler']:
        data_loader = build_detection_train_loader(cfg, mapper=mapper)
    else:
        # Custom samplers live in the centernet package; import lazily.
        from centernet.data.custom_dataset_dataloader import build_custom_train_loader
        data_loader = build_custom_train_loader(cfg, mapper=mapper)
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        step_timer = Timer()
        data_timer = Timer()
        start_time = time.perf_counter()
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            # Time spent waiting on the data loader for this batch.
            data_time = data_timer.seconds()
            storage.put_scalars(data_time=data_time)
            step_timer.reset()
            iteration = iteration + 1
            storage.step()
            loss_dict = model(data)
            losses = sum(
                loss for k, loss in loss_dict.items())
            assert torch.isfinite(losses).all(), loss_dict
            # Reduce losses across workers for logging only.
            loss_dict_reduced = {k: v.item() \
                for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(
                    total_loss=losses_reduced, **loss_dict_reduced)
            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar(
                "lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            step_time = step_timer.seconds()
            storage.put_scalars(time=step_time)
            data_timer.reset()
            scheduler.step()
            # Periodic evaluation (skipped at the final iteration; done after).
            if (
                cfg.TEST.EVAL_PERIOD > 0
                and iteration % cfg.TEST.EVAL_PERIOD == 0
                and iteration != max_iter
            ):
                do_test(cfg, model)
                comm.synchronize()
            # Skip the first noisy iterations, then log every 20 iterations.
            if iteration - start_iter > 5 and \
                (iteration % 20 == 0 or iteration == max_iter):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)
        total_time = time.perf_counter() - start_time
        logger.info(
            "Total training time: {}".format(
                str(datetime.timedelta(seconds=int(total_time)))))
def setup(args):
    """
    Create configs and perform basic setups.

    Supports an '/auto' placeholder in cfg.OUTPUT_DIR, which is replaced by the
    config file's base name (without extension).
    """
    cfg = get_cfg()
    add_centernet_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    if '/auto' in cfg.OUTPUT_DIR:
        # Derive the run directory from the config name. splitext replaces the
        # previous fixed `[:-5]` slice, which silently mangled names for any
        # extension that is not exactly 5 characters (e.g. '.yml').
        file_name = os.path.splitext(os.path.basename(args.config_file))[0]
        cfg.OUTPUT_DIR = cfg.OUTPUT_DIR.replace('/auto', '/{}'.format(file_name))
        logger.info('OUTPUT_DIR: {}'.format(cfg.OUTPUT_DIR))
    cfg.freeze()
    default_setup(cfg, args)
    return cfg
def main(args):
    """Worker entry point: build the model, then evaluate only or train + test."""
    cfg = setup(args)
    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))

    if args.eval_only:
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        if cfg.TEST.AUG.ENABLED:
            logger.info("Running inference with test-time augmentation ...")
            model = GeneralizedRCNNWithTTA(cfg, model, batch_size=1)
        return do_test(cfg, model)

    # Wrap in DDP only when more than one process participates.
    if comm.get_world_size() > 1:
        model = DistributedDataParallel(
            model,
            device_ids=[comm.get_local_rank()],
            broadcast_buffers=False,
            find_unused_parameters=True,
        )
    do_train(cfg, model, resume=args.resume)
    return do_test(cfg, model)
if __name__ == "__main__":
    args = default_argument_parser()
    # Extra option on top of the default detectron2 parser: pin visible GPUs.
    args.add_argument('--manual_device', default='')
    args = args.parse_args()
    if args.manual_device != '':
        os.environ['CUDA_VISIBLE_DEVICES'] = args.manual_device
    # Pick a random port for the distributed rendezvous to avoid collisions.
    args.dist_url = 'tcp://127.0.0.1:{}'.format(
        torch.randint(11111, 60000, (1,))[0].item())
    print("Command Line Args:", args)
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
| xingyizhou/CenterNet2 | 1,222 | Two-stage CenterNet | Python | xingyizhou | Xingyi Zhou | Meta |
experiments/coco_pose_tracking.sh | Shell | cd src
# train, the model is finetuned from a CenterNet detection model from the CenterNet model zoo.
python main.py tracking,multi_pose --exp_id coco_pose_tracking --dataset coco_hp --load_model ../models/multi_pose_dla_3x.pth --gpus 0,1,2,3,4,5,6,7 --batch_size 128 --lr 5e-4 --num_workers 16 --pre_hm --shift 0.05 --scale 0.05 --hm_disturb 0.05 --lost_disturb 0.4 --fp_disturb 0.1 | xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
experiments/coco_tracking.sh | Shell | cd src
# train, the model is finetuned from a CenterNet detection model from the CenterNet model zoo.
python main.py tracking --exp_id coco_tracking --tracking --load_model ../models/ctdet_coco_dla_2x.pth --gpus 0,1,2,3,4,5,6,7 --batch_size 128 --lr 5e-4 --num_workers 16 --pre_hm --shift 0.05 --scale 0.05 --hm_disturb 0.05 --lost_disturb 0.4 --fp_disturb 0.1 | xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
experiments/crowdhuman.sh | Shell | cd src
# train
python main.py tracking --exp_id crowdhuman --dataset crowdhuman --ltrb_amodal --pre_hm --shift 0.05 --scale 0.05 --hm_disturb 0.05 --lost_disturb 0.4 --fp_disturb 0.1 --num_epochs 140 --lr_step 90,120 --save_point 60,90 --gpus 0,1,2,3 --batch_size 64 --lr 2.5e-4 --num_workers 16
cd .. | xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
experiments/kitti_fulltrain.sh | Shell | cd src
# train: fine-tune on the full KITTI tracking train split, starting from the
# nuScenes 3D detection checkpoint; heatmap/track disturbances augment targets
python main.py tracking --exp_id kitti_fulltrain --dataset kitti_tracking --dataset_version train --pre_hm --same_aug --hm_disturb 0.05 --lost_disturb 0.2 --fp_disturb 0.1 --gpus 0,1 --batch_size 16 --load_model ../models/nuScenes_3Ddetection_e140.pth
# test: run tracking on the KITTI test split, resuming the trained weights
python test.py tracking --exp_id kitti_fulltrain --dataset kitti_tracking --dataset_version test --pre_hm --track_thresh 0.4 --resume
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
experiments/kitti_half.sh | Shell | cd src
# train
python main.py tracking --exp_id kitti_half --dataset kitti_tracking --dataset_version train_half --pre_hm --same_aug --hm_disturb 0.05 --lost_disturb 0.2 --fp_disturb 0.1 --gpus 0,1 --batch_size 16 --load_model ../models/nuScenes_3Ddetection_e140.pth
# test
python test.py tracking --exp_id kitti_half --dataset kitti_tracking --dataset_version val_half --pre_hm --track_thresh 0.4 --resume | xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
experiments/kitti_half_sc.sh | Shell | cd src
# train
python main.py tracking --exp_id kitti_half_sc --dataset kitti_tracking --dataset_version train_half --pre_hm --same_aug --hm_disturb 0.05 --lost_disturb 0.2 --fp_disturb 0.1 --gpus 0,1 --batch_size 16
# test
python test.py tracking --exp_id kitti_half_sc --dataset kitti_tracking --dataset_version val_half --pre_hm --track_thresh 0.4 --pre_thresh 0.5 --resume | xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
experiments/mot17_fulltrain.sh | Shell | cd src
# train
python main.py tracking --exp_id mot17_fulltrain --dataset mot --dataset_version 17trainval --pre_hm --ltrb_amodal --same_aug --hm_disturb 0.05 --lost_disturb 0.4 --fp_disturb 0.1 --gpus 0,1 --load_model ../models/crowdhuman.pth
# test
python test.py tracking --exp_id mot17_fulltrain --dataset mot --dataset_version 17test --pre_hm --ltrb_amodal --track_thresh 0.4 --pre_thresh 0.5 --resume
cd .. | xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
experiments/mot17_fulltrain_sc.sh | Shell | cd src
# train
python main.py tracking --exp_id mot17_fulltrain_sc --dataset mot --dataset_version 17trainval --pre_hm --ltrb_amodal --same_aug --hm_disturb 0.05 --lost_disturb 0.4 --fp_disturb 0.1 --gpus 0,1
# test
python test.py tracking --exp_id mot17_fulltrain_sc --dataset mot --dataset_version 17test --pre_hm --ltrb_amodal --track_thresh 0.4 --pre_thresh 0.5 --resume
# test with public detection
python test.py tracking --exp_id mot17_fulltrain_sc --dataset mot --dataset_version 17test --pre_hm --ltrb_amodal --track_thresh 0.4 --pre_thresh 0.5 --resume --public_det --load_results ../data/mot17/results/test_det.json
cd .. | xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
experiments/mot17_half.sh | Shell | cd src
# train
python main.py tracking --exp_id mot17_half --dataset mot --dataset_version 17halftrain --pre_hm --ltrb_amodal --same_aug --hm_disturb 0.05 --lost_disturb 0.4 --fp_disturb 0.1 --gpus 0,1 --load_model ../models/crowdhuman.pth
# test
python test.py tracking --exp_id mot17_half --dataset mot --dataset_version 17halfval --pre_hm --ltrb_amodal --track_thresh 0.4 --pre_thresh 0.5 --resume
# test with public detection
python test.py tracking --exp_id mot17_half --dataset mot --dataset_version 17halfval --pre_hm --ltrb_amodal --track_thresh 0.4 --pre_thresh 0.5 --resume --public_det --load_results ../data/mot17/results/val_half_det.json
cd .. | xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
experiments/mot17_half_sc.sh | Shell | cd src
# train
python main.py tracking --exp_id mot17_half_sc --dataset mot --dataset_version 17halftrain --pre_hm --ltrb_amodal --same_aug --hm_disturb 0.05 --lost_disturb 0.4 --fp_disturb 0.1 --gpus 0,1
# test
python test.py tracking --exp_id mot17_half_sc --dataset mot --dataset_version 17halfval --pre_hm --ltrb_amodal --track_thresh 0.4 --pre_thresh 0.5 --resume
cd .. | xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
experiments/nuScenes_3Ddetection_e140.sh | Shell | cd src
# train
python main.py ddd --exp_id nuScenes_3Ddetection_e140 --dataset nuscenes --batch_size 128 --gpus 0,1,2,3,4,5,6,7 --lr 5e-4 --num_epochs 140 --lr_step 90,120 --save_point 90,120
# test
python test.py ddd --exp_id nuScenes_3Ddetection_e140 --dataset nuscenes --resume
cd .. | xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
experiments/nuScenes_3Dtracking.sh | Shell | cd src
# train
python main.py tracking,ddd --exp_id nuScenes_3Dtracking --dataset nuscenes --pre_hm --load_model ../models/nuScenes_3Ddetection_e140.pth --shift 0.01 --scale 0.05 --lost_disturb 0.4 --fp_disturb 0.1 --hm_disturb 0.05 --batch_size 64 --gpus 0,1,2,3 --lr 2.5e-4 --save_point 60
# test
python test.py tracking,ddd --exp_id nuScenes_3Dtracking --dataset nuscenes --pre_hm --track_thresh 0.1 --resume
cd .. | xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/_init_paths.py | Python | import os.path as osp
import sys
def add_path(path):
    """Prepend *path* to sys.path unless it is already present."""
    if path in sys.path:
        return
    sys.path.insert(0, path)
# Resolve the directory containing this file so paths work from any CWD.
this_dir = osp.dirname(__file__)
# Add lib to PYTHONPATH
# so modules under src/lib (e.g. `opts`, `detector`) are importable by scripts.
lib_path = osp.join(this_dir, 'lib')
add_path(lib_path)
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/convert_onnx.py | Python | '''
Script to convert a trained CenterNet model to ONNX, currently only
support non-DCN models.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import json
import cv2
import numpy as np
import time
from progress.bar import Bar
import torch
import copy
from model.model import create_model, load_model
from opts import opts
from dataset.dataset_factory import dataset_factory
from detector import Detector
def convert_onnx(opt):
  """Export the model selected by `opt` to ../models/<exp_id>.onnx.

  Tracking models are traced with (image, previous_image[, previous_heatmap])
  dummy inputs; plain detectors with a single image tensor.
  """
  os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
  # Make the network return a flat output list (needed for ONNX tracing).
  opt.model_output_list = True
  if opt.gpus[0] >= 0:
    opt.device = torch.device('cuda')
  else:
    opt.device = torch.device('cpu')
  Dataset = dataset_factory[opt.test_dataset]
  opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
  print(opt)
  model = create_model(
    opt.arch, opt.heads, opt.head_conv, opt=opt)
  if opt.load_model != '':
    model = load_model(model, opt.load_model, opt)
  model = model.to(opt.device)
  model.eval()
  # Dummy inputs fix the exported graph's input shapes.
  dummy_input1 = torch.randn(1, 3, opt.input_h, opt.input_w).to(opt.device)
  if opt.tracking:
    dummy_input2 = torch.randn(1, 3, opt.input_h, opt.input_w).to(opt.device)
    if opt.pre_hm:
      # The previous-frame heatmap is a single-channel input.
      dummy_input3 = torch.randn(1, 1, opt.input_h, opt.input_w).to(opt.device)
      torch.onnx.export(
        model, (dummy_input1, dummy_input2, dummy_input3),
        "../models/{}.onnx".format(opt.exp_id))
    else:
      torch.onnx.export(
        model, (dummy_input1, dummy_input2),
        "../models/{}.onnx".format(opt.exp_id))
  else:
    torch.onnx.export(
      model, (dummy_input1, ),
      "../models/{}.onnx".format(opt.exp_id))
if __name__ == '__main__':
  # Parse command-line options and run the ONNX export.
  opt = opts().parse()
  convert_onnx(opt)
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/demo.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import cv2
import json
import copy
import numpy as np
from opts import opts
from detector import Detector
image_ext = ['jpg', 'jpeg', 'png', 'webp']
video_ext = ['mp4', 'mov', 'avi', 'mkv']
time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge', 'display']
def demo(opt):
  """Run detection/tracking on a webcam, video file, image directory or single
  image; optionally save a result video and a JSON file of per-frame results.

  Exits the process via save_and_exit() when the input is exhausted or ESC is
  pressed.
  """
  os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
  # Force at least debug level 1 so visualization images are produced.
  opt.debug = max(opt.debug, 1)
  detector = Detector(opt)
  if opt.demo == 'webcam' or \
    opt.demo[opt.demo.rfind('.') + 1:].lower() in video_ext:
    is_video = True
    # demo on video stream
    cam = cv2.VideoCapture(0 if opt.demo == 'webcam' else opt.demo)
  else:
    is_video = False
    # Demo on images sequences
    if os.path.isdir(opt.demo):
      image_names = []
      ls = os.listdir(opt.demo)
      for file_name in sorted(ls):
        ext = file_name[file_name.rfind('.') + 1:].lower()
        if ext in image_ext:
          image_names.append(os.path.join(opt.demo, file_name))
    else:
      image_names = [opt.demo]
  # Initialize output video
  out = None
  out_name = opt.demo[opt.demo.rfind('/') + 1:]
  print('out_name', out_name)
  if opt.save_video:
    # fourcc = cv2.VideoWriter_fourcc(*'XVID')
    fourcc = cv2.VideoWriter_fourcc(*'H264')
    out = cv2.VideoWriter('../results/{}.mp4'.format(
      opt.exp_id + '_' + out_name),fourcc, opt.save_framerate, (
      opt.video_w, opt.video_h))
  if opt.debug < 5:
    detector.pause = False
  cnt = 0
  results = {}
  while True:
    if is_video:
      _, img = cam.read()
      if img is None:
        # End of stream: flush results and terminate the process.
        save_and_exit(opt, out, results, out_name)
    else:
      if cnt < len(image_names):
        img = cv2.imread(image_names[cnt])
      else:
        # All images consumed: flush results and terminate the process.
        save_and_exit(opt, out, results, out_name)
    cnt += 1
    # resize the original video for saving video results
    if opt.resize_video:
      img = cv2.resize(img, (opt.video_w, opt.video_h))
    # skip the first X frames of the video
    if cnt < opt.skip_first:
      continue
    cv2.imshow('input', img)
    # track or detect the image.
    ret = detector.run(img)
    # log run time
    time_str = 'frame {} |'.format(cnt)
    for stat in time_stats:
      time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
    print(time_str)
    # results[cnt] is a list of dicts:
    #  [{'bbox': [x1, y1, x2, y2], 'tracking_id': id, 'category_id': c, ...}]
    results[cnt] = ret['results']
    # save debug image to video
    if opt.save_video:
      out.write(ret['generic'])
      if not is_video:
        cv2.imwrite('../results/demo{}.jpg'.format(cnt), ret['generic'])
    # esc to quit and finish saving video
    if cv2.waitKey(1) == 27:
      save_and_exit(opt, out, results, out_name)
      return
  # NOTE(review): unreachable — save_and_exit() above calls sys.exit().
  save_and_exit(opt, out, results)
def save_and_exit(opt, out=None, results=None, out_name=''):
  """Persist tracking results and/or release the video writer, then exit.

  Always terminates the process with status 0.
  """
  if opt.save_results and (results is not None):
    save_dir = '../results/{}_results.json'.format(opt.exp_id + '_' + out_name)
    print('saving results to', save_dir)
    # Use a context manager so the file is flushed and closed before exit
    # (the previous bare open() leaked the handle).
    with open(save_dir, 'w') as f:
      json.dump(_to_list(copy.deepcopy(results)), f)
  if opt.save_video and out is not None:
    out.release()
  sys.exit(0)
def _to_list(results):
for img_id in results:
for t in range(len(results[img_id])):
for k in results[img_id][t]:
if isinstance(results[img_id][t][k], (np.ndarray, np.float32)):
results[img_id][t][k] = results[img_id][t][k].tolist()
return results
if __name__ == '__main__':
  # `init` parses args and finalizes dataset/head options before the demo runs.
  opt = opts().init()
  demo(opt)
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/dataset/dataset_factory.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
from .datasets.coco import COCO
from .datasets.kitti import KITTI
from .datasets.coco_hp import COCOHP
from .datasets.mot import MOT
from .datasets.nuscenes import nuScenes
from .datasets.crowdhuman import CrowdHuman
from .datasets.kitti_tracking import KITTITracking
from .datasets.custom_dataset import CustomDataset
# Registry mapping the --dataset / --test_dataset command-line name to its class.
dataset_factory = {
  'custom': CustomDataset,
  'coco': COCO,
  'kitti': KITTI,
  'coco_hp': COCOHP,
  'mot': MOT,
  'nuscenes': nuScenes,
  'crowdhuman': CrowdHuman,
  'kitti_tracking': KITTITracking,
}
def get_dataset(dataset):
  # Look up a dataset class by its registry name; raises KeyError if unknown.
  return dataset_factory[dataset]
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/dataset/datasets/coco.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
import copy
from ..generic_dataset import GenericDataset
class COCO(GenericDataset):
default_resolution = [512, 512]
num_categories = 80
class_name = [
'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush']
_valid_ids = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 27, 28, 31, 32, 33, 34, 35, 36,
37, 38, 39, 40, 41, 42, 43, 44, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
58, 59, 60, 61, 62, 63, 64, 65, 67, 70,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 84, 85, 86, 87, 88, 89, 90]
cat_ids = {v: i + 1 for i, v in enumerate(_valid_ids)}
num_joints = 17
flip_idx = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16]]
edges = [[0, 1], [0, 2], [1, 3], [2, 4],
[4, 6], [3, 5], [5, 6],
[5, 7], [7, 9], [6, 8], [8, 10],
[6, 12], [5, 11], [11, 12],
[12, 14], [14, 16], [11, 13], [13, 15]]
max_objs = 128
def __init__(self, opt, split):
# load annotations
data_dir = os.path.join(opt.data_dir, 'coco')
img_dir = os.path.join(data_dir, '{}2017'.format(split))
if opt.trainval:
split = 'test'
ann_path = os.path.join(
data_dir, 'annotations',
'image_info_test-dev2017.json')
else:
ann_path = os.path.join(
data_dir, 'annotations',
'instances_{}2017.json').format(split)
self.images = None
# load image list and coco
super(COCO, self).__init__(opt, split, ann_path, img_dir)
self.num_samples = len(self.images)
print('Loaded {} {} samples'.format(split, self.num_samples))
def _to_float(self, x):
return float("{:.2f}".format(x))
def convert_eval_format(self, all_bboxes):
detections = []
for image_id in all_bboxes:
if type(all_bboxes[image_id]) != type({}):
# newest format
for j in range(len(all_bboxes[image_id])):
item = all_bboxes[image_id][j]
cat_id = item['class'] - 1
category_id = self._valid_ids[cat_id]
bbox = item['bbox']
bbox[2] -= bbox[0]
bbox[3] -= bbox[1]
bbox_out = list(map(self._to_float, bbox[0:4]))
detection = {
"image_id": int(image_id),
"category_id": int(category_id),
"bbox": bbox_out,
"score": float("{:.2f}".format(item['score']))
}
detections.append(detection)
return detections
def __len__(self):
return self.num_samples
def save_results(self, results, save_dir):
json.dump(self.convert_eval_format(results),
open('{}/results_coco.json'.format(save_dir), 'w'))
def run_eval(self, results, save_dir):
    """Dump detections to disk and score them with the official COCO bbox metric."""
    self.save_results(results, save_dir)
    result_file = '{}/results_coco.json'.format(save_dir)
    coco_dets = self.coco.loadRes(result_file)
    evaluator = COCOeval(self.coco, coco_dets, "bbox")
    evaluator.evaluate()
    evaluator.accumulate()
    evaluator.summarize()
src/lib/dataset/datasets/coco_hp.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
from ..generic_dataset import GenericDataset
class COCOHP(GenericDataset):
    """COCO human-pose split: a single 'person' category with 17 keypoints."""
    num_categories = 1
    class_name = ['']
    num_joints = 17
    default_resolution = [512, 512]
    # Joint-index pairs swapped under horizontal flip (left <-> right parts).
    flip_idx = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
                [11, 12], [13, 14], [15, 16]]
    # Skeleton connectivity between the 17 joints (used for visualization).
    edges = [[0, 1], [0, 2], [1, 3], [2, 4],
             [4, 6], [3, 5], [5, 6],
             [5, 7], [7, 9], [6, 8], [8, 10],
             [6, 12], [5, 11], [11, 12],
             [12, 14], [14, 16], [11, 13], [13, 15]]
    max_objs = 32
    cat_ids = {1: 1}

    def __init__(self, opt, split):
        """Load person-keypoints annotations for *split*.

        For the train split, images without any annotation are dropped so
        every sample contributes supervision.
        """
        data_dir = os.path.join(opt.data_dir, 'coco')
        img_dir = os.path.join(data_dir, '{}2017'.format(split))
        if split == 'test':
            # Fixed: the original applied a trailing .format(split) to this
            # placeholder-free path — a no-op that could corrupt the path if
            # data_dir ever contained braces.
            ann_path = os.path.join(data_dir, 'annotations',
                                    'image_info_test-dev2017.json')
        else:
            # Format the file name before joining for the same reason.
            ann_path = os.path.join(
                data_dir, 'annotations',
                'person_keypoints_{}2017.json'.format(split))
        self.images = None
        # load image list and coco
        super(COCOHP, self).__init__(opt, split, ann_path, img_dir)
        if split == 'train':
            image_ids = self.coco.getImgIds()
            self.images = []
            for img_id in image_ids:
                idxs = self.coco.getAnnIds(imgIds=[img_id])
                if len(idxs) > 0:
                    self.images.append(img_id)
        self.num_samples = len(self.images)
        print('Loaded {} {} samples'.format(split, self.num_samples))

    def _to_float(self, x):
        """Round to two decimals (as float) for compact JSON."""
        return float("{:.2f}".format(x))

    def convert_eval_format(self, all_bboxes):
        """Convert network output to the COCO keypoint-results format.

        Keypoint visibility is hard-coded to 1 for all 17 joints since the
        network does not predict visibility.
        """
        detections = []
        for image_id in all_bboxes:
            if type(all_bboxes[image_id]) != type({}):
                # newest format
                for j in range(len(all_bboxes[image_id])):
                    item = all_bboxes[image_id][j]
                    if item['class'] != 1:
                        continue
                    category_id = 1
                    keypoints = np.concatenate([
                        np.array(item['hps'], dtype=np.float32).reshape(-1, 2),
                        np.ones((17, 1), dtype=np.float32)],
                        axis=1).reshape(51).tolist()
                    detection = {
                        "image_id": int(image_id),
                        "category_id": int(category_id),
                        "score": float("{:.2f}".format(item['score'])),
                        "keypoints": keypoints
                    }
                    if 'bbox' in item:
                        bbox = item['bbox']
                        bbox[2] -= bbox[0]  # x2 -> width  (in place)
                        bbox[3] -= bbox[1]  # y2 -> height (in place)
                        detection['bbox'] = list(
                            map(self._to_float, bbox[0:4]))
                    detections.append(detection)
        return detections

    def __len__(self):
        # Number of images kept for this split.
        return self.num_samples

    def save_results(self, results, save_dir):
        """Write keypoint results to <save_dir>/results_cocohp.json."""
        # Context manager fixes the leaked file handle in the original code.
        with open('{}/results_cocohp.json'.format(save_dir), 'w') as f:
            json.dump(self.convert_eval_format(results), f)

    def run_eval(self, results, save_dir):
        """Evaluate saved results with the official COCO keypoint and bbox metrics."""
        self.save_results(results, save_dir)
        coco_dets = self.coco.loadRes('{}/results_cocohp.json'.format(save_dir))
        coco_eval = COCOeval(self.coco, coco_dets, "keypoints")
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        coco_eval = COCOeval(self.coco, coco_dets, "bbox")
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/dataset/datasets/crowdhuman.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
from ..generic_dataset import GenericDataset
class CrowdHuman(GenericDataset):
    """CrowdHuman person-detection dataset (single 'person' category)."""
    # NOTE(review): sibling dataset classes use `num_categories`; confirm
    # whether `num_classes` here is intentional or a latent naming bug.
    num_classes = 1
    num_joints = 17
    default_resolution = [512, 512]
    max_objs = 128
    class_name = ['person']
    cat_ids = {1: 1}

    def __init__(self, opt, split):
        # Fixed: dropped a redundant argument-less super().__init__() call
        # that ran before the real, fully-parameterized one below.
        data_dir = os.path.join(opt.data_dir, 'crowdhuman')
        img_dir = os.path.join(
            data_dir, 'CrowdHuman_{}'.format(split), 'Images')
        # Format the file name before joining so a data_dir containing
        # braces can never corrupt the path.
        ann_path = os.path.join(data_dir, 'annotations',
                                '{}.json'.format(split))
        # Fixed: the original message said 'CityPersons' (copy-paste error).
        print('==> initializing CrowdHuman {} data.'.format(split))
        self.images = None
        # load image list and coco
        super(CrowdHuman, self).__init__(opt, split, ann_path, img_dir)
        self.num_samples = len(self.images)
        print('Loaded {} {} samples'.format(split, self.num_samples))

    def _to_float(self, x):
        """Round to two decimals (as float) for compact JSON."""
        return float("{:.2f}".format(x))

    def _save_results(self, records, fpath):
        """Write one JSON record per line (.odgt format); return *fpath*."""
        with open(fpath, 'w') as fid:
            for record in records:
                fid.write(json.dumps(record) + '\n')
        return fpath

    def convert_eval_format(self, all_bboxes):
        """Group per-image person detections into CrowdHuman .odgt records."""
        detections = []
        person_id = 1
        for image_id in all_bboxes:
            if type(all_bboxes[image_id]) != type({}):
                # newest format
                dtboxes = []
                for j in range(len(all_bboxes[image_id])):
                    item = all_bboxes[image_id][j]
                    if item['class'] != person_id:
                        continue
                    bbox = item['bbox']
                    bbox[2] -= bbox[0]  # x2 -> width  (in place)
                    bbox[3] -= bbox[1]  # y2 -> height (in place)
                    detection = {
                        "tag": 1,
                        "box": list(map(self._to_float, bbox[0:4])),
                        "score": float("{:.2f}".format(item['score']))
                    }
                    dtboxes.append(detection)
                img_info = self.coco.loadImgs(ids=[image_id])[0]
                file_name = img_info['file_name']
                # 'ID' is the file name without its 4-char extension.
                detections.append({'ID': file_name[:-4], 'dtboxes': dtboxes})
        return detections

    def __len__(self):
        # Number of images in this split.
        return self.num_samples

    def save_results(self, results, save_dir):
        """Write results to <save_dir>/results_crowdhuman.odgt."""
        self._save_results(self.convert_eval_format(results),
                           '{}/results_crowdhuman.odgt'.format(save_dir))

    def run_eval(self, results, save_dir):
        """Run the external CrowdHuman evaluation script on saved results."""
        self.save_results(results, save_dir)
        try:
            os.system('python tools/crowdhuman_eval/demo.py ' +
                      '../data/crowdhuman/annotation_val.odgt ' +
                      '{}/results_crowdhuman.odgt'.format(save_dir))
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            print('Crowdhuman evaluation not setup!')
src/lib/dataset/datasets/custom_dataset.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ..generic_dataset import GenericDataset
class CustomDataset(GenericDataset):
    """A user-supplied COCO-format dataset configured entirely from opt."""
    # Class-level placeholders; overwritten per-run in __init__.
    num_categories = 1
    default_resolution = [-1, -1]
    class_name = ['']
    max_objs = 128
    cat_ids = {1: 1}

    def __init__(self, opt, split):
        """Build the dataset from paths and sizes given on the command line."""
        _required_msg = (
            'The following arguments must be specified for custom datasets: '
            'custom_dataset_img_path, custom_dataset_ann_path, num_classes, '
            'input_h, input_w.')
        assert (opt.custom_dataset_img_path != '' and
                opt.custom_dataset_ann_path != '' and
                opt.num_classes != -1 and
                opt.input_h != -1 and opt.input_w != -1), _required_msg
        img_dir = opt.custom_dataset_img_path
        ann_path = opt.custom_dataset_ann_path
        # Replace the class-level placeholders with per-run values.
        self.num_categories = opt.num_classes
        self.class_name = [''] * self.num_categories
        self.default_resolution = [opt.input_h, opt.input_w]
        self.cat_ids = {i: i for i in range(1, self.num_categories + 1)}
        self.images = None
        # load image list and coco
        super().__init__(opt, split, ann_path, img_dir)
        self.num_samples = len(self.images)
        print('Loaded Custom dataset {} samples'.format(self.num_samples))

    def __len__(self):
        """Number of images loaded for this split."""
        return self.num_samples

    def run_eval(self, results, save_dir):
        """Custom datasets define no metric; evaluation is a no-op."""
        pass
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/dataset/datasets/kitti.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
import numpy as np
import torch
import json
import cv2
import os
import math
from ..generic_dataset import GenericDataset
from utils.ddd_utils import compute_box_3d, project_to_image
class KITTI(GenericDataset):
    """KITTI 3D detection dataset (Pedestrian / Car / Cyclist)."""
    num_categories = 3
    default_resolution = [384, 1280]
    # Full KITTI label set, for reference:
    # ['Pedestrian', 'Car', 'Cyclist', 'Van', 'Truck', 'Person_sitting',
    #  'Tram', 'Misc', 'DontCare']
    class_name = ['Pedestrian', 'Car', 'Cyclist']
    # negative id is for "not as negative sample for abs(id)".
    # 0 for ignore losses for all categories in the bounding box region
    cat_ids = {1: 1, 2: 2, 3: 3, 4: -2, 5: -2, 6: -1, 7: -9999, 8: -9999, 9: 0}
    max_objs = 50

    def __init__(self, opt, split):
        data_dir = os.path.join(opt.data_dir, 'kitti')
        img_dir = os.path.join(data_dir, 'images', 'trainval')
        if opt.trainval:
            split = 'trainval' if split == 'train' else 'test'
            img_dir = os.path.join(data_dir, 'images', split)
            # Format the file name before joining so a data_dir containing
            # braces can never corrupt the path (the original formatted the
            # whole joined path).
            ann_path = os.path.join(
                data_dir, 'annotations', 'kitti_v2_{}.json'.format(split))
        else:
            ann_path = os.path.join(
                data_dir, 'annotations',
                'kitti_v2_{}_{}.json'.format(opt.kitti_split, split))
        self.images = None
        # load image list and coco
        super(KITTI, self).__init__(opt, split, ann_path, img_dir)
        self.alpha_in_degree = False
        self.num_samples = len(self.images)
        print('Loaded {} {} samples'.format(split, self.num_samples))

    def __len__(self):
        # Number of images in this split.
        return self.num_samples

    def _to_float(self, x):
        """Round to two decimals (as float)."""
        return float("{:.2f}".format(x))

    def convert_eval_format(self, all_bboxes):
        # KITTI results are written as per-image text files in save_results;
        # no COCO-style conversion is needed.
        pass

    def save_results(self, results, save_dir):
        """Write one KITTI-format label file per image into results_kitti/."""
        results_dir = os.path.join(save_dir, 'results_kitti')
        # makedirs(exist_ok=True) avoids the check-then-create race of the
        # original exists()/mkdir() pair.
        os.makedirs(results_dir, exist_ok=True)
        for img_id in results.keys():
            out_path = os.path.join(results_dir, '{:06d}.txt'.format(img_id))
            # `with` guarantees the file is closed even if formatting raises.
            with open(out_path, 'w') as f:
                for i in range(len(results[img_id])):
                    item = results[img_id][i]
                    category_id = item['class']
                    class_name = self.class_name[category_id - 1]
                    # Fill missing optional fields with KITTI's "unknown"
                    # placeholder values.
                    if not ('alpha' in item):
                        item['alpha'] = -1
                    if not ('rot_y' in item):
                        item['rot_y'] = -1
                    if 'dim' in item:
                        # Clamp degenerate dimensions to a small positive size.
                        item['dim'] = [max(item['dim'][0], 0.01),
                                       max(item['dim'][1], 0.01),
                                       max(item['dim'][2], 0.01)]
                    if not ('dim' in item):
                        item['dim'] = [-1000, -1000, -1000]
                    if not ('loc' in item):
                        item['loc'] = [-1000, -1000, -1000]
                    f.write('{} 0.0 0'.format(class_name))
                    f.write(' {:.2f}'.format(item['alpha']))
                    f.write(' {:.2f} {:.2f} {:.2f} {:.2f}'.format(
                        item['bbox'][0], item['bbox'][1],
                        item['bbox'][2], item['bbox'][3]))
                    f.write(' {:.2f} {:.2f} {:.2f}'.format(
                        item['dim'][0], item['dim'][1], item['dim'][2]))
                    f.write(' {:.2f} {:.2f} {:.2f}'.format(
                        item['loc'][0], item['loc'][1], item['loc'][2]))
                    f.write(' {:.2f} {:.2f}\n'.format(
                        item['rot_y'], item['score']))

    def run_eval(self, results, save_dir):
        """Score saved results with the offline KITTI evaluation binaries."""
        self.save_results(results, save_dir)
        print('Results of IoU threshold 0.7')
        os.system('./tools/kitti_eval/evaluate_object_3d_offline_07 ' +
                  '../data/kitti/training/label_val ' +
                  '{}/results_kitti/'.format(save_dir))
        print('Results of IoU threshold 0.5')
        os.system('./tools/kitti_eval/evaluate_object_3d_offline ' +
                  '../data/kitti/training/label_val ' +
                  '{}/results_kitti/'.format(save_dir))
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/dataset/datasets/kitti_tracking.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
import numpy as np
import torch
import json
import cv2
import os
import math
from ..generic_dataset import GenericDataset
from utils.ddd_utils import compute_box_3d, project_to_image
class KITTITracking(GenericDataset):
    """KITTI multi-object tracking dataset (Pedestrian / Car / Cyclist)."""
    num_categories = 3
    default_resolution = [384, 1280]
    class_name = ['Pedestrian', 'Car', 'Cyclist']
    # negative id is for "not as negative sample for abs(id)".
    # 0 for ignore losses for all categories in the bounding box region
    # Full label set, for reference:
    # ['Pedestrian', 'Car', 'Cyclist', 'Van', 'Truck', 'Person_sitting',
    #  'Tram', 'Misc', 'DontCare']
    cat_ids = {1: 1, 2: 2, 3: 3, 4: -2, 5: -2, 6: -1, 7: -9999, 8: -9999, 9: 0}
    max_objs = 50

    def __init__(self, opt, split):
        data_dir = os.path.join(opt.data_dir, 'kitti_tracking')
        split_ = 'train' if opt.dataset_version != 'test' else 'test'
        img_dir = os.path.join(
            data_dir, 'data_tracking_image_2', '{}ing'.format(split_),
            'image_02')
        ann_file_ = split_ if opt.dataset_version == '' else opt.dataset_version
        if opt.dataset_version == '':
            # Fixed: this warning used to print unconditionally, even when
            # dataset_version was set.
            print('Warning! opt.dataset_version is not set')
        ann_path = os.path.join(
            data_dir, 'annotations', 'tracking_{}.json'.format(ann_file_))
        self.images = None
        super(KITTITracking, self).__init__(opt, split, ann_path, img_dir)
        self.alpha_in_degree = False
        self.num_samples = len(self.images)
        print('Loaded {} {} samples'.format(split, self.num_samples))

    def __len__(self):
        # Number of images across all videos in this split.
        return self.num_samples

    def _to_float(self, x):
        """Round to two decimals (as float)."""
        return float("{:.2f}".format(x))

    def save_results(self, results, save_dir):
        """Write one KITTI-tracking text file per video into results_kitti_tracking/."""
        results_dir = os.path.join(save_dir, 'results_kitti_tracking')
        # Race-free replacement for the original exists()/mkdir() pair.
        os.makedirs(results_dir, exist_ok=True)
        for video in self.coco.dataset['videos']:
            video_id = video['id']
            file_name = video['file_name']
            out_path = os.path.join(results_dir, '{}.txt'.format(file_name))
            # `with` guarantees the file is closed even if formatting raises.
            with open(out_path, 'w') as f:
                images = self.video_to_images[video_id]
                for image_info in images:
                    img_id = image_info['id']
                    if not (img_id in results):
                        continue
                    frame_id = image_info['frame_id']
                    for i in range(len(results[img_id])):
                        item = results[img_id][i]
                        category_id = item['class']
                        class_name = self.class_name[category_id - 1]
                        # Fill missing optional fields with "unknown"
                        # placeholder values.
                        if not ('alpha' in item):
                            item['alpha'] = -1
                        if not ('rot_y' in item):
                            item['rot_y'] = -10
                        if 'dim' in item:
                            item['dim'] = [max(item['dim'][0], 0.01),
                                           max(item['dim'][1], 0.01),
                                           max(item['dim'][2], 0.01)]
                        if not ('dim' in item):
                            item['dim'] = [-1, -1, -1]
                        if not ('loc' in item):
                            item['loc'] = [-1000, -1000, -1000]
                        track_id = item['tracking_id'] \
                            if 'tracking_id' in item else -1
                        # Output frame ids are 0-based (frame_id - 1).
                        f.write('{} {} {} -1 -1'.format(
                            frame_id - 1, track_id, class_name))
                        f.write(' {:d}'.format(int(item['alpha'])))
                        f.write(' {:.2f} {:.2f} {:.2f} {:.2f}'.format(
                            item['bbox'][0], item['bbox'][1],
                            item['bbox'][2], item['bbox'][3]))
                        f.write(' {:d} {:d} {:d}'.format(
                            int(item['dim'][0]), int(item['dim'][1]),
                            int(item['dim'][2])))
                        f.write(' {:d} {:d} {:d}'.format(
                            int(item['loc'][0]), int(item['loc'][1]),
                            int(item['loc'][2])))
                        f.write(' {:d} {:.2f}\n'.format(
                            int(item['rot_y']), item['score']))

    def run_eval(self, results, save_dir):
        """Run the KITTI tracking evaluation script on saved results."""
        self.save_results(results, save_dir)
        os.system('python tools/eval_kitti_track/evaluate_tracking.py ' +
                  '{}/results_kitti_tracking/ {}'.format(
                      save_dir, self.opt.dataset_version))
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/dataset/datasets/mot.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
from collections import defaultdict
from ..generic_dataset import GenericDataset
class MOT(GenericDataset):
    """MOTChallenge pedestrian tracking dataset (MOT16/17/19...)."""
    num_categories = 1
    default_resolution = [544, 960]
    class_name = ['']
    max_objs = 256
    cat_ids = {1: 1, -1: -1}

    def __init__(self, opt, split):
        self.dataset_version = opt.dataset_version
        # e.g. '17halftrain' -> year 17.
        self.year = int(self.dataset_version[:2])
        print('Using MOT {} {}'.format(self.year, self.dataset_version))
        data_dir = os.path.join(opt.data_dir, 'mot{}'.format(self.year))
        if opt.dataset_version in ['17trainval', '17test']:
            ann_file = '{}.json'.format('train' if split == 'train' else
                                        'test')
        elif opt.dataset_version == '17halftrain':
            ann_file = '{}.json'.format('train_half')
        elif opt.dataset_version == '17halfval':
            ann_file = '{}.json'.format('val_half')
        else:
            # Fixed: an unrecognized version used to crash later with a
            # confusing NameError on ann_file; fail fast instead.
            raise ValueError(
                'Unsupported dataset_version: {}'.format(opt.dataset_version))
        img_dir = os.path.join(data_dir, '{}'.format(
            'test' if 'test' in self.dataset_version else 'train'))
        print('ann_file', ann_file)
        ann_path = os.path.join(data_dir, 'annotations', ann_file)
        self.images = None
        # load image list and coco
        super(MOT, self).__init__(opt, split, ann_path, img_dir)
        self.num_samples = len(self.images)
        print('Loaded MOT {} {} {} samples'.format(
            self.dataset_version, split, self.num_samples))

    def _to_float(self, x):
        """Round to two decimals (as float)."""
        return float("{:.2f}".format(x))

    def __len__(self):
        # Number of images across all videos.
        return self.num_samples

    def save_results(self, results, save_dir):
        """Write one MOTChallenge txt file per video, grouped by track id."""
        results_dir = os.path.join(
            save_dir, 'results_mot{}'.format(self.dataset_version))
        # Race-free replacement for the original exists()/mkdir() pair.
        os.makedirs(results_dir, exist_ok=True)
        for video in self.coco.dataset['videos']:
            video_id = video['id']
            file_name = video['file_name']
            out_path = os.path.join(results_dir, '{}.txt'.format(file_name))
            images = self.video_to_images[video_id]
            tracks = defaultdict(list)
            for image_info in images:
                if not (image_info['id'] in results):
                    continue
                result = results[image_info['id']]
                frame_id = image_info['frame_id']
                for item in result:
                    if not ('tracking_id' in item):
                        # NOTE(review): a random fallback id can collide with
                        # real ids; kept for behavioral compatibility.
                        item['tracking_id'] = np.random.randint(100000)
                    if item['active'] == 0:
                        continue
                    tracking_id = item['tracking_id']
                    bbox = item['bbox']
                    bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]
                    tracks[tracking_id].append([frame_id] + bbox)
            # Renumber track ids consecutively from 1 for the benchmark.
            # `with` guarantees the file is closed even if formatting raises.
            with open(out_path, 'w') as f:
                rename_track_id = 0
                for track_id in sorted(tracks):
                    rename_track_id += 1
                    for t in tracks[track_id]:
                        f.write(
                            '{},{},{:.2f},{:.2f},{:.2f},{:.2f},-1,-1,-1,-1\n'
                            .format(t[0], rename_track_id, t[1], t[2],
                                    t[3] - t[1], t[4] - t[2]))

    def run_eval(self, results, save_dir):
        """Score saved results with the py-motmetrics evaluation script."""
        self.save_results(results, save_dir)
        gt_type_str = '{}'.format(
            '_train_half' if '17halftrain' in self.opt.dataset_version
            else '_val_half' if '17halfval' in self.opt.dataset_version
            else '')
        gt_type_str = '_val_half' if self.year in [16, 19] else gt_type_str
        gt_type_str = '--gt_type {}'.format(gt_type_str) \
            if gt_type_str != '' else ''
        os.system('python tools/eval_motchallenge.py ' +
                  '../data/mot{}/{}/ '.format(self.year, 'train') +
                  '{}/results_mot{}/ '.format(save_dir, self.dataset_version) +
                  gt_type_str + ' --eval_official')
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/dataset/datasets/nuscenes.py | Python | # Copyright (c) Xingyi Zhou. All Rights Reserved
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
from pyquaternion import Quaternion
import numpy as np
import torch
import json
import cv2
import os
import math
import copy
from ..generic_dataset import GenericDataset
from utils.ddd_utils import compute_box_3d, project_to_image
class nuScenes(GenericDataset):
    """nuScenes camera-based 3D detection / tracking dataset."""
    default_resolution = [448, 800]
    num_categories = 10
    class_name = [
        'car', 'truck', 'bus', 'trailer',
        'construction_vehicle', 'pedestrian', 'motorcycle', 'bicycle',
        'traffic_cone', 'barrier']
    cat_ids = {i + 1: i + 1 for i in range(num_categories)}
    focal_length = 1200
    max_objs = 128
    # Classes excluded from the tracking benchmark.
    _tracking_ignored_class = ['construction_vehicle', 'traffic_cone',
                               'barrier']
    # Class families used to select a per-class attribute below.
    _vehicles = ['car', 'truck', 'bus', 'trailer', 'construction_vehicle']
    _cycles = ['motorcycle', 'bicycle']
    _pedestrians = ['pedestrian']
    attribute_to_id = {
        '': 0, 'cycle.with_rider': 1, 'cycle.without_rider': 2,
        'pedestrian.moving': 3, 'pedestrian.standing': 4,
        'pedestrian.sitting_lying_down': 5,
        'vehicle.moving': 6, 'vehicle.parked': 7,
        'vehicle.stopped': 8}
    id_to_attribute = {v: k for k, v in attribute_to_id.items()}

    def __init__(self, opt, split):
        split_names = {'train': 'train', 'val': 'val'}
        split_name = split_names[split]
        data_dir = os.path.join(opt.data_dir, 'nuscenes')
        # NOTE(review): split_names never yields 'mini', so the v1.0-mini
        # branch below is currently dead code — confirm intended.
        img_dir = os.path.join(
            data_dir, 'v1.0-mini' if split_name == 'mini' else 'v1.0-trainval')
        print('Dataset version', opt.dataset_version)
        if opt.dataset_version == 'test':
            ann_path = os.path.join(data_dir, 'annotations', 'test.json')
            img_dir = os.path.join(data_dir, 'v1.0-test')
        else:
            # Format the file name before joining so a data_dir containing
            # braces can never corrupt the path.
            ann_path = os.path.join(
                data_dir, 'annotations',
                '{}{}.json'.format(opt.dataset_version, split_name))
        self.images = None
        super(nuScenes, self).__init__(opt, split, ann_path, img_dir)
        self.alpha_in_degree = False
        self.num_samples = len(self.images)
        print('Loaded {} {} samples'.format(split, self.num_samples))

    def __len__(self):
        # Number of images (camera views) in this split.
        return self.num_samples

    def _to_float(self, x):
        """Round to two decimals (as float)."""
        return float("{:.2f}".format(x))

    def convert_coco_format(self, all_bboxes):
        """Convert detections to COCO-style dicts (2D boxes only)."""
        detections = []
        for image_id in all_bboxes:
            if type(all_bboxes[image_id]) != type({}):
                # newest format
                for j in range(len(all_bboxes[image_id])):
                    item = all_bboxes[image_id][j]
                    # Fixed: was `citem['class']` — a typo that raised
                    # NameError whenever this method ran.
                    category_id = item['class']
                    bbox = item['bbox']
                    bbox[2] -= bbox[0]  # x2 -> width  (in place)
                    bbox[3] -= bbox[1]  # y2 -> height (in place)
                    detection = {
                        "image_id": int(image_id),
                        "category_id": int(category_id),
                        "bbox": list(map(self._to_float, bbox[0:4])),
                        "score": float("{:.2f}".format(item['score']))
                    }
                    detections.append(detection)
        return detections

    def convert_eval_format(self, results):
        """Convert raw detections into the official nuScenes submission dict.

        Per-camera detections are mapped into global coordinates via each
        image's trans_matrix; rotation and attributes are derived when not
        already present; at most 500 highest-scoring boxes are kept per
        sample token.
        """
        from nuscenes.utils.data_classes import Box
        ret = {'meta': {'use_camera': True, 'use_lidar': False,
                        'use_radar': False, 'use_map': False,
                        'use_external': False}, 'results': {}}
        print('Converting nuscenes format...')
        for image_id in self.images:
            if not (image_id in results):
                continue
            image_info = self.coco.loadImgs(ids=[image_id])[0]
            sample_token = image_info['sample_token']
            trans_matrix = np.array(image_info['trans_matrix'], np.float32)
            sensor_id = image_info['sensor_id']
            sample_results = []
            for item in results[image_id]:
                class_name = self.class_name[int(item['class'] - 1)] \
                    if not ('detection_name' in item) \
                    else item['detection_name']
                if self.opt.tracking and \
                        class_name in self._tracking_ignored_class:
                    continue
                score = float(item['score']) \
                    if not ('detection_score' in item) \
                    else item['detection_score']
                if 'size' in item:
                    size = item['size']
                else:
                    # Reorder the network's dim output for nuScenes.
                    size = [float(item['dim'][1]), float(item['dim'][2]),
                            float(item['dim'][0])]
                if 'translation' in item:
                    translation = item['translation']
                else:
                    translation = np.dot(trans_matrix, np.array(
                        [item['loc'][0], item['loc'][1] - size[2],
                         item['loc'][2], 1], np.float32))
                det_id = item['det_id'] if 'det_id' in item else -1
                tracking_id = item['tracking_id'] \
                    if 'tracking_id' in item else 1
                if not ('rotation' in item):
                    # Rebuild the global-frame orientation from rot_y by
                    # chaining the sensor->ego->global transforms.
                    rot_cam = Quaternion(
                        axis=[0, 1, 0], angle=item['rot_y'])
                    loc = np.array(
                        [item['loc'][0], item['loc'][1], item['loc'][2]],
                        np.float32)
                    box = Box(loc, size, rot_cam, name='2', token='1')
                    box.translate(np.array([0, - box.wlh[2] / 2, 0]))
                    box.rotate(Quaternion(image_info['cs_record_rot']))
                    box.translate(np.array(image_info['cs_record_trans']))
                    box.rotate(Quaternion(image_info['pose_record_rot']))
                    box.translate(np.array(image_info['pose_record_trans']))
                    rotation = box.orientation
                    rotation = [float(rotation.w), float(rotation.x),
                                float(rotation.y), float(rotation.z)]
                else:
                    rotation = item['rotation']
                nuscenes_att = np.array(item['nuscenes_att'], np.float32) \
                    if 'nuscenes_att' in item else np.zeros(8, np.float32)
                att = ''
                # Pick the most likely attribute within the class family.
                if class_name in self._cycles:
                    att = self.id_to_attribute[
                        np.argmax(nuscenes_att[0:2]) + 1]
                elif class_name in self._pedestrians:
                    att = self.id_to_attribute[
                        np.argmax(nuscenes_att[2:5]) + 3]
                elif class_name in self._vehicles:
                    att = self.id_to_attribute[
                        np.argmax(nuscenes_att[5:8]) + 6]
                if 'velocity' in item and len(item['velocity']) == 2:
                    velocity = item['velocity']
                else:
                    velocity = item['velocity'] \
                        if 'velocity' in item else [0, 0, 0]
                    velocity = np.dot(trans_matrix, np.array(
                        [velocity[0], velocity[1], velocity[2], 0],
                        np.float32))
                    velocity = [float(velocity[0]), float(velocity[1])]
                result = {
                    'sample_token': sample_token,
                    'translation': [float(translation[0]),
                                    float(translation[1]),
                                    float(translation[2])],
                    'size': size,
                    'rotation': rotation,
                    'velocity': velocity,
                    'detection_name': class_name,
                    'attribute_name': att
                        if not ('attribute_name' in item)
                        else item['attribute_name'],
                    'detection_score': score,
                    'tracking_name': class_name,
                    'tracking_score': score,
                    'tracking_id': tracking_id,
                    'sensor_id': sensor_id,
                    'det_id': det_id}
                sample_results.append(result)

            if sample_token in ret['results']:
                ret['results'][sample_token] = \
                    ret['results'][sample_token] + sample_results
            else:
                ret['results'][sample_token] = sample_results

        # Keep only the 500 best-scoring boxes per sample token.
        for sample_token in ret['results'].keys():
            confs = sorted([(-d['detection_score'], ind)
                            for ind, d in
                            enumerate(ret['results'][sample_token])])
            ret['results'][sample_token] = [
                ret['results'][sample_token][ind]
                for _, ind in confs[:min(500, len(confs))]]
        return ret

    def save_results(self, results, save_dir, task):
        """Write the nuScenes submission JSON for *task* ('det' or 'tracking')."""
        out_path = '{}/results_nuscenes_{}.json'.format(save_dir, task)
        # Context manager fixes the leaked file handle in the original code.
        with open(out_path, 'w') as f:
            json.dump(self.convert_eval_format(results), f)

    def run_eval(self, results, save_dir):
        """Save results then invoke the official nuScenes evaluation scripts."""
        task = 'tracking' if self.opt.tracking else 'det'
        self.save_results(results, save_dir, task)
        if task == 'det':
            os.system('python ' +
                'tools/nuscenes-devkit/python-sdk/nuscenes/eval/detection/evaluate.py ' +
                '{}/results_nuscenes_{}.json '.format(save_dir, task) +
                '--output_dir {}/nuscenes_eval_det_output/ '.format(save_dir) +
                '--dataroot ../data/nuscenes/v1.0-trainval/')
        else:
            os.system('python ' +
                'tools/nuscenes-devkit/python-sdk/nuscenes/eval/tracking/evaluate.py ' +
                '{}/results_nuscenes_{}.json '.format(save_dir, task) +
                '--output_dir {}/nuscenes_evaltracl__output/ '.format(save_dir) +
                '--dataroot ../data/nuscenes/v1.0-trainval/')
            os.system('python ' +
                'tools/nuscenes-devkit/python-sdk-alpha02/nuscenes/eval/tracking/evaluate.py ' +
                '{}/results_nuscenes_{}.json '.format(save_dir, task) +
                '--output_dir {}/nuscenes_evaltracl__output/ '.format(save_dir) +
                '--dataroot ../data/nuscenes/v1.0-trainval/')
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/dataset/generic_dataset.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import math
import json
import cv2
import os
from collections import defaultdict
import pycocotools.coco as coco
import torch
import torch.utils.data as data
from utils.image import flip, color_aug
from utils.image import get_affine_transform, affine_transform
from utils.image import gaussian_radius, draw_umich_gaussian
import copy
class GenericDataset(data.Dataset):
is_fusion_dataset = False
default_resolution = None
num_categories = None
class_name = None
# cat_ids: map from 'category_id' in the annotation files to 1..num_categories
# Not using 0 because 0 is used for don't care region and ignore loss.
cat_ids = None
max_objs = None
rest_focal_length = 1200
num_joints = 17
flip_idx = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10],
[11, 12], [13, 14], [15, 16]]
edges = [[0, 1], [0, 2], [1, 3], [2, 4],
[4, 6], [3, 5], [5, 6],
[5, 7], [7, 9], [6, 8], [8, 10],
[6, 12], [5, 11], [11, 12],
[12, 14], [14, 16], [11, 13], [13, 15]]
mean = np.array([0.40789654, 0.44719302, 0.47026115],
dtype=np.float32).reshape(1, 1, 3)
std = np.array([0.28863828, 0.27408164, 0.27809835],
dtype=np.float32).reshape(1, 1, 3)
_eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
dtype=np.float32)
_eig_vec = np.array([
[-0.58752847, -0.69563484, 0.41340352],
[-0.5832747, 0.00994535, -0.81221408],
[-0.56089297, 0.71832671, 0.41158938]
], dtype=np.float32)
ignore_val = 1
nuscenes_att_range = {0: [0, 1], 1: [0, 1], 2: [2, 3, 4], 3: [2, 3, 4],
4: [2, 3, 4], 5: [5, 6, 7], 6: [5, 6, 7], 7: [5, 6, 7]}
def __init__(self, opt=None, split=None, ann_path=None, img_dir=None):
    """Optionally load a COCO-format annotation file and build indices.

    All arguments default to None so subclasses can set attributes first
    and trigger the actual load with a second, fully-parameterized call.
    """
    super(GenericDataset, self).__init__()
    if opt is not None and split is not None:
        self.split = split
        self.opt = opt
        # Fixed seed keeps data augmentation reproducible across runs.
        self._data_rng = np.random.RandomState(123)
    if ann_path is None or img_dir is None:
        return
    print('==> initializing {} data from {}, \n images from {} ...'.format(
        split, ann_path, img_dir))
    self.coco = coco.COCO(ann_path)
    self.images = self.coco.getImgIds()

    if opt.tracking:
        if 'videos' not in self.coco.dataset:
            self.fake_video_data()
        print('Creating video index!')
        self.video_to_images = defaultdict(list)
        for img_info in self.coco.dataset['images']:
            self.video_to_images[img_info['video_id']].append(img_info)

    self.img_dir = img_dir
def __getitem__(self, index):
    """Assemble one sample: augmented input image, regression targets, and
    (when tracking is enabled) a previous frame plus its prior heatmap."""
    opt = self.opt
    img, anns, img_info, img_path = self._load_data(index)
    height, width = img.shape[0], img.shape[1]
    # Center c and scale s define the affine crop fed to the network.
    c = np.array([img.shape[1] / 2., img.shape[0] / 2.], dtype=np.float32)
    s = max(img.shape[0], img.shape[1]) * 1.0 if not self.opt.not_max_crop \
        else np.array([img.shape[1], img.shape[0]], np.float32)
    aug_s, rot, flipped = 1, 0, 0
    if self.split == 'train':
        # Random crop/scale (and possibly rotation) augmentation.
        c, aug_s, rot = self._get_aug_param(c, s, width, height)
        s = s * aug_s
        if np.random.random() < opt.flip:
            flipped = 1
            img = img[:, ::-1, :]
            anns = self._flip_anns(anns, width)

    # Separate transforms for input resolution and (down-sampled) output.
    trans_input = get_affine_transform(
        c, s, rot, [opt.input_w, opt.input_h])
    trans_output = get_affine_transform(
        c, s, rot, [opt.output_w, opt.output_h])
    inp = self._get_input(img, trans_input)
    ret = {'image': inp}
    gt_det = {'bboxes': [], 'scores': [], 'clses': [], 'cts': []}

    pre_cts, track_ids = None, None
    if opt.tracking:
        # Load a (temporally jittered during training) previous frame and
        # apply either the same augmentation or an independently disturbed one.
        pre_image, pre_anns, frame_dist = self._load_pre_data(
            img_info['video_id'], img_info['frame_id'],
            img_info['sensor_id'] if 'sensor_id' in img_info else 1)
        if flipped:
            pre_image = pre_image[:, ::-1, :].copy()
            pre_anns = self._flip_anns(pre_anns, width)
        if opt.same_aug_pre and frame_dist != 0:
            trans_input_pre = trans_input
            trans_output_pre = trans_output
        else:
            c_pre, aug_s_pre, _ = self._get_aug_param(
                c, s, width, height, disturb=True)
            s_pre = s * aug_s_pre
            trans_input_pre = get_affine_transform(
                c_pre, s_pre, rot, [opt.input_w, opt.input_h])
            trans_output_pre = get_affine_transform(
                c_pre, s_pre, rot, [opt.output_w, opt.output_h])
        pre_img = self._get_input(pre_image, trans_input_pre)
        pre_hm, pre_cts, track_ids = self._get_pre_dets(
            pre_anns, trans_input_pre, trans_output_pre)
        ret['pre_img'] = pre_img
        if opt.pre_hm:
            ret['pre_hm'] = pre_hm

    ### init samples: allocate target tensors before filling per-object slots
    self._init_ret(ret, gt_det)
    calib = self._get_calib(img_info, width, height)

    num_objs = min(len(anns), self.max_objs)
    for k in range(num_objs):
        ann = anns[k]
        cls_id = int(self.cat_ids[ann['category_id']])
        if cls_id > self.opt.num_classes or cls_id <= -999:
            continue
        bbox, bbox_amodal = self._get_bbox_output(
            ann['bbox'], trans_output, height, width)
        # Non-positive class ids and crowd regions only mask the heatmap
        # instead of producing a training instance.
        if cls_id <= 0 or ('iscrowd' in ann and ann['iscrowd'] > 0):
            self._mask_ignore_or_crowd(ret, cls_id, bbox)
            continue
        self._add_instance(
            ret, gt_det, k, cls_id, bbox, bbox_amodal, ann, trans_output,
            aug_s, calib, pre_cts, track_ids)

    if self.opt.debug > 0:
        # Extra metadata for visualization/debugging only.
        gt_det = self._format_gt_det(gt_det)
        meta = {'c': c, 's': s, 'gt_det': gt_det, 'img_id': img_info['id'],
                'img_path': img_path, 'calib': calib,
                'flipped': flipped}
        ret['meta'] = meta
    return ret
def get_default_calib(self, width, height):
    """Fallback 3x4 camera intrinsics: principal point at the image center,
    focal length self.rest_focal_length, zero translation."""
    f = self.rest_focal_length
    return np.array([[f, 0, width / 2, 0],
                     [0, f, height / 2, 0],
                     [0, 0, 1, 0]])
def _load_image_anns(self, img_id, coco, img_dir):
    """Return (image, deep-copied annotations, image info, image path) for *img_id*."""
    img_info = coco.loadImgs(ids=[img_id])[0]
    img_path = os.path.join(img_dir, img_info['file_name'])
    ann_ids = coco.getAnnIds(imgIds=[img_id])
    # Deep-copy so downstream in-place edits never corrupt the COCO index.
    anns = copy.deepcopy(coco.loadAnns(ids=ann_ids))
    return cv2.imread(img_path), anns, img_info, img_path
def _load_data(self, index):
    """Map a dataset index to its image id and load that sample's data."""
    img_id = self.images[index]
    return self._load_image_anns(img_id, self.coco, self.img_dir)
def _load_pre_data(self, video_id, frame_id, sensor_id=1):
    """Load a reference ("previous") frame for tracking.

    Training: uniformly sample a frame of the same video (and sensor)
    within opt.max_frame_dist of frame_id; the current frame itself is a
    valid candidate. Testing: use exactly frame_id - 1, falling back to
    the current frame when no previous frame exists (first frame).
    Returns (image, annotations, |frame distance|).
    """
    img_infos = self.video_to_images[video_id]
    # If training, random sample nearby frames as the "previous" frame
    # If testing, get the exact previous frame
    if 'train' in self.split:
        img_ids = [(img_info['id'], img_info['frame_id']) \
            for img_info in img_infos \
            if abs(img_info['frame_id'] - frame_id) < self.opt.max_frame_dist and \
            (not ('sensor_id' in img_info) or img_info['sensor_id'] == sensor_id)]
    else:
        img_ids = [(img_info['id'], img_info['frame_id']) \
            for img_info in img_infos \
            if (img_info['frame_id'] - frame_id) == -1 and \
            (not ('sensor_id' in img_info) or img_info['sensor_id'] == sensor_id)]
        if len(img_ids) == 0:
            # No previous frame exists: fall back to the frame itself.
            img_ids = [(img_info['id'], img_info['frame_id']) \
                for img_info in img_infos \
                if (img_info['frame_id'] - frame_id) == 0 and \
                (not ('sensor_id' in img_info) or img_info['sensor_id'] == sensor_id)]
    rand_id = np.random.choice(len(img_ids))
    img_id, pre_frame_id = img_ids[rand_id]
    frame_dist = abs(frame_id - pre_frame_id)
    img, anns, _, _ = self._load_image_anns(img_id, self.coco, self.img_dir)
    return img, anns, frame_dist
def _get_pre_dets(self, anns, trans_input, trans_output):
    """Render the previous-frame heatmap and centers used as tracking input.

    Simulates test-time tracker noise on the previous frame's ground truth:
    centers are jittered (``opt.hm_disturb``), randomly dropped
    (``opt.lost_disturb``), and extra false-positive peaks are injected
    (``opt.fp_disturb``).

    Returns:
        pre_hm: (1, input_h, input_w) float32 heatmap, or None when
            ``opt.pre_hm`` is off.
        pre_cts: list of center points in output (down-ratio) coordinates.
        track_ids: track id per center (-1 when the annotation has none).
    """
    hm_h, hm_w = self.opt.input_h, self.opt.input_w
    down_ratio = self.opt.down_ratio
    trans = trans_input
    reutrn_hm = self.opt.pre_hm  # NOTE(review): local name is a typo of "return_hm"
    pre_hm = np.zeros((1, hm_h, hm_w), dtype=np.float32) if reutrn_hm else None
    pre_cts, track_ids = [], []
    for ann in anns:
        cls_id = int(self.cat_ids[ann['category_id']])
        # Skip classes outside the model's range and crowd annotations.
        if cls_id > self.opt.num_classes or cls_id <= -99 or \
            ('iscrowd' in ann and ann['iscrowd'] > 0):
            continue
        # Map the box into input-image coordinates and clip to the heatmap.
        bbox = self._coco_box_to_bbox(ann['bbox'])
        bbox[:2] = affine_transform(bbox[:2], trans)
        bbox[2:] = affine_transform(bbox[2:], trans)
        bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, hm_w - 1)
        bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, hm_h - 1)
        h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
        max_rad = 1
        if (h > 0 and w > 0):
            radius = gaussian_radius((math.ceil(h), math.ceil(w)))
            radius = max(0, int(radius))
            max_rad = max(max_rad, radius)
            ct = np.array(
                [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
            ct0 = ct.copy()
            conf = 1
            # Jitter the center to mimic imperfect previous-frame detections.
            ct[0] = ct[0] + np.random.randn() * self.opt.hm_disturb * w
            ct[1] = ct[1] + np.random.randn() * self.opt.hm_disturb * h
            # Randomly "lose" the detection; conf is also the peak weight k below.
            conf = 1 if np.random.random() > self.opt.lost_disturb else 0
            ct_int = ct.astype(np.int32)
            # Lost detections keep the jittered center; kept ones use the clean one.
            if conf == 0:
                pre_cts.append(ct / down_ratio)
            else:
                pre_cts.append(ct0 / down_ratio)
            track_ids.append(ann['track_id'] if 'track_id' in ann else -1)
            if reutrn_hm:
                draw_umich_gaussian(pre_hm[0], ct_int, radius, k=conf)
            # Occasionally paint an extra peak near the object as a false positive.
            if np.random.random() < self.opt.fp_disturb and reutrn_hm:
                ct2 = ct0.copy()
                # Hard code heatmap disturb ratio, haven't tried other numbers.
                ct2[0] = ct2[0] + np.random.randn() * 0.05 * w
                ct2[1] = ct2[1] + np.random.randn() * 0.05 * h
                ct2_int = ct2.astype(np.int32)
                draw_umich_gaussian(pre_hm[0], ct2_int, radius, k=conf)
    return pre_hm, pre_cts, track_ids
def _get_border(self, border, size):
i = 1
while size - border // i <= border // i:
i *= 2
return border // i
def _get_aug_param(self, c, s, width, height, disturb=False):
if (not self.opt.not_rand_crop) and not disturb:
aug_s = np.random.choice(np.arange(0.6, 1.4, 0.1))
w_border = self._get_border(128, width)
h_border = self._get_border(128, height)
c[0] = np.random.randint(low=w_border, high=width - w_border)
c[1] = np.random.randint(low=h_border, high=height - h_border)
else:
sf = self.opt.scale
cf = self.opt.shift
if type(s) == float:
s = [s, s]
c[0] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
c[1] += s * np.clip(np.random.randn()*cf, -2*cf, 2*cf)
aug_s = np.clip(np.random.randn()*sf + 1, 1 - sf, 1 + sf)
if np.random.random() < self.opt.aug_rot:
rf = self.opt.rotate
rot = np.clip(np.random.randn()*rf, -rf*2, rf*2)
else:
rot = 0
return c, aug_s, rot
def _flip_anns(self, anns, width):
    """Horizontally flip all annotations in place for an image of ``width``.

    Flips boxes, keypoints (swapping symmetric joints via ``self.flip_idx``),
    the observation angle alpha, and amodal centers. Returns the same list.
    """
    for k in range(len(anns)):
        bbox = anns[k]['bbox']
        # COCO [x, y, w, h]: mirror the left edge, keep size.
        anns[k]['bbox'] = [
            width - bbox[0] - 1 - bbox[2], bbox[1], bbox[2], bbox[3]]

        if 'hps' in self.opt.heads and 'keypoints' in anns[k]:
            keypoints = np.array(anns[k]['keypoints'], dtype=np.float32).reshape(
                self.num_joints, 3)
            keypoints[:, 0] = width - keypoints[:, 0] - 1
            # Swap left/right joint pairs so labels stay anatomically correct.
            for e in self.flip_idx:
                keypoints[e[0]], keypoints[e[1]] = \
                    keypoints[e[1]].copy(), keypoints[e[0]].copy()
            anns[k]['keypoints'] = keypoints.reshape(-1).tolist()

        if 'rot' in self.opt.heads and 'alpha' in anns[k]:
            # Mirror the observation angle across the vertical axis.
            anns[k]['alpha'] = np.pi - anns[k]['alpha'] if anns[k]['alpha'] > 0 \
                else - np.pi - anns[k]['alpha']

        if 'amodel_offset' in self.opt.heads and 'amodel_center' in anns[k]:
            anns[k]['amodel_center'][0] = width - anns[k]['amodel_center'][0] - 1

        if self.opt.velocity and 'velocity' in anns[k]:
            # NOTE(review): velocity is replaced by a -10000 sentinel instead of
            # being mirrored — presumably "invalid after flip"; confirm intent.
            anns[k]['velocity'] = [-10000, -10000, -10000]
    return anns
def _get_input(self, img, trans_input):
    """Warp, normalize and CHW-transpose an image into a network input."""
    size = (self.opt.input_w, self.opt.input_h)
    inp = cv2.warpAffine(img, trans_input, size, flags=cv2.INTER_LINEAR)
    inp = inp.astype(np.float32) / 255.
    # Photometric (color) augmentation only while training.
    if self.split == 'train' and not self.opt.no_color_aug:
        color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
    inp = (inp - self.mean) / self.std
    return inp.transpose(2, 0, 1)
def _init_ret(self, ret, gt_det):
max_objs = self.max_objs * self.opt.dense_reg
ret['hm'] = np.zeros(
(self.opt.num_classes, self.opt.output_h, self.opt.output_w),
np.float32)
ret['ind'] = np.zeros((max_objs), dtype=np.int64)
ret['cat'] = np.zeros((max_objs), dtype=np.int64)
ret['mask'] = np.zeros((max_objs), dtype=np.float32)
regression_head_dims = {
'reg': 2, 'wh': 2, 'tracking': 2, 'ltrb': 4, 'ltrb_amodal': 4,
'nuscenes_att': 8, 'velocity': 3, 'hps': self.num_joints * 2,
'dep': 1, 'dim': 3, 'amodel_offset': 2}
for head in regression_head_dims:
if head in self.opt.heads:
ret[head] = np.zeros(
(max_objs, regression_head_dims[head]), dtype=np.float32)
ret[head + '_mask'] = np.zeros(
(max_objs, regression_head_dims[head]), dtype=np.float32)
gt_det[head] = []
if 'hm_hp' in self.opt.heads:
num_joints = self.num_joints
ret['hm_hp'] = np.zeros(
(num_joints, self.opt.output_h, self.opt.output_w), dtype=np.float32)
ret['hm_hp_mask'] = np.zeros(
(max_objs * num_joints), dtype=np.float32)
ret['hp_offset'] = np.zeros(
(max_objs * num_joints, 2), dtype=np.float32)
ret['hp_ind'] = np.zeros((max_objs * num_joints), dtype=np.int64)
ret['hp_offset_mask'] = np.zeros(
(max_objs * num_joints, 2), dtype=np.float32)
ret['joint'] = np.zeros((max_objs * num_joints), dtype=np.int64)
if 'rot' in self.opt.heads:
ret['rotbin'] = np.zeros((max_objs, 2), dtype=np.int64)
ret['rotres'] = np.zeros((max_objs, 2), dtype=np.float32)
ret['rot_mask'] = np.zeros((max_objs), dtype=np.float32)
gt_det.update({'rot': []})
def _get_calib(self, img_info, width, height):
if 'calib' in img_info:
calib = np.array(img_info['calib'], dtype=np.float32)
else:
calib = np.array([[self.rest_focal_length, 0, width / 2, 0],
[0, self.rest_focal_length, height / 2, 0],
[0, 0, 1, 0]])
return calib
def _ignore_region(self, region, ignore_val=1):
np.maximum(region, ignore_val, out=region)
def _mask_ignore_or_crowd(self, ret, cls_id, bbox):
# mask out crowd region, only rectangular mask is supported
if cls_id == 0: # ignore all classes
self._ignore_region(ret['hm'][:, int(bbox[1]): int(bbox[3]) + 1,
int(bbox[0]): int(bbox[2]) + 1])
else:
# mask out one specific class
self._ignore_region(ret['hm'][abs(cls_id) - 1,
int(bbox[1]): int(bbox[3]) + 1,
int(bbox[0]): int(bbox[2]) + 1])
if ('hm_hp' in ret) and cls_id <= 1:
self._ignore_region(ret['hm_hp'][:, int(bbox[1]): int(bbox[3]) + 1,
int(bbox[0]): int(bbox[2]) + 1])
def _coco_box_to_bbox(self, box):
bbox = np.array([box[0], box[1], box[0] + box[2], box[1] + box[3]],
dtype=np.float32)
return bbox
def _get_bbox_output(self, bbox, trans_output, height, width):
    """Project a COCO box into output (down-sampled) coordinates.

    All four corners are transformed (so rotation augmentation is handled
    correctly) and the result is re-axis-aligned.

    Returns:
        bbox: [x1, y1, x2, y2] clipped to the output resolution.
        bbox_amodal: the same box before clipping (may extend outside).
    """
    bbox = self._coco_box_to_bbox(bbox).copy()

    rect = np.array([[bbox[0], bbox[1]], [bbox[0], bbox[3]],
                     [bbox[2], bbox[3]], [bbox[2], bbox[1]]], dtype=np.float32)
    for t in range(4):
        rect[t] = affine_transform(rect[t], trans_output)
    bbox[:2] = rect[:, 0].min(), rect[:, 1].min()
    bbox[2:] = rect[:, 0].max(), rect[:, 1].max()

    bbox_amodal = copy.deepcopy(bbox)
    bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, self.opt.output_w - 1)
    bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, self.opt.output_h - 1)
    # (Removed: dead locals h, w were computed here and never used.)
    return bbox, bbox_amodal
def _add_instance(
    self, ret, gt_det, k, cls_id, bbox, bbox_amodal, ann, trans_output,
    aug_s, calib, pre_cts=None, track_ids=None):
    """Write all training targets for one object into slot ``k`` of ``ret``.

    Fills the class heatmap plus every enabled regression head (wh, reg,
    tracking, ltrb, ltrb_amodal, nuscenes_att, velocity, hps, rot, dep, dim,
    amodel_offset) and mirrors the values into ``gt_det`` for debugging.
    Degenerate (empty) boxes are skipped silently.
    """
    h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
    if h <= 0 or w <= 0:
        return
    radius = gaussian_radius((math.ceil(h), math.ceil(w)))
    radius = max(0, int(radius))
    ct = np.array(
        [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
    ct_int = ct.astype(np.int32)
    ret['cat'][k] = cls_id - 1
    ret['mask'][k] = 1
    if 'wh' in ret:
        ret['wh'][k] = 1. * w, 1. * h
        ret['wh_mask'][k] = 1
    # Flattened index of the center cell in the output feature map.
    ret['ind'][k] = ct_int[1] * self.opt.output_w + ct_int[0]
    # Sub-pixel offset of the true center from the integer cell.
    # NOTE(review): 'reg' is written unconditionally — assumes the reg head
    # is always allocated; confirm against _init_ret / opt.heads.
    ret['reg'][k] = ct - ct_int
    ret['reg_mask'][k] = 1
    draw_umich_gaussian(ret['hm'][cls_id - 1], ct_int, radius)

    gt_det['bboxes'].append(
        np.array([ct[0] - w / 2, ct[1] - h / 2,
                  ct[0] + w / 2, ct[1] + h / 2], dtype=np.float32))
    gt_det['scores'].append(1)
    gt_det['clses'].append(cls_id - 1)
    gt_det['cts'].append(ct)

    if 'tracking' in self.opt.heads:
        # Offset from this frame's integer center to the matched center in
        # the previous frame (same track_id); zero target when unmatched.
        if ann['track_id'] in track_ids:
            pre_ct = pre_cts[track_ids.index(ann['track_id'])]
            ret['tracking_mask'][k] = 1
            ret['tracking'][k] = pre_ct - ct_int
            gt_det['tracking'].append(ret['tracking'][k])
        else:
            gt_det['tracking'].append(np.zeros(2, np.float32))

    if 'ltrb' in self.opt.heads:
        # Distances from the center cell to the (clipped) box edges.
        ret['ltrb'][k] = bbox[0] - ct_int[0], bbox[1] - ct_int[1], \
            bbox[2] - ct_int[0], bbox[3] - ct_int[1]
        ret['ltrb_mask'][k] = 1

    if 'ltrb_amodal' in self.opt.heads:
        # Same, but against the unclipped (amodal) box.
        ret['ltrb_amodal'][k] = \
            bbox_amodal[0] - ct_int[0], bbox_amodal[1] - ct_int[1], \
            bbox_amodal[2] - ct_int[0], bbox_amodal[3] - ct_int[1]
        ret['ltrb_amodal_mask'][k] = 1
        gt_det['ltrb_amodal'].append(bbox_amodal)

    if 'nuscenes_att' in self.opt.heads:
        if ('attributes' in ann) and ann['attributes'] > 0:
            att = int(ann['attributes'] - 1)
            ret['nuscenes_att'][k][att] = 1
            # Supervise only the attribute group this attribute belongs to.
            ret['nuscenes_att_mask'][k][self.nuscenes_att_range[att]] = 1
        gt_det['nuscenes_att'].append(ret['nuscenes_att'][k])

    if 'velocity' in self.opt.heads:
        # -1000 acts as an "invalid velocity" sentinel in the annotations.
        if ('velocity' in ann) and min(ann['velocity']) > -1000:
            ret['velocity'][k] = np.array(ann['velocity'], np.float32)[:3]
            ret['velocity_mask'][k] = 1
        gt_det['velocity'].append(ret['velocity'][k])

    if 'hps' in self.opt.heads:
        self._add_hps(ret, k, ann, gt_det, trans_output, ct_int, bbox, h, w)

    if 'rot' in self.opt.heads:
        self._add_rot(ret, ann, k, gt_det)

    if 'dep' in self.opt.heads:
        if 'depth' in ann:
            ret['dep_mask'][k] = 1
            # Depth target is scaled by the augmentation scale factor.
            ret['dep'][k] = ann['depth'] * aug_s
            gt_det['dep'].append(ret['dep'][k])
        else:
            gt_det['dep'].append(2)

    if 'dim' in self.opt.heads:
        if 'dim' in ann:
            ret['dim_mask'][k] = 1
            ret['dim'][k] = ann['dim']
            gt_det['dim'].append(ret['dim'][k])
        else:
            gt_det['dim'].append([1,1,1])

    if 'amodel_offset' in self.opt.heads:
        if 'amodel_center' in ann:
            amodel_center = affine_transform(ann['amodel_center'], trans_output)
            ret['amodel_offset_mask'][k] = 1
            ret['amodel_offset'][k] = amodel_center - ct_int
            gt_det['amodel_offset'].append(ret['amodel_offset'][k])
        else:
            gt_det['amodel_offset'].append([0, 0])
def _add_hps(self, ret, k, ann, gt_det, trans_output, ct_int, bbox, h, w):
    """Fill keypoint targets for object slot ``k``: center-relative offsets,
    per-joint heatmaps, and sub-pixel joint offsets.

    NOTE(review): visibility handling follows the COCO convention
    (0 = not labeled, 1 = labeled but occluded, 2 = visible) — confirm
    against the dataset actually used.
    """
    num_joints = self.num_joints
    pts = np.array(ann['keypoints'], np.float32).reshape(num_joints, 3) \
        if 'keypoints' in ann else np.zeros((self.num_joints, 3), np.float32)
    if self.opt.simple_radius > 0:
        hp_radius = int(simple_radius(h, w, min_overlap=self.opt.simple_radius))
    else:
        hp_radius = gaussian_radius((math.ceil(h), math.ceil(w)))
        hp_radius = max(0, int(hp_radius))

    for j in range(num_joints):
        pts[j, :2] = affine_transform(pts[j, :2], trans_output)
        if pts[j, 2] > 0:
            if pts[j, 0] >= 0 and pts[j, 0] < self.opt.output_w and \
                pts[j, 1] >= 0 and pts[j, 1] < self.opt.output_h:
                # Keypoint regressed as an offset from the object center.
                ret['hps'][k, j * 2: j * 2 + 2] = pts[j, :2] - ct_int
                ret['hps_mask'][k, j * 2: j * 2 + 2] = 1
                pt_int = pts[j, :2].astype(np.int32)
                ret['hp_offset'][k * num_joints + j] = pts[j, :2] - pt_int
                ret['hp_ind'][k * num_joints + j] = \
                    pt_int[1] * self.opt.output_w + pt_int[0]
                ret['hp_offset_mask'][k * num_joints + j] = 1
                ret['hm_hp_mask'][k * num_joints + j] = 1
                ret['joint'][k * num_joints + j] = j
                draw_umich_gaussian(
                    ret['hm_hp'][j], pt_int, hp_radius)
                if pts[j, 2] == 1:
                    # Labeled-but-occluded joint: mark the peak with the
                    # ignore value and disable heatmap/offset supervision.
                    ret['hm_hp'][j, pt_int[1], pt_int[0]] = self.ignore_val
                    ret['hp_offset_mask'][k * num_joints + j] = 0
                    ret['hm_hp_mask'][k * num_joints + j] = 0
            else:
                # Outside the output resolution: zero the regression target.
                pts[j, :2] *= 0
        else:
            # Unlabeled joint: zero the target and ignore the whole box area
            # in this joint's heatmap.
            pts[j, :2] *= 0
            self._ignore_region(
                ret['hm_hp'][j, int(bbox[1]): int(bbox[3]) + 1,
                             int(bbox[0]): int(bbox[2]) + 1])
    gt_det['hps'].append(pts[:, :2].reshape(num_joints * 2))
def _add_rot(self, ret, ann, k, gt_det):
if 'alpha' in ann:
ret['rot_mask'][k] = 1
alpha = ann['alpha']
if alpha < np.pi / 6. or alpha > 5 * np.pi / 6.:
ret['rotbin'][k, 0] = 1
ret['rotres'][k, 0] = alpha - (-0.5 * np.pi)
if alpha > -np.pi / 6. or alpha < -5 * np.pi / 6.:
ret['rotbin'][k, 1] = 1
ret['rotres'][k, 1] = alpha - (0.5 * np.pi)
gt_det['rot'].append(self._alpha_to_8(ann['alpha']))
else:
gt_det['rot'].append(self._alpha_to_8(0))
def _alpha_to_8(self, alpha):
ret = [0, 0, 0, 1, 0, 0, 0, 1]
if alpha < np.pi / 6. or alpha > 5 * np.pi / 6.:
r = alpha - (-0.5 * np.pi)
ret[1] = 1
ret[2], ret[3] = np.sin(r), np.cos(r)
if alpha > -np.pi / 6. or alpha < -5 * np.pi / 6.:
r = alpha - (0.5 * np.pi)
ret[5] = 1
ret[6], ret[7] = np.sin(r), np.cos(r)
return ret
def _format_gt_det(self, gt_det):
if (len(gt_det['scores']) == 0):
gt_det = {'bboxes': np.array([[0,0,1,1]], dtype=np.float32),
'scores': np.array([1], dtype=np.float32),
'clses': np.array([0], dtype=np.float32),
'cts': np.array([[0, 0]], dtype=np.float32),
'pre_cts': np.array([[0, 0]], dtype=np.float32),
'tracking': np.array([[0, 0]], dtype=np.float32),
'bboxes_amodal': np.array([[0, 0]], dtype=np.float32),
'hps': np.zeros((1, 17, 2), dtype=np.float32),}
gt_det = {k: np.array(gt_det[k], dtype=np.float32) for k in gt_det}
return gt_det
def fake_video_data(self):
    """Wrap a still-image dataset in fake single-frame "videos", in place.

    Every image becomes its own video (video_id == image id, frame_id == 1)
    and every annotation gets a unique track id, so tracking code can run on
    detection-only datasets.
    """
    dataset = self.coco.dataset
    dataset['videos'] = []
    for image in dataset['images']:
        image['video_id'] = image['id']
        image['frame_id'] = 1
        dataset['videos'].append({'id': image['id']})
    # Datasets without annotations (pure inference) simply get no track ids.
    for track_id, ann in enumerate(dataset.get('annotations', []), start=1):
        ann['track_id'] = track_id
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/detector.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import copy
import numpy as np
from progress.bar import Bar
import time
import torch
import math
from model.model import create_model, load_model
from model.decode import generic_decode
from model.utils import flip_tensor, flip_lr_off, flip_lr
from utils.image import get_affine_transform, affine_transform
from utils.image import draw_umich_gaussian, gaussian_radius
from utils.post_process import generic_post_process
from utils.debugger import Debugger
from utils.tracker import Tracker
from dataset.dataset_factory import get_dataset
class Detector(object):
    """Detection + tracking inference wrapper (CenterTrack-style).

    Loads a trained model once; ``run()`` then processes one frame at a time,
    keeping the previous frame tensor and tracker state between calls so the
    network can condition on the prior frame and its rendered heatmap.
    """

    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')

        print('Creating model...')
        self.model = create_model(
            opt.arch, opt.heads, opt.head_conv, opt=opt)
        self.model = load_model(self.model, opt.load_model, opt)
        self.model = self.model.to(opt.device)
        self.model.eval()

        self.opt = opt
        self.trained_dataset = get_dataset(opt.dataset)
        # Input normalization uses the training dataset's statistics.
        self.mean = np.array(
            self.trained_dataset.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(
            self.trained_dataset.std, dtype=np.float32).reshape(1, 1, 3)
        self.pause = not opt.no_pause
        self.rest_focal_length = self.trained_dataset.rest_focal_length \
            if self.opt.test_focal_length < 0 else self.opt.test_focal_length
        self.flip_idx = self.trained_dataset.flip_idx
        self.cnt = 0  # frame counter, used for naming saved debug images
        self.pre_images = None     # previous frame's preprocessed input tensor(s)
        self.pre_image_ori = None  # previous frame's original image (debug only)
        self.tracker = Tracker(opt)
        self.debugger = Debugger(opt=opt, dataset=self.trained_dataset)

    def run(self, image_or_path_or_tensor, meta={}):
        """Run detection (and optionally tracking) on one frame.

        ``image_or_path_or_tensor`` may be a BGR ndarray, an image path, or a
        prefetched batch dict from the test loader. Returns a dict with
        'results' plus per-stage wall-clock timings.

        NOTE(review): the mutable default ``meta={}`` is only read / rebound
        here, never mutated in place, so the shared default is safe.
        """
        load_time, pre_time, net_time, dec_time, post_time = 0, 0, 0, 0, 0
        merge_time, track_time, tot_time, display_time = 0, 0, 0, 0
        self.debugger.clear()
        start_time = time.time()

        # read image
        pre_processed = False
        if isinstance(image_or_path_or_tensor, np.ndarray):
            image = image_or_path_or_tensor
        elif type(image_or_path_or_tensor) == type (''):
            image = cv2.imread(image_or_path_or_tensor)
        else:
            image = image_or_path_or_tensor['image'][0].numpy()
            pre_processed_images = image_or_path_or_tensor
            pre_processed = True

        loaded_time = time.time()
        load_time += (loaded_time - start_time)

        detections = []

        # for multi-scale testing
        for scale in self.opt.test_scales:
            scale_start_time = time.time()
            if not pre_processed:
                # not prefetch testing or demo
                images, meta = self.pre_process(image, scale, meta)
            else:
                # prefetch testing
                images = pre_processed_images['images'][scale][0]
                meta = pre_processed_images['meta'][scale]
                meta = {k: v.numpy()[0] for k, v in meta.items()}
                if 'pre_dets' in pre_processed_images['meta']:
                    meta['pre_dets'] = pre_processed_images['meta']['pre_dets']
                if 'cur_dets' in pre_processed_images['meta']:
                    meta['cur_dets'] = pre_processed_images['meta']['cur_dets']

            images = images.to(self.opt.device, non_blocking=self.opt.non_block_test)

            # initializing tracker
            pre_hms, pre_inds = None, None
            if self.opt.tracking:
                # initialize the first frame
                if self.pre_images is None:
                    print('Initialize tracking!')
                    self.pre_images = images
                    self.tracker.init_track(
                        meta['pre_dets'] if 'pre_dets' in meta else [])
                if self.opt.pre_hm:
                    # render input heatmap from tracker status
                    # pre_inds is not used in the current version.
                    # We used pre_inds for learning an offset from previous image to
                    # the current image.
                    pre_hms, pre_inds = self._get_additional_inputs(
                        self.tracker.tracks, meta, with_hm=not self.opt.zero_pre_hm)

            pre_process_time = time.time()
            pre_time += pre_process_time - scale_start_time

            # run the network
            # output: the output feature maps, only used for visualizing
            # dets: output tensors after extracting peaks
            output, dets, forward_time = self.process(
                images, self.pre_images, pre_hms, pre_inds, return_time=True)
            net_time += forward_time - pre_process_time
            decode_time = time.time()
            dec_time += decode_time - forward_time

            # convert the cropped and 4x downsampled output coordinate system
            # back to the input image coordinate system
            result = self.post_process(dets, meta, scale)
            post_process_time = time.time()
            post_time += post_process_time - decode_time

            detections.append(result)
            if self.opt.debug >= 2:
                self.debug(
                    self.debugger, images, result, output, scale,
                    pre_images=self.pre_images if not self.opt.no_pre_img else None,
                    pre_hms=pre_hms)

        # merge multi-scale testing results
        results = self.merge_outputs(detections)
        # NOTE(review): unconditional CUDA sync — raises on CPU-only builds;
        # confirm CPU inference is expected to work through this path.
        torch.cuda.synchronize()
        end_time = time.time()
        merge_time += end_time - post_process_time

        if self.opt.tracking:
            # public detection mode in MOT challenge
            public_det = meta['cur_dets'] if self.opt.public_det else None
            # add tracking id to results
            results = self.tracker.step(results, public_det)
            self.pre_images = images

        tracking_time = time.time()
        track_time += tracking_time - end_time
        tot_time += tracking_time - start_time

        if self.opt.debug >= 1:
            self.show_results(self.debugger, image, results)
        self.cnt += 1

        show_results_time = time.time()
        display_time += show_results_time - end_time

        # return results and run time
        ret = {'results': results, 'tot': tot_time, 'load': load_time,
               'pre': pre_time, 'net': net_time, 'dec': dec_time,
               'post': post_time, 'merge': merge_time, 'track': track_time,
               'display': display_time}
        if self.opt.save_video:
            try:
                # return debug image for saving video
                ret.update({'generic': self.debugger.imgs['generic']})
            except:
                # best-effort: the debug image may not exist at low debug levels
                pass
        return ret

    def _transform_scale(self, image, scale=1):
        '''
        Prepare input image in different testing modes.
        Currently support: fix short size/ center crop to a fixed size/
        keep original resolution but pad to a multiplication of 32
        '''
        height, width = image.shape[0:2]
        new_height = int(height * scale)
        new_width = int(width * scale)
        if self.opt.fix_short > 0:
            # Fix the short side; round the long side up to a multiple of 64.
            if height < width:
                inp_height = self.opt.fix_short
                inp_width = (int(width / height * self.opt.fix_short) + 63) // 64 * 64
            else:
                inp_height = (int(height / width * self.opt.fix_short) + 63) // 64 * 64
                inp_width = self.opt.fix_short
            c = np.array([width / 2, height / 2], dtype=np.float32)
            s = np.array([width, height], dtype=np.float32)
        elif self.opt.fix_res:
            # Fixed network resolution; the affine warp letterboxes the image.
            inp_height, inp_width = self.opt.input_h, self.opt.input_w
            c = np.array([new_width / 2., new_height / 2.], dtype=np.float32)
            s = max(height, width) * 1.0
            # s = np.array([inp_width, inp_height], dtype=np.float32)
        else:
            # Keep resolution; pad each side up to the next multiple of (pad + 1).
            inp_height = (new_height | self.opt.pad) + 1
            inp_width = (new_width | self.opt.pad) + 1
            c = np.array([new_width // 2, new_height // 2], dtype=np.float32)
            s = np.array([inp_width, inp_height], dtype=np.float32)
        resized_image = cv2.resize(image, (new_width, new_height))
        return resized_image, c, s, inp_width, inp_height, height, width

    def pre_process(self, image, scale, input_meta={}):
        '''
        Crop, resize, and normalize image. Gather meta data for post processing
        and tracking.
        '''
        # NOTE(review): ``scale`` is not forwarded to _transform_scale, so
        # multi-scale testing would reuse scale 1 here — confirm intended
        # (merge_outputs asserts a single test scale anyway).
        resized_image, c, s, inp_width, inp_height, height, width = \
            self._transform_scale(image)
        trans_input = get_affine_transform(c, s, 0, [inp_width, inp_height])
        out_height = inp_height // self.opt.down_ratio
        out_width = inp_width // self.opt.down_ratio
        trans_output = get_affine_transform(c, s, 0, [out_width, out_height])
        inp_image = cv2.warpAffine(
            resized_image, trans_input, (inp_width, inp_height),
            flags=cv2.INTER_LINEAR)
        inp_image = ((inp_image / 255. - self.mean) / self.std).astype(np.float32)

        images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)
        if self.opt.flip_test:
            # Batch a horizontally flipped copy for flip-augmented testing.
            images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
        images = torch.from_numpy(images)
        meta = {'calib': np.array(input_meta['calib'], dtype=np.float32) \
                if 'calib' in input_meta else \
                self._get_default_calib(width, height)}
        meta.update({'c': c, 's': s, 'height': height, 'width': width,
                     'out_height': out_height, 'out_width': out_width,
                     'inp_height': inp_height, 'inp_width': inp_width,
                     'trans_input': trans_input, 'trans_output': trans_output})
        if 'pre_dets' in input_meta:
            meta['pre_dets'] = input_meta['pre_dets']
        if 'cur_dets' in input_meta:
            meta['cur_dets'] = input_meta['cur_dets']
        return images, meta

    def _trans_bbox(self, bbox, trans, width, height):
        '''
        Transform bounding boxes according to image crop.
        '''
        bbox = np.array(copy.deepcopy(bbox), dtype=np.float32)
        bbox[:2] = affine_transform(bbox[:2], trans)
        bbox[2:] = affine_transform(bbox[2:], trans)
        bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, width - 1)
        bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, height - 1)
        return bbox

    def _get_additional_inputs(self, dets, meta, with_hm=True):
        '''
        Render input heatmap from previous trackings.
        '''
        trans_input, trans_output = meta['trans_input'], meta['trans_output']
        inp_width, inp_height = meta['inp_width'], meta['inp_height']
        out_width, out_height = meta['out_width'], meta['out_height']
        input_hm = np.zeros((1, inp_height, inp_width), dtype=np.float32)

        output_inds = []
        for det in dets:
            # Skip weak or inactive tracks.
            if det['score'] < self.opt.pre_thresh or det['active'] == 0:
                continue
            bbox = self._trans_bbox(det['bbox'], trans_input, inp_width, inp_height)
            bbox_out = self._trans_bbox(
                det['bbox'], trans_output, out_width, out_height)
            h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
            if (h > 0 and w > 0):
                radius = gaussian_radius((math.ceil(h), math.ceil(w)))
                radius = max(0, int(radius))
                ct = np.array(
                    [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
                ct_int = ct.astype(np.int32)
                if with_hm:
                    draw_umich_gaussian(input_hm[0], ct_int, radius)
                ct_out = np.array(
                    [(bbox_out[0] + bbox_out[2]) / 2,
                     (bbox_out[1] + bbox_out[3]) / 2], dtype=np.int32)
                output_inds.append(ct_out[1] * out_width + ct_out[0])
        if with_hm:
            input_hm = input_hm[np.newaxis]
            if self.opt.flip_test:
                input_hm = np.concatenate((input_hm, input_hm[:, :, :, ::-1]), axis=0)
            input_hm = torch.from_numpy(input_hm).to(self.opt.device)
        output_inds = np.array(output_inds, np.int64).reshape(1, -1)
        output_inds = torch.from_numpy(output_inds).to(self.opt.device)
        return input_hm, output_inds

    def _get_default_calib(self, width, height):
        """Default 3x4 intrinsic matrix centered on the image."""
        calib = np.array([[self.rest_focal_length, 0, width / 2, 0],
                          [0, self.rest_focal_length, height / 2, 0],
                          [0, 0, 1, 0]])
        return calib

    def _sigmoid_output(self, output):
        """Apply output activations in place: sigmoid on heatmaps, and the
        1/sigmoid - 1 depth decoding scaled by opt.depth_scale."""
        if 'hm' in output:
            output['hm'] = output['hm'].sigmoid_()
        if 'hm_hp' in output:
            output['hm_hp'] = output['hm_hp'].sigmoid_()
        if 'dep' in output:
            output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.
            output['dep'] *= self.opt.depth_scale
        return output

    def _flip_output(self, output):
        """Fuse the flipped second batch element into the first.

        Heads are averaged with their un-flipped counterpart, negated-averaged
        (x-offsets), taken from the unflipped copy only, or merged with
        keypoint-aware flipping for hps / hm_hp.
        """
        average_flips = ['hm', 'wh', 'dep', 'dim']
        neg_average_flips = ['amodel_offset']
        single_flips = ['ltrb', 'nuscenes_att', 'velocity', 'ltrb_amodal', 'reg',
                        'hp_offset', 'rot', 'tracking', 'pre_hm']
        for head in output:
            if head in average_flips:
                output[head] = (output[head][0:1] + flip_tensor(output[head][1:2])) / 2
            if head in neg_average_flips:
                flipped_tensor = flip_tensor(output[head][1:2])
                # x components change sign under a horizontal flip.
                flipped_tensor[:, 0::2] *= -1
                output[head] = (output[head][0:1] + flipped_tensor) / 2
            if head in single_flips:
                output[head] = output[head][0:1]
            if head == 'hps':
                output['hps'] = (output['hps'][0:1] +
                                 flip_lr_off(output['hps'][1:2], self.flip_idx)) / 2
            if head == 'hm_hp':
                output['hm_hp'] = (output['hm_hp'][0:1] + \
                                   flip_lr(output['hm_hp'][1:2], self.flip_idx)) / 2
        return output

    def process(self, images, pre_images=None, pre_hms=None,
                pre_inds=None, return_time=False):
        """Network forward pass + peak decoding.

        Returns (output, dets[, forward_time]); ``dets`` values are moved to
        numpy on the CPU.
        """
        with torch.no_grad():
            torch.cuda.synchronize()
            # The model returns a list of stage outputs; use the last stage.
            output = self.model(images, pre_images, pre_hms)[-1]
            output = self._sigmoid_output(output)
            output.update({'pre_inds': pre_inds})
            if self.opt.flip_test:
                output = self._flip_output(output)
            torch.cuda.synchronize()
            forward_time = time.time()

            dets = generic_decode(output, K=self.opt.K, opt=self.opt)
            torch.cuda.synchronize()
            for k in dets:
                dets[k] = dets[k].detach().cpu().numpy()
        if return_time:
            return output, dets, forward_time
        else:
            return output, dets

    def post_process(self, dets, meta, scale=1):
        """Map decoded detections back to original-image coordinates."""
        dets = generic_post_process(
            self.opt, dets, [meta['c']], [meta['s']],
            meta['out_height'], meta['out_width'], self.opt.num_classes,
            [meta['calib']], meta['height'], meta['width'])
        self.this_calib = meta['calib']

        if scale != 1:
            # Undo the test-scale resizing on box and keypoint coordinates.
            for i in range(len(dets[0])):
                for k in ['bbox', 'hps']:
                    if k in dets[0][i]:
                        dets[0][i][k] = (np.array(
                            dets[0][i][k], np.float32) / scale).tolist()
        return dets[0]

    def merge_outputs(self, detections):
        """Threshold detections; multi-scale merging is not supported."""
        assert len(self.opt.test_scales) == 1, 'multi_scale not supported!'
        results = []
        for i in range(len(detections[0])):
            if detections[0][i]['score'] > self.opt.out_thresh:
                results.append(detections[0][i])
        return results

    def debug(self, debugger, images, dets, output, scale=1,
              pre_images=None, pre_hms=None):
        """Render prediction heatmaps (and previous-frame inputs) for debug."""
        img = images[0].detach().cpu().numpy().transpose(1, 2, 0)
        # Undo normalization to recover a displayable uint8 image.
        img = np.clip(((
            img * self.std + self.mean) * 255.), 0, 255).astype(np.uint8)
        pred = debugger.gen_colormap(output['hm'][0].detach().cpu().numpy())
        debugger.add_blend_img(img, pred, 'pred_hm')
        if 'hm_hp' in output:
            pred = debugger.gen_colormap_hp(
                output['hm_hp'][0].detach().cpu().numpy())
            debugger.add_blend_img(img, pred, 'pred_hmhp')

        if pre_images is not None:
            pre_img = pre_images[0].detach().cpu().numpy().transpose(1, 2, 0)
            pre_img = np.clip(((
                pre_img * self.std + self.mean) * 255.), 0, 255).astype(np.uint8)
            debugger.add_img(pre_img, 'pre_img')
            if pre_hms is not None:
                pre_hm = debugger.gen_colormap(
                    pre_hms[0].detach().cpu().numpy())
                debugger.add_blend_img(pre_img, pre_hm, 'pre_hm')

    def show_results(self, debugger, image, results):
        """Draw boxes, tracks, keypoints and (optionally) 3D results, then
        save or display the debug images depending on opt.debug."""
        debugger.add_img(image, img_id='generic')
        if self.opt.tracking:
            debugger.add_img(self.pre_image_ori if self.pre_image_ori is not None else image,
                             img_id='previous')
            self.pre_image_ori = image

        for j in range(len(results)):
            if results[j]['score'] > self.opt.vis_thresh:
                if 'active' in results[j] and results[j]['active'] == 0:
                    continue
                item = results[j]
                if ('bbox' in item):
                    # Show the track id instead of the score when tracking.
                    sc = item['score'] if self.opt.demo == '' or \
                        not ('tracking_id' in item) else item['tracking_id']
                    sc = item['tracking_id'] if self.opt.show_track_color else sc

                    debugger.add_coco_bbox(
                        item['bbox'], item['class'] - 1, sc, img_id='generic')

                if 'tracking' in item:
                    debugger.add_arrow(item['ct'], item['tracking'], img_id='generic')

                tracking_id = item['tracking_id'] if 'tracking_id' in item else -1
                if 'tracking_id' in item and self.opt.demo == '' and \
                    not self.opt.show_track_color:
                    debugger.add_tracking_id(
                        item['ct'], item['tracking_id'], img_id='generic')

                if (item['class'] in [1, 2]) and 'hps' in item:
                    debugger.add_coco_hp(item['hps'], tracking_id=tracking_id,
                                         img_id='generic')

        if len(results) > 0 and \
            'dep' in results[0] and 'alpha' in results[0] and 'dim' in results[0]:
            debugger.add_3d_detection(
                image if not self.opt.qualitative else cv2.resize(
                    debugger.imgs['pred_hm'], (image.shape[1], image.shape[0])),
                False, results, self.this_calib,
                vis_thresh=self.opt.vis_thresh, img_id='ddd_pred')
            debugger.add_bird_view(
                results, vis_thresh=self.opt.vis_thresh,
                img_id='bird_pred', cnt=self.cnt)
            if self.opt.show_track_color and self.opt.debug == 4:
                del debugger.imgs['generic'], debugger.imgs['bird_pred']
        if 'ddd_pred' in debugger.imgs:
            debugger.imgs['generic'] = debugger.imgs['ddd_pred']
        if self.opt.debug == 4:
            debugger.save_all_imgs(self.opt.debug_dir, prefix='{}'.format(self.cnt))
        else:
            debugger.show_all_imgs(pause=self.pause)

    def reset_tracking(self):
        """Clear all per-video tracking state before a new sequence."""
        self.tracker.reset()
        self.pre_images = None
        self.pre_image_ori = None
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/external/nms.pyx | Cython | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
# ----------------------------------------------------------
# Soft-NMS: Improving Object Detection With One Line of Code
# Copyright (c) University of Maryland, College Park
# Licensed under The MIT License [see LICENSE for details]
# Written by Navaneeth Bodla and Bharat Singh
# ----------------------------------------------------------
import numpy as np
cimport numpy as np
cdef inline np.float32_t max(np.float32_t a, np.float32_t b):
    # Larger of two float32 values (shadows Python's max inside this module).
    if a >= b:
        return a
    return b
cdef inline np.float32_t min(np.float32_t a, np.float32_t b):
    # Smaller of two float32 values (shadows Python's min inside this module).
    if a <= b:
        return a
    return b
def nms(np.ndarray[np.float32_t, ndim=2] dets, float thresh):
    """Greedy hard non-maximum suppression.

    Args:
        dets: (N, 5) float32 array of [x1, y1, x2, y2, score].
        thresh: IoU threshold; a box overlapping an already-kept box by
            >= thresh is suppressed.

    Returns:
        List of indices into ``dets`` of the kept boxes, highest score first.

    FIX: the parameter type ``np.float`` and ``dtype=np.int`` relied on
    deprecated aliases removed in NumPy 1.24; use the C ``float`` type and
    ``np.intp`` (which also matches ``argsort``'s index dtype on all
    platforms, unlike ``np.int_t``/C long on LLP64 systems).
    """
    cdef np.ndarray[np.float32_t, ndim=1] x1 = dets[:, 0]
    cdef np.ndarray[np.float32_t, ndim=1] y1 = dets[:, 1]
    cdef np.ndarray[np.float32_t, ndim=1] x2 = dets[:, 2]
    cdef np.ndarray[np.float32_t, ndim=1] y2 = dets[:, 3]
    cdef np.ndarray[np.float32_t, ndim=1] scores = dets[:, 4]

    # +1 follows the pixel-inclusive box convention of the original code.
    cdef np.ndarray[np.float32_t, ndim=1] areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    cdef np.ndarray[np.intp_t, ndim=1] order = scores.argsort()[::-1]

    cdef int ndets = dets.shape[0]
    cdef np.ndarray[np.intp_t, ndim=1] suppressed = \
            np.zeros((ndets), dtype=np.intp)

    # nominal indices
    cdef int _i, _j
    # sorted indices
    cdef int i, j
    # temp variables for box i's (the box currently under consideration)
    cdef np.float32_t ix1, iy1, ix2, iy2, iarea
    # variables for computing overlap with box j (lower scoring box)
    cdef np.float32_t xx1, yy1, xx2, yy2
    cdef np.float32_t w, h
    cdef np.float32_t inter, ovr

    keep = []
    for _i in range(ndets):
        i = order[_i]
        if suppressed[i] == 1:
            continue
        keep.append(i)
        ix1 = x1[i]
        iy1 = y1[i]
        ix2 = x2[i]
        iy2 = y2[i]
        iarea = areas[i]
        for _j in range(_i + 1, ndets):
            j = order[_j]
            if suppressed[j] == 1:
                continue
            xx1 = max(ix1, x1[j])
            yy1 = max(iy1, y1[j])
            xx2 = min(ix2, x2[j])
            yy2 = min(iy2, y2[j])
            w = max(0.0, xx2 - xx1 + 1)
            h = max(0.0, yy2 - yy1 + 1)
            inter = w * h
            ovr = inter / (iarea + areas[j] - inter)
            if ovr >= thresh:
                suppressed[j] = 1

    return keep
def soft_nms(np.ndarray[float, ndim=2] boxes, float sigma=0.5, float Nt=0.3, float threshold=0.001, unsigned int method=0):
    """Soft-NMS (Bodla et al.): decay overlapping boxes' scores in place.

    Args:
        boxes: (N, 5) float array of [x1, y1, x2, y2, score]. Modified in
            place: boxes are reordered by score and discarded boxes are
            overwritten by compacting from the tail.
        sigma: Gaussian decay parameter (method == 2).
        Nt: overlap threshold for linear decay / hard suppression.
        threshold: boxes whose decayed score falls below this are discarded.
        method: 0 = original hard NMS, 1 = linear decay, 2 = Gaussian decay.

    Returns:
        List of indices [0, N') of the surviving (reordered) boxes.
    """
    cdef unsigned int N = boxes.shape[0]
    cdef float iw, ih, box_area
    cdef float ua
    cdef int pos = 0
    cdef float maxscore = 0
    cdef int maxpos = 0
    cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov

    for i in range(N):
        # Selection-sort step: find the remaining box with the highest score.
        maxscore = boxes[i, 4]
        maxpos = i

        tx1 = boxes[i,0]
        ty1 = boxes[i,1]
        tx2 = boxes[i,2]
        ty2 = boxes[i,3]
        ts = boxes[i,4]

        pos = i + 1
        # get max box
        while pos < N:
            if maxscore < boxes[pos, 4]:
                maxscore = boxes[pos, 4]
                maxpos = pos
            pos = pos + 1

        # add max box as a detection
        boxes[i,0] = boxes[maxpos,0]
        boxes[i,1] = boxes[maxpos,1]
        boxes[i,2] = boxes[maxpos,2]
        boxes[i,3] = boxes[maxpos,3]
        boxes[i,4] = boxes[maxpos,4]

        # swap ith box with position of max box
        boxes[maxpos,0] = tx1
        boxes[maxpos,1] = ty1
        boxes[maxpos,2] = tx2
        boxes[maxpos,3] = ty2
        boxes[maxpos,4] = ts

        tx1 = boxes[i,0]
        ty1 = boxes[i,1]
        tx2 = boxes[i,2]
        ty2 = boxes[i,3]
        ts = boxes[i,4]

        pos = i + 1
        # NMS iterations, note that N changes if detection boxes fall below threshold
        while pos < N:
            x1 = boxes[pos, 0]
            y1 = boxes[pos, 1]
            x2 = boxes[pos, 2]
            y2 = boxes[pos, 3]
            s = boxes[pos, 4]

            area = (x2 - x1 + 1) * (y2 - y1 + 1)
            iw = (min(tx2, x2) - max(tx1, x1) + 1)
            if iw > 0:
                ih = (min(ty2, y2) - max(ty1, y1) + 1)
                if ih > 0:
                    ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
                    ov = iw * ih / ua #iou between max box and detection box

                    if method == 1: # linear
                        if ov > Nt:
                            weight = 1 - ov
                        else:
                            weight = 1
                    elif method == 2: # gaussian
                        weight = np.exp(-(ov * ov)/sigma)
                    else: # original NMS
                        if ov > Nt:
                            weight = 0
                        else:
                            weight = 1

                    boxes[pos, 4] = weight*boxes[pos, 4]

                    # if box score falls below threshold, discard the box by swapping with last box
                    # update N
                    if boxes[pos, 4] < threshold:
                        boxes[pos,0] = boxes[N-1, 0]
                        boxes[pos,1] = boxes[N-1, 1]
                        boxes[pos,2] = boxes[N-1, 2]
                        boxes[pos,3] = boxes[N-1, 3]
                        boxes[pos,4] = boxes[N-1, 4]
                        N = N - 1
                        # Re-examine the box just moved into this slot.
                        pos = pos - 1

            pos = pos + 1

    keep = [i for i in range(N)]
    return keep
def soft_nms_39(np.ndarray[float, ndim=2] boxes, float sigma=0.5, float Nt=0.3, float threshold=0.001, unsigned int method=0):
    """Soft-NMS over N x 39 rows: [x1, y1, x2, y2, score, extras...].

    Identical to soft_nms(), except that columns 5-38 (presumably
    per-keypoint data attached to each detection -- confirm with the
    caller) travel with the box whenever rows are reordered or a
    discarded row is replaced by the last row.
    """
    cdef unsigned int N = boxes.shape[0]
    cdef float iw, ih, box_area
    cdef float ua
    cdef int pos = 0
    cdef float maxscore = 0
    cdef int maxpos = 0
    cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov
    cdef float tmp
    for i in range(N):
        # Selection-sort step: move the highest-scoring remaining box to row i.
        maxscore = boxes[i, 4]
        maxpos = i
        tx1 = boxes[i,0]
        ty1 = boxes[i,1]
        tx2 = boxes[i,2]
        ty2 = boxes[i,3]
        ts = boxes[i,4]
        pos = i + 1
        # get max box
        while pos < N:
            if maxscore < boxes[pos, 4]:
                maxscore = boxes[pos, 4]
                maxpos = pos
            pos = pos + 1
        # add max box as a detection
        boxes[i,0] = boxes[maxpos,0]
        boxes[i,1] = boxes[maxpos,1]
        boxes[i,2] = boxes[maxpos,2]
        boxes[i,3] = boxes[maxpos,3]
        boxes[i,4] = boxes[maxpos,4]
        # swap ith box with position of max box
        boxes[maxpos,0] = tx1
        boxes[maxpos,1] = ty1
        boxes[maxpos,2] = tx2
        boxes[maxpos,3] = ty2
        boxes[maxpos,4] = ts
        # Swap the auxiliary columns along with the box.
        for j in range(5, 39):
            tmp = boxes[i, j]
            boxes[i, j] = boxes[maxpos, j]
            boxes[maxpos, j] = tmp
        tx1 = boxes[i,0]
        ty1 = boxes[i,1]
        tx2 = boxes[i,2]
        ty2 = boxes[i,3]
        ts = boxes[i,4]
        pos = i + 1
        # NMS iterations, note that N changes if detection boxes fall below threshold
        while pos < N:
            x1 = boxes[pos, 0]
            y1 = boxes[pos, 1]
            x2 = boxes[pos, 2]
            y2 = boxes[pos, 3]
            s = boxes[pos, 4]  # NOTE(review): read but never used
            area = (x2 - x1 + 1) * (y2 - y1 + 1)
            iw = (min(tx2, x2) - max(tx1, x1) + 1)
            if iw > 0:
                ih = (min(ty2, y2) - max(ty1, y1) + 1)
                if ih > 0:
                    ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
                    ov = iw * ih / ua #iou between max box and detection box
                    if method == 1: # linear
                        if ov > Nt:
                            weight = 1 - ov
                        else:
                            weight = 1
                    elif method == 2: # gaussian
                        weight = np.exp(-(ov * ov)/sigma)
                    else: # original NMS
                        if ov > Nt:
                            weight = 0
                        else:
                            weight = 1
                    boxes[pos, 4] = weight*boxes[pos, 4]
            # if box score falls below threshold, discard the box by swapping with last box
            # update N
            if boxes[pos, 4] < threshold:
                boxes[pos,0] = boxes[N-1, 0]
                boxes[pos,1] = boxes[N-1, 1]
                boxes[pos,2] = boxes[N-1, 2]
                boxes[pos,3] = boxes[N-1, 3]
                boxes[pos,4] = boxes[N-1, 4]
                # Move the auxiliary columns of the last row as well.
                for j in range(5, 39):
                    tmp = boxes[pos, j]
                    boxes[pos, j] = boxes[N - 1, j]
                    boxes[N - 1, j] = tmp
                N = N - 1
                # Re-examine this row, which now holds the former last box.
                pos = pos - 1
            pos = pos + 1
    keep = [i for i in range(N)]
    return keep
def soft_nms_merge(np.ndarray[float, ndim=2] boxes, float sigma=0.5, float Nt=0.3, float threshold=0.001, unsigned int method=0, float weight_exp=6):
    """Soft-NMS with confidence-weighted box merging.

    Rows are [x1, y1, x2, y2, score, s5, s6, ...]; columns 5 and 6 are
    used as per-side confidence weights (presumably top-left /
    bottom-right corner scores, CornerNet-style -- confirm with the
    caller). Each kept box's coordinates are replaced by a weighted
    average over the boxes it suppressed, where a suppressed box
    contributes with weight (1 - decay) ** weight_exp.
    """
    cdef unsigned int N = boxes.shape[0]
    cdef float iw, ih, box_area
    cdef float ua
    cdef int pos = 0
    cdef float maxscore = 0
    cdef int maxpos = 0
    cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov
    cdef float mx1,mx2,my1,my2,mts,mbs,mw
    for i in range(N):
        # Selection-sort step: move the highest-scoring remaining box to row i.
        maxscore = boxes[i, 4]
        maxpos = i
        tx1 = boxes[i,0]
        ty1 = boxes[i,1]
        tx2 = boxes[i,2]
        ty2 = boxes[i,3]
        ts = boxes[i,4]
        pos = i + 1
        # get max box
        while pos < N:
            if maxscore < boxes[pos, 4]:
                maxscore = boxes[pos, 4]
                maxpos = pos
            pos = pos + 1
        # add max box as a detection
        boxes[i,0] = boxes[maxpos,0]
        boxes[i,1] = boxes[maxpos,1]
        boxes[i,2] = boxes[maxpos,2]
        boxes[i,3] = boxes[maxpos,3]
        boxes[i,4] = boxes[maxpos,4]
        # Initialize the weighted-merge accumulators with the max box itself.
        mx1 = boxes[i, 0] * boxes[i, 5]
        my1 = boxes[i, 1] * boxes[i, 5]
        mx2 = boxes[i, 2] * boxes[i, 6]
        my2 = boxes[i, 3] * boxes[i, 6]
        mts = boxes[i, 5]
        mbs = boxes[i, 6]
        # swap ith box with position of max box
        boxes[maxpos,0] = tx1
        boxes[maxpos,1] = ty1
        boxes[maxpos,2] = tx2
        boxes[maxpos,3] = ty2
        boxes[maxpos,4] = ts
        tx1 = boxes[i,0]
        ty1 = boxes[i,1]
        tx2 = boxes[i,2]
        ty2 = boxes[i,3]
        ts = boxes[i,4]
        pos = i + 1
        # NMS iterations, note that N changes if detection boxes fall below threshold
        while pos < N:
            x1 = boxes[pos, 0]
            y1 = boxes[pos, 1]
            x2 = boxes[pos, 2]
            y2 = boxes[pos, 3]
            s = boxes[pos, 4]  # NOTE(review): read but never used
            area = (x2 - x1 + 1) * (y2 - y1 + 1)
            iw = (min(tx2, x2) - max(tx1, x1) + 1)
            if iw > 0:
                ih = (min(ty2, y2) - max(ty1, y1) + 1)
                if ih > 0:
                    ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
                    ov = iw * ih / ua #iou between max box and detection box
                    if method == 1: # linear
                        if ov > Nt:
                            weight = 1 - ov
                        else:
                            weight = 1
                    elif method == 2: # gaussian
                        weight = np.exp(-(ov * ov)/sigma)
                    else: # original NMS
                        if ov > Nt:
                            weight = 0
                        else:
                            weight = 1
                    # Heavily-suppressed boxes (small weight) contribute more to the merge.
                    mw = (1 - weight) ** weight_exp
                    mx1 = mx1 + boxes[pos, 0] * boxes[pos, 5] * mw
                    my1 = my1 + boxes[pos, 1] * boxes[pos, 5] * mw
                    mx2 = mx2 + boxes[pos, 2] * boxes[pos, 6] * mw
                    my2 = my2 + boxes[pos, 3] * boxes[pos, 6] * mw
                    mts = mts + boxes[pos, 5] * mw
                    mbs = mbs + boxes[pos, 6] * mw
                    boxes[pos, 4] = weight*boxes[pos, 4]
            # if box score falls below threshold, discard the box by swapping with last box
            # update N
            if boxes[pos, 4] < threshold:
                # NOTE(review): unlike soft_nms_39, columns 5/6 are NOT copied
                # from row N-1 here, so the moved box keeps the discarded row's
                # merge weights -- confirm this is intended.
                boxes[pos,0] = boxes[N-1, 0]
                boxes[pos,1] = boxes[N-1, 1]
                boxes[pos,2] = boxes[N-1, 2]
                boxes[pos,3] = boxes[N-1, 3]
                boxes[pos,4] = boxes[N-1, 4]
                N = N - 1
                pos = pos - 1
            pos = pos + 1
        # Replace the kept box's coordinates with the weighted average.
        boxes[i, 0] = mx1 / mts
        boxes[i, 1] = my1 / mts
        boxes[i, 2] = mx2 / mbs
        boxes[i, 3] = my2 / mbs
    keep = [i for i in range(N)]
    return keep
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/external/setup.py | Python | import numpy
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
# Build configuration for the Cython NMS extension module (nms.pyx).
extensions = [
    Extension(
        "nms",
        ["nms.pyx"],
        # Silence noisy warnings emitted by Cython-generated C code.
        extra_compile_args=["-Wno-cpp", "-Wno-unused-function"]
    )
]
setup(
    name="coco",
    ext_modules=cythonize(extensions),
    # NumPy headers are needed because nms.pyx uses np.ndarray buffers.
    include_dirs=[numpy.get_include()]
)
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/logger.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514
import os
import time
import sys
import torch
import subprocess
# tensorboardX is an optional dependency: when it cannot be imported,
# the Logger falls back to plain-text logging only.
USE_TENSORBOARD = True
try:
  import tensorboardX
  print('Using tensorboardX')
except ImportError:
  # Only a missing package should disable tensorboard; any other error
  # from a broken install should surface instead of being swallowed
  # (the original bare `except:` hid everything, even KeyboardInterrupt).
  USE_TENSORBOARD = False
class Logger(object):
  """Records the run configuration and scalar metrics for a training run.

  Scalars go to tensorboardX when available; a timestamped plain-text
  log.txt is written under opt.save_dir in either case.
  """
  def __init__(self, opt):
    """Create a summary writer logging to log_dir."""
    if not os.path.exists(opt.save_dir):
      os.makedirs(opt.save_dir)
    if not os.path.exists(opt.debug_dir):
      os.makedirs(opt.debug_dir)
    time_str = time.strftime('%Y-%m-%d-%H-%M')
    # Snapshot every non-private attribute of opt into opt.txt, together
    # with the git commit, torch and cudnn versions, and the command line.
    args = dict((name, getattr(opt, name)) for name in dir(opt)
                if not name.startswith('_'))
    file_name = os.path.join(opt.save_dir, 'opt.txt')
    with open(file_name, 'wt') as opt_file:
      opt_file.write('==> commit hash: {}\n'.format(
        subprocess.check_output(["git", "describe"])))
      opt_file.write('==> torch version: {}\n'.format(torch.__version__))
      opt_file.write('==> cudnn version: {}\n'.format(
        torch.backends.cudnn.version()))
      opt_file.write('==> Cmd:\n')
      opt_file.write(str(sys.argv))
      opt_file.write('\n==> Opt:\n')
      for k, v in sorted(args.items()):
        opt_file.write('  %s: %s\n' % (str(k), str(v)))
    log_dir = opt.save_dir + '/logs_{}'.format(time_str)
    if USE_TENSORBOARD:
      # SummaryWriter creates log_dir itself.
      self.writer = tensorboardX.SummaryWriter(log_dir=log_dir)
    else:
      if not os.path.exists(os.path.dirname(log_dir)):
        os.mkdir(os.path.dirname(log_dir))
      if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    # The plain-text log is kept even when tensorboard is active.
    self.log = open(log_dir + '/log.txt', 'w')
    try:
      os.system('cp {}/opt.txt {}/'.format(opt.save_dir, log_dir))
    except:
      pass
    # True when the next write() should be prefixed with a timestamp.
    self.start_line = True
  def write(self, txt):
    # Prefix each new log line with a timestamp; a newline anywhere in
    # txt means the following write starts a fresh line.
    if self.start_line:
      time_str = time.strftime('%Y-%m-%d-%H-%M')
      self.log.write('{}: {}'.format(time_str, txt))
    else:
      self.log.write(txt)
    self.start_line = False
    if '\n' in txt:
      self.start_line = True
    self.log.flush()
  def close(self):
    self.log.close()
  def scalar_summary(self, tag, value, step):
    """Log a scalar variable."""
    if USE_TENSORBOARD:
      self.writer.add_scalar(tag, value, step)
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/model/data_parallel.py | Python | import torch
from torch.nn.modules import Module
from torch.nn.parallel.scatter_gather import gather
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.parallel_apply import parallel_apply
from .scatter_gather import scatter_kwargs
class _DataParallel(Module):
    r"""Implements data parallelism at the module level.

    This container parallelizes the application of the given module by
    splitting the input across the specified devices by chunking in the batch
    dimension. In the forward pass, the module is replicated on each device,
    and each replica handles a portion of the input. During the backwards
    pass, gradients from each replica are summed into the original module.

    The batch size should be larger than the number of GPUs used. It should
    also be an integer multiple of the number of GPUs so that each chunk is the
    same size (so that each GPU processes the same number of samples).

    See also: :ref:`cuda-nn-dataparallel-instead`

    Arbitrary positional and keyword inputs are allowed to be passed into
    DataParallel EXCEPT Tensors. All variables will be scattered on dim
    specified (default 0). Primitive types will be broadcasted, but all
    other types will be a shallow copy and can be corrupted if written to in
    the model's forward pass.

    Args:
        module: module to be parallelized
        device_ids: CUDA devices (default: all devices)
        output_device: device location of output (default: device_ids[0])

    Example::

        >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
        >>> output = net(input_var)
    """

    # TODO: update notes/cuda.rst when this class handles 8+ GPUs well

    def __init__(self, module, device_ids=None, output_device=None, dim=0, chunk_sizes=None):
        super(_DataParallel, self).__init__()
        # Without CUDA this wrapper degenerates to a transparent pass-through.
        if not torch.cuda.is_available():
            self.module = module
            self.device_ids = []
            return
        if device_ids is None:
            device_ids = list(range(torch.cuda.device_count()))
        if output_device is None:
            output_device = device_ids[0]
        self.dim = dim
        self.module = module
        self.device_ids = device_ids
        # chunk_sizes: explicit per-GPU batch sizes; forwarded to scatter_kwargs.
        self.chunk_sizes = chunk_sizes
        self.output_device = output_device
        if len(self.device_ids) == 1:
            self.module.cuda(device_ids[0])

    def forward(self, *inputs, **kwargs):
        # CPU fallback: call the wrapped module directly.
        if not self.device_ids:
            return self.module(*inputs, **kwargs)
        inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids, self.chunk_sizes)
        if len(self.device_ids) == 1:
            return self.module(*inputs[0], **kwargs[0])
        # Replicate only onto as many devices as there are input chunks.
        replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
        outputs = self.parallel_apply(replicas, inputs, kwargs)
        return self.gather(outputs, self.output_device)

    def replicate(self, module, device_ids):
        return replicate(module, device_ids)

    def scatter(self, inputs, kwargs, device_ids, chunk_sizes):
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim, chunk_sizes=self.chunk_sizes)

    def parallel_apply(self, replicas, inputs, kwargs):
        return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])

    def gather(self, outputs, output_device):
        return gather(outputs, output_device, dim=self.dim)
def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
    r"""Evaluates module(input) in parallel across the GPUs given in device_ids.

    This is the functional version of the DataParallel module.

    Args:
        module: the module to evaluate in parallel
        inputs: inputs to the module
        device_ids: GPU ids on which to replicate module
        output_device: GPU location of the output Use -1 to indicate the CPU.
            (default: device_ids[0])
    Returns:
        a Variable containing the result of module(input) located on
        output_device
    """
    if not isinstance(inputs, tuple):
        inputs = (inputs,)

    if device_ids is None:
        device_ids = list(range(torch.cuda.device_count()))

    if output_device is None:
        output_device = device_ids[0]

    inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
    # Single device: no replication needed, run the module directly.
    if len(device_ids) == 1:
        return module(*inputs[0], **module_kwargs[0])
    # Replicate only onto as many devices as there are scattered chunks.
    used_device_ids = device_ids[:len(inputs)]
    replicas = replicate(module, used_device_ids)
    outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
    return gather(outputs, output_device, dim)
def DataParallel(module, device_ids=None, output_device=None, dim=0, chunk_sizes=None):
    """Factory for a data-parallel wrapper around `module`.

    Returns the stock ``torch.nn.DataParallel`` unless `chunk_sizes`
    requests uneven per-GPU batch sizes, in which case the custom
    `_DataParallel` (which honors explicit chunk sizes) is used.
    """
    if chunk_sizes is None:
        return torch.nn.DataParallel(module, device_ids, output_device, dim)
    # Uniform chunk sizes are equivalent to the default even scatter.
    if all(size == chunk_sizes[0] for size in chunk_sizes[1:]):
        return torch.nn.DataParallel(module, device_ids, output_device, dim)
    return _DataParallel(module, device_ids, output_device, dim, chunk_sizes)
src/lib/model/decode.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
from .utils import _gather_feat, _tranpose_and_gather_feat
from .utils import _nms, _topk, _topk_channel
def _update_kps_with_hm(
  kps, output, batch, num_joints, K, bboxes=None, scores=None):
  """Refine regressed keypoints using the keypoint heatmap ('hm_hp').

  kps: B x K x (num_joints * 2) regressed joint locations per detection.
  For every joint, the nearest confident heatmap peak replaces the
  regressed location when it lies inside the detection box (or within a
  margin of the keypoints' own bounding box when `bboxes` is None).
  Returns the refined keypoints and a per-detection keypoint score; when
  the model has no 'hm_hp' head, returns (kps, kps) unchanged.
  """
  if 'hm_hp' in output:
    hm_hp = output['hm_hp']
    hm_hp = _nms(hm_hp)
    thresh = 0.2
    kps = kps.view(batch, K, num_joints, 2).permute(
        0, 2, 1, 3).contiguous() # b x J x K x 2
    reg_kps = kps.unsqueeze(3).expand(batch, num_joints, K, K, 2)
    hm_score, hm_inds, hm_ys, hm_xs = _topk_channel(hm_hp, K=K) # b x J x K
    # Sub-pixel refinement of the heatmap peaks, if an offset head exists.
    if 'hp_offset' in output or 'reg' in output:
      hp_offset = output['hp_offset'] if 'hp_offset' in output \
                  else output['reg']
      hp_offset = _tranpose_and_gather_feat(
          hp_offset, hm_inds.view(batch, -1))
      hp_offset = hp_offset.view(batch, num_joints, K, 2)
      hm_xs = hm_xs + hp_offset[:, :, :, 0]
      hm_ys = hm_ys + hp_offset[:, :, :, 1]
    else:
      hm_xs = hm_xs + 0.5
      hm_ys = hm_ys + 0.5

    # Keep only peaks above thresh; push rejected peaks far away so they
    # can never win the nearest-peak assignment below.
    mask = (hm_score > thresh).float()
    hm_score = (1 - mask) * -1 + mask * hm_score
    hm_ys = (1 - mask) * (-10000) + mask * hm_ys
    hm_xs = (1 - mask) * (-10000) + mask * hm_xs
    hm_kps = torch.stack([hm_xs, hm_ys], dim=-1).unsqueeze(
        2).expand(batch, num_joints, K, K, 2)
    # For each regressed joint, pick the closest heatmap peak.
    dist = (((reg_kps - hm_kps) ** 2).sum(dim=4) ** 0.5)
    min_dist, min_ind = dist.min(dim=3) # b x J x K
    hm_score = hm_score.gather(2, min_ind).unsqueeze(-1) # b x J x K x 1
    min_dist = min_dist.unsqueeze(-1)
    min_ind = min_ind.view(batch, num_joints, K, 1, 1).expand(
        batch, num_joints, K, 1, 2)
    hm_kps = hm_kps.gather(3, min_ind)
    hm_kps = hm_kps.view(batch, num_joints, K, 2)
    # Reject snapped peaks that are low-confidence or fall outside the box.
    mask = (hm_score < thresh)
    if bboxes is not None:
      l = bboxes[:, :, 0].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
      t = bboxes[:, :, 1].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
      r = bboxes[:, :, 2].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
      b = bboxes[:, :, 3].view(batch, 1, K, 1).expand(batch, num_joints, K, 1)
      mask = (hm_kps[..., 0:1] < l) + (hm_kps[..., 0:1] > r) + \
             (hm_kps[..., 1:2] < t) + (hm_kps[..., 1:2] > b) + mask
    else:
      # No boxes available: use the regressed keypoints' own extent,
      # expanded by a 25% margin, as the validity region.
      l = kps[:, :, :, 0:1].min(dim=1, keepdim=True)[0]
      t = kps[:, :, :, 1:2].min(dim=1, keepdim=True)[0]
      r = kps[:, :, :, 0:1].max(dim=1, keepdim=True)[0]
      b = kps[:, :, :, 1:2].max(dim=1, keepdim=True)[0]
      margin = 0.25
      l = l - (r - l) * margin
      r = r + (r - l) * margin
      t = t - (b - t) * margin
      b = b + (b - t) * margin
      mask = (hm_kps[..., 0:1] < l) + (hm_kps[..., 0:1] > r) + \
             (hm_kps[..., 1:2] < t) + (hm_kps[..., 1:2] > b) + mask
    # sc = (kps[:, :, :, :].max(dim=1, keepdim=True) - kps[:, :, :, :].min(dim=1))
    # mask = mask + (min_dist > 10)
    mask = (mask > 0).float()
    # Rejected joints keep the detection score; accepted ones take the peak score.
    kps_score = (1 - mask) * hm_score + mask * \
                scores.unsqueeze(-1).expand(batch, num_joints, K, 1) # bJK1
    kps_score = scores * kps_score.mean(dim=1).view(batch, K)
    # kps_score[scores < 0.1] = 0
    mask = mask.expand(batch, num_joints, K, 2)
    # Rejected joints keep the regressed location; accepted ones snap to the peak.
    kps = (1 - mask) * hm_kps + mask * kps
    kps = kps.permute(0, 2, 1, 3).contiguous().view(
        batch, K, num_joints * 2)
    return kps, kps_score
  else:
    return kps, kps
def generic_decode(output, K=100, opt=None):
  """Decode raw network head maps into top-K detections.

  Returns a dict with 'scores', 'clses', integer center coordinates
  ('xs'/'ys'/'cts') and, when the corresponding heads are present in
  `output`, 'bboxes', dense regression heads (tracking, depth, rotation,
  ...), keypoints ('hps'/'kps_score') and previous-frame centers.
  Returns {} if there is no 'hm' head.
  """
  if not ('hm' in output):
    return {}

  if opt.zero_tracking:
    output['tracking'] *= 0

  heat = output['hm']
  batch, cat, height, width = heat.size()

  heat = _nms(heat)
  # Top-K peaks across all categories; xs0/ys0 are integer grid coords.
  scores, inds, clses, ys0, xs0 = _topk(heat, K=K)

  clses = clses.view(batch, K)
  scores = scores.view(batch, K)
  bboxes = None
  cts = torch.cat([xs0.unsqueeze(2), ys0.unsqueeze(2)], dim=2)
  ret = {'scores': scores, 'clses': clses.float(),
         'xs': xs0, 'ys': ys0, 'cts': cts}

  # Sub-pixel center refinement when a 'reg' head exists; else center pixels.
  if 'reg' in output:
    reg = output['reg']
    reg = _tranpose_and_gather_feat(reg, inds)
    reg = reg.view(batch, K, 2)
    xs = xs0.view(batch, K, 1) + reg[:, :, 0:1]
    ys = ys0.view(batch, K, 1) + reg[:, :, 1:2]
  else:
    xs = xs0.view(batch, K, 1) + 0.5
    ys = ys0.view(batch, K, 1) + 0.5

  if 'wh' in output:
    wh = output['wh']
    wh = _tranpose_and_gather_feat(wh, inds) # B x K x (F)
    # wh = wh.view(batch, K, -1)
    wh = wh.view(batch, K, 2)
    wh[wh < 0] = 0
    if wh.size(2) == 2 * cat: # cat spec
      # Category-specific width/height: pick the columns of each box's class.
      wh = wh.view(batch, K, -1, 2)
      cats = clses.view(batch, K, 1, 1).expand(batch, K, 1, 2)
      wh = wh.gather(2, cats.long()).squeeze(2) # B x K x 2
    else:
      pass
    bboxes = torch.cat([xs - wh[..., 0:1] / 2,
                        ys - wh[..., 1:2] / 2,
                        xs + wh[..., 0:1] / 2,
                        ys + wh[..., 1:2] / 2], dim=2)
    ret['bboxes'] = bboxes
    # print('ret bbox', ret['bboxes'])

  # Left/top/right/bottom offsets relative to the integer center.
  if 'ltrb' in output:
    ltrb = output['ltrb']
    ltrb = _tranpose_and_gather_feat(ltrb, inds) # B x K x 4
    ltrb = ltrb.view(batch, K, 4)
    bboxes = torch.cat([xs0.view(batch, K, 1) + ltrb[..., 0:1],
                        ys0.view(batch, K, 1) + ltrb[..., 1:2],
                        xs0.view(batch, K, 1) + ltrb[..., 2:3],
                        ys0.view(batch, K, 1) + ltrb[..., 3:4]], dim=2)
    ret['bboxes'] = bboxes

  # Any remaining dense regression heads are just gathered at the peaks.
  regression_heads = ['tracking', 'dep', 'rot', 'dim', 'amodel_offset',
    'nuscenes_att', 'velocity']

  for head in regression_heads:
    if head in output:
      ret[head] = _tranpose_and_gather_feat(
        output[head], inds).view(batch, K, -1)

  if 'ltrb_amodal' in output:
    ltrb_amodal = output['ltrb_amodal']
    ltrb_amodal = _tranpose_and_gather_feat(ltrb_amodal, inds) # B x K x 4
    ltrb_amodal = ltrb_amodal.view(batch, K, 4)
    bboxes_amodal = torch.cat([xs0.view(batch, K, 1) + ltrb_amodal[..., 0:1],
                               ys0.view(batch, K, 1) + ltrb_amodal[..., 1:2],
                               xs0.view(batch, K, 1) + ltrb_amodal[..., 2:3],
                               ys0.view(batch, K, 1) + ltrb_amodal[..., 3:4]], dim=2)
    ret['bboxes_amodal'] = bboxes_amodal
    # Amodal (full-extent) boxes override the visible-extent boxes.
    ret['bboxes'] = bboxes_amodal

  if 'hps' in output:
    kps = output['hps']
    num_joints = kps.shape[1] // 2
    kps = _tranpose_and_gather_feat(kps, inds)
    kps = kps.view(batch, K, num_joints * 2)
    # Joint offsets are relative to the integer center; make them absolute.
    kps[..., ::2] += xs0.view(batch, K, 1).expand(batch, K, num_joints)
    kps[..., 1::2] += ys0.view(batch, K, 1).expand(batch, K, num_joints)
    kps, kps_score = _update_kps_with_hm(
      kps, output, batch, num_joints, K, bboxes, scores)
    ret['hps'] = kps
    ret['kps_score'] = kps_score

  if 'pre_inds' in output and output['pre_inds'] is not None:
    pre_inds = output['pre_inds'] # B x pre_K
    pre_K = pre_inds.shape[1]
    # Recover (row, col) grid coordinates from the flattened indices.
    pre_ys = (pre_inds / width).int().float()
    pre_xs = (pre_inds % width).int().float()

    ret['pre_cts'] = torch.cat(
      [pre_xs.unsqueeze(2), pre_ys.unsqueeze(2)], dim=2)
  return ret
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/model/losses.py | Python | # ------------------------------------------------------------------------------
# Portions of this code are from
# CornerNet (https://github.com/princeton-vl/CornerNet)
# Copyright (c) 2018, University of Michigan
# Licensed under the BSD 3-Clause License
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
from .utils import _tranpose_and_gather_feat, _nms, _topk
import torch.nn.functional as F
from utils.image import draw_umich_gaussian
def _slow_neg_loss(pred, gt):
'''focal loss from CornerNet'''
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt[neg_inds], 4)
loss = 0
pos_pred = pred[pos_inds]
neg_pred = pred[neg_inds]
pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2)
neg_loss = torch.log(1 - neg_pred) * torch.pow(neg_pred, 2) * neg_weights
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if pos_pred.nelement() == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def _neg_loss(pred, gt):
''' Reimplemented focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
'''
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def _only_neg_loss(pred, gt):
gt = torch.pow(1 - gt, 4)
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * gt
return neg_loss.sum()
class FastFocalLoss(nn.Module):
  '''
  Reimplemented focal loss, exactly the same as the CornerNet version.
  Faster and costs much less memory.
  '''
  def __init__(self, opt=None):
    super(FastFocalLoss, self).__init__()
    # Dense negative term is shared with the slower implementations.
    self.only_neg_loss = _only_neg_loss
  def forward(self, out, target, ind, mask, cat):
    '''
    Arguments:
      out, target: B x C x H x W
      ind, mask: B x M
      cat (category id for peaks): B x M
    '''
    # Negative term over the full map; positive term only at the M
    # annotated peak locations, gathered by flat index `ind`.
    neg_loss = self.only_neg_loss(out, target)
    pos_pred_pix = _tranpose_and_gather_feat(out, ind) # B x M x C
    pos_pred = pos_pred_pix.gather(2, cat.unsqueeze(2)) # B x M
    num_pos = mask.sum()
    pos_loss = torch.log(pos_pred) * torch.pow(1 - pos_pred, 2) * \
               mask.unsqueeze(2)
    pos_loss = pos_loss.sum()
    # Normalize by the number of positives; with none, use the negative part.
    if num_pos == 0:
      return - neg_loss
    return - (pos_loss + neg_loss) / num_pos
def _reg_loss(regr, gt_regr, mask):
''' L1 regression loss
Arguments:
regr (batch x max_objects x dim)
gt_regr (batch x max_objects x dim)
mask (batch x max_objects)
'''
num = mask.float().sum()
mask = mask.unsqueeze(2).expand_as(gt_regr).float()
regr = regr * mask
gt_regr = gt_regr * mask
regr_loss = nn.functional.smooth_l1_loss(regr, gt_regr, reduction='sum')
regr_loss = regr_loss / (num + 1e-4)
return regr_loss
class RegWeightedL1Loss(nn.Module):
  """Masked L1 loss on a regression head, gathered at object centers."""
  def __init__(self):
    super(RegWeightedL1Loss, self).__init__()

  def forward(self, output, mask, ind, target):
    # output: B x F x H x W dense head; ind selects the M object centers,
    # mask zeroes out padded/unsupervised entries.
    pred = _tranpose_and_gather_feat(output, ind)
    # loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')
    loss = F.l1_loss(pred * mask, target * mask, reduction='sum')
    # Normalize by the number of supervised entries (eps avoids div by 0).
    loss = loss / (mask.sum() + 1e-4)
    return loss
class WeightedBCELoss(nn.Module):
  """Masked binary cross-entropy (with logits) at gathered object centers."""
  def __init__(self):
    super(WeightedBCELoss, self).__init__()
    # Per-element loss so the mask can be applied before reduction.
    self.bceloss = torch.nn.BCEWithLogitsLoss(reduction='none')

  def forward(self, output, mask, ind, target):
    # output: B x F x H x W
    # ind: B x M
    # mask: B x M x F
    # target: B x M x F
    pred = _tranpose_and_gather_feat(output, ind) # B x M x F
    loss = mask * self.bceloss(pred, target)
    # Normalize by the number of supervised entries (eps avoids div by 0).
    loss = loss.sum() / (mask.sum() + 1e-4)
    return loss
class BinRotLoss(nn.Module):
  """Two-bin rotation loss: bin classification plus sin/cos residuals."""
  def __init__(self):
    super(BinRotLoss, self).__init__()

  def forward(self, output, mask, ind, rotbin, rotres):
    # Gather the 8-dim rotation prediction at each object center and
    # delegate to compute_rot_loss for the bin + residual terms.
    pred = _tranpose_and_gather_feat(output, ind)
    loss = compute_rot_loss(pred, rotbin, rotres, mask)
    return loss
def compute_res_loss(output, target):
    """Smooth-L1 loss on the sin/cos rotation residuals, averaged over elements."""
    # 'elementwise_mean' was removed in PyTorch 1.0; 'mean' is its exact
    # replacement, so the original call raised ValueError on modern torch.
    return F.smooth_l1_loss(output, target, reduction='mean')
def compute_bin_loss(output, target, mask):
    """Cross-entropy for one orientation bin with invalid objects masked out.

    output: (n, 2) bin logits; target: (n,) bin labels; mask: (n, 1)
    validity flags broadcast over the logits.
    """
    mask = mask.expand_as(output)
    # Masked rows get all-zero logits (original behavior kept as-is).
    output = output * mask.float()
    # 'elementwise_mean' was removed in PyTorch 1.0; 'mean' is its exact
    # replacement, so the original call raised ValueError on modern torch.
    return F.cross_entropy(output, target, reduction='mean')
def compute_rot_loss(output, target_bin, target_res, mask):
    """Total two-bin rotation loss: bin cross-entropy + sin/cos residuals.

    The residual loss for each bin is only computed over the objects whose
    ground truth actually falls in that bin (nonzero target_bin entries).
    """
    # output: (B, 128, 8) [bin1_cls[0], bin1_cls[1], bin1_sin, bin1_cos,
    #                 bin2_cls[0], bin2_cls[1], bin2_sin, bin2_cos]
    # target_bin: (B, 128, 2) [bin1_cls, bin2_cls]
    # target_res: (B, 128, 2) [bin1_res, bin2_res]
    # mask: (B, 128, 1)
    # Flatten batch and object dimensions together.
    output = output.view(-1, 8)
    target_bin = target_bin.view(-1, 2)
    target_res = target_res.view(-1, 2)
    mask = mask.view(-1, 1)
    loss_bin1 = compute_bin_loss(output[:, 0:2], target_bin[:, 0], mask)
    loss_bin2 = compute_bin_loss(output[:, 4:6], target_bin[:, 1], mask)
    loss_res = torch.zeros_like(loss_bin1)
    # Residuals for bin 1, restricted to objects assigned to bin 1.
    if target_bin[:, 0].nonzero().shape[0] > 0:
        idx1 = target_bin[:, 0].nonzero()[:, 0]
        valid_output1 = torch.index_select(output, 0, idx1.long())
        valid_target_res1 = torch.index_select(target_res, 0, idx1.long())
        loss_sin1 = compute_res_loss(
          valid_output1[:, 2], torch.sin(valid_target_res1[:, 0]))
        loss_cos1 = compute_res_loss(
          valid_output1[:, 3], torch.cos(valid_target_res1[:, 0]))
        loss_res += loss_sin1 + loss_cos1
    # Residuals for bin 2, restricted to objects assigned to bin 2.
    if target_bin[:, 1].nonzero().shape[0] > 0:
        idx2 = target_bin[:, 1].nonzero()[:, 0]
        valid_output2 = torch.index_select(output, 0, idx2.long())
        valid_target_res2 = torch.index_select(target_res, 0, idx2.long())
        loss_sin2 = compute_res_loss(
          valid_output2[:, 6], torch.sin(valid_target_res2[:, 1]))
        loss_cos2 = compute_res_loss(
          valid_output2[:, 7], torch.cos(valid_target_res2[:, 1]))
        loss_res += loss_sin2 + loss_cos2
    return loss_bin1 + loss_bin2 + loss_res
src/lib/model/model.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torchvision.models as models
import torch
import torch.nn as nn
import os
from .networks.dla import DLASeg
from .networks.resdcn import PoseResDCN
from .networks.resnet import PoseResNet
from .networks.dlav0 import DLASegv0
from .networks.generic_network import GenericNetwork
# Maps the prefix of opt.arch (the part before '_', e.g. 'dla' in 'dla_34')
# to its backbone class; consumed by create_model() below.
_network_factory = {
  'resdcn': PoseResDCN,
  'dla': DLASeg,
  'res': PoseResNet,
  'dlav0': DLASegv0,
  'generic': GenericNetwork
}
def create_model(arch, head, head_conv, opt=None):
  """Instantiate a backbone from an arch string like 'dla_34' or 'generic'.

  The part before the first '_' names the network family; the part after
  it (when present) is the layer count passed to the constructor.
  """
  family, sep, depth = arch.partition('_')
  num_layers = int(depth) if sep else 0
  model_class = _network_factory[family]
  return model_class(num_layers, heads=head, head_convs=head_conv, opt=opt)
def load_model(model, model_path, opt, optimizer=None):
  """Load weights (and optionally optimizer state) from a checkpoint file.

  'module.'-prefixed keys from DataParallel checkpoints are stripped;
  parameters whose shapes mismatch are reported and either partially
  reused (opt.reuse_hm) or skipped, so checkpoints from related tasks can
  still be loaded. Returns `model`, or `(model, optimizer, start_epoch)`
  when an optimizer is given.
  """
  start_epoch = 0
  checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
  print('loaded {}, epoch {}'.format(model_path, checkpoint['epoch']))
  state_dict_ = checkpoint['state_dict']
  state_dict = {}

  # convert data_parallal to model
  for k in state_dict_:
    if k.startswith('module') and not k.startswith('module_list'):
      state_dict[k[7:]] = state_dict_[k]
    else:
      state_dict[k] = state_dict_[k]
  model_state_dict = model.state_dict()

  # check loaded parameters and created model parameters
  for k in state_dict:
    if k in model_state_dict:
      if (state_dict[k].shape != model_state_dict[k].shape) or \
        (opt.reset_hm and k.startswith('hm') and (state_dict[k].shape[0] in [80, 1])):
        if opt.reuse_hm:
          print('Reusing parameter {}, required shape{}, '\
                'loaded shape{}.'.format(
            k, model_state_dict[k].shape, state_dict[k].shape))
          # BUG FIX: the original compared state_dict[k].shape[0] with
          # itself, making the first branch unreachable; compare against
          # the model's shape so smaller checkpoints fill a prefix and
          # larger ones are truncated.
          if state_dict[k].shape[0] < model_state_dict[k].shape[0]:
            model_state_dict[k][:state_dict[k].shape[0]] = state_dict[k]
          else:
            model_state_dict[k] = state_dict[k][:model_state_dict[k].shape[0]]
          state_dict[k] = model_state_dict[k]
        else:
          print('Skip loading parameter {}, required shape{}, '\
                'loaded shape{}.'.format(
            k, model_state_dict[k].shape, state_dict[k].shape))
          state_dict[k] = model_state_dict[k]
    else:
      print('Drop parameter {}.'.format(k))
  for k in model_state_dict:
    if not (k in state_dict):
      print('No param {}.'.format(k))
      state_dict[k] = model_state_dict[k]
  model.load_state_dict(state_dict, strict=False)

  # resume optimizer parameters
  if optimizer is not None and opt.resume:
    if 'optimizer' in checkpoint:
      # optimizer.load_state_dict(checkpoint['optimizer'])
      start_epoch = checkpoint['epoch']
      start_lr = opt.lr
      # Replay the learning-rate schedule up to the resumed epoch.
      for step in opt.lr_step:
        if start_epoch >= step:
          start_lr *= 0.1
      for param_group in optimizer.param_groups:
        param_group['lr'] = start_lr
      print('Resumed optimizer with start lr', start_lr)
    else:
      print('No optimizer parameters in checkpoint.')
  if optimizer is not None:
    return model, optimizer, start_epoch
  else:
    return model
def save_model(path, epoch, model, optimizer=None):
  """Serialize a training checkpoint to `path`.

  The checkpoint dict holds 'epoch' and 'state_dict', plus 'optimizer'
  when an optimizer is supplied.
  """
  # Unwrap DataParallel so keys are stored without the 'module.' prefix.
  net = model.module if isinstance(model, torch.nn.DataParallel) else model
  payload = {'epoch': epoch,
             'state_dict': net.state_dict()}
  if optimizer is not None:
    payload['optimizer'] = optimizer.state_dict()
  torch.save(payload, path)
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/model/networks/backbones/dla.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import logging
import numpy as np
from os.path import join
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
# Momentum shared by every BatchNorm2d layer in this backbone.
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):
    """Build the download URL of a pretrained DLA checkpoint on dl.yf.io."""
    filename = '%s-%s.pth' % (name, hash)
    return join('http://dl.yf.io/dla/models', data, filename)
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1."""
    return nn.Conv2d(
        in_planes, out_planes,
        kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
    """Two 3x3 convs with BN and ReLU, plus an additive skip connection."""

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(BasicBlock, self).__init__()
        # Attribute names and registration order are load-bearing: they
        # determine checkpoint keys, so they must not change.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
                               stride=stride, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.stride = stride

    def forward(self, x, residual=None):
        """conv-bn-relu, conv-bn, add the skip, final ReLU."""
        # Callers (Tree) may pass a projected residual; default to the input.
        skip = x if residual is None else residual
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y += skip
        return self.relu(y)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (internal width planes/2)."""
    expansion = 2

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(Bottleneck, self).__init__()
        # Channel count of the narrow middle stage.
        bottle_planes = planes // Bottleneck.expansion
        self.conv1 = nn.Conv2d(inplanes, bottle_planes,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
                               stride=stride, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(bottle_planes, planes,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

    def forward(self, x, residual=None):
        """Run the three conv stages, add the skip, apply the final ReLU."""
        skip = x if residual is None else residual
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += skip
        return self.relu(y)
class BottleneckX(nn.Module):
    """ResNeXt-style grouped bottleneck block (32 groups in the 3x3 stage)."""
    expansion = 2
    cardinality = 32

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(BottleneckX, self).__init__()
        cardinality = BottleneckX.cardinality
        # Middle width scales with cardinality (identity for cardinality=32).
        bottle_planes = planes * cardinality // 32
        self.conv1 = nn.Conv2d(inplanes, bottle_planes,
                               kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
                               stride=stride, padding=dilation, bias=False,
                               dilation=dilation, groups=cardinality)
        self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(bottle_planes, planes,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

    def forward(self, x, residual=None):
        """Run the three conv stages, add the skip, apply the final ReLU."""
        skip = x if residual is None else residual
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        y += skip
        return self.relu(y)
class Root(nn.Module):
    """DLA aggregation node: concat children, 1x1 conv + BN, optional skip, ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size, residual):
        super(Root, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels, 1,
            stride=1, bias=False, padding=(kernel_size - 1) // 2)
        self.bn = nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.residual = residual

    def forward(self, *x):
        """Fuse all child feature maps along the channel dimension."""
        children = x
        out = self.bn(self.conv(torch.cat(children, 1)))
        if self.residual:
            # Skip connection from the first child only.
            out += children[0]
        return self.relu(out)
class Tree(nn.Module):
    # Recursive DLA aggregation tree: two sub-trees (plain blocks at the
    # leaves) whose outputs are merged by a Root node.  `level_root` also
    # feeds the (downsampled) input into the root; `root_dim` tracks how
    # many channels the root concatenates.
    def __init__(self, levels, block, in_channels, out_channels, stride=1,
                 level_root=False, root_dim=0, root_kernel_size=1,
                 dilation=1, root_residual=False):
        super(Tree, self).__init__()
        # default root input: the two sub-tree outputs
        if root_dim == 0:
            root_dim = 2 * out_channels
        if level_root:
            root_dim += in_channels
        if levels == 1:
            self.tree1 = block(in_channels, out_channels, stride,
                               dilation=dilation)
            self.tree2 = block(out_channels, out_channels, 1,
                               dilation=dilation)
        else:
            # recurse; only the innermost level owns a Root
            self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
                              stride, root_dim=0,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
            self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
                              root_dim=root_dim + out_channels,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
        if levels == 1:
            self.root = Root(root_dim, out_channels, root_kernel_size,
                             root_residual)
        self.level_root = level_root
        self.root_dim = root_dim
        self.downsample = None
        self.project = None
        self.levels = levels
        if stride > 1:
            self.downsample = nn.MaxPool2d(stride, stride=stride)
        if in_channels != out_channels:
            # 1x1 projection so the skip path matches the output channels
            self.project = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
            )
    def forward(self, x, residual=None, children=None):
        # `children` accumulates the feature maps aggregated at the root
        children = [] if children is None else children
        bottom = self.downsample(x) if self.downsample else x
        # NOTE: the incoming `residual` argument is overwritten here
        residual = self.project(bottom) if self.project else bottom
        if self.level_root:
            children.append(bottom)
        x1 = self.tree1(x, residual)
        if self.levels == 1:
            x2 = self.tree2(x1)
            x = self.root(x2, x1, *children)
        else:
            children.append(x1)
            x = self.tree2(x1, children=children)
        return x
class DLA(nn.Module):
    """Deep Layer Aggregation backbone.

    `forward` returns the outputs of all six levels as a list so decoder
    necks can aggregate multi-scale features.  When `opt.pre_img` /
    `opt.pre_hm` are set, extra stems for a previous-frame image / heatmap
    are created and fused into the base features.
    """

    def __init__(self, levels, channels,
                 block=BasicBlock, residual_root=False,
                 opt=None):
        super(DLA, self).__init__()
        self.channels = channels
        self.base_layer = nn.Sequential(
            nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
                      padding=3, bias=False),
            nn.BatchNorm2d(channels[0], momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True))
        self.level0 = self._make_conv_level(
            channels[0], channels[0], levels[0])
        self.level1 = self._make_conv_level(
            channels[0], channels[1], levels[1], stride=2)
        self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
                           level_root=False,
                           root_residual=residual_root)
        self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
                           level_root=True, root_residual=residual_root)
        self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
                           level_root=True, root_residual=residual_root)
        self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
                           level_root=True, root_residual=residual_root)
        # Bug fix: `opt` defaults to None, so it must be checked before its
        # attributes are read (the original dereferenced `opt`
        # unconditionally and raised AttributeError without an opt object).
        if opt is not None and opt.pre_img:
            print('adding pre_img layer...')
            self.pre_img_layer = nn.Sequential(
                nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
                          padding=3, bias=False),
                nn.BatchNorm2d(channels[0], momentum=BN_MOMENTUM),
                nn.ReLU(inplace=True))
        if opt is not None and opt.pre_hm:
            print('adding pre_hm layer...')
            self.pre_hm_layer = nn.Sequential(
                nn.Conv2d(1, channels[0], kernel_size=7, stride=1,
                          padding=3, bias=False),
                nn.BatchNorm2d(channels[0], momentum=BN_MOMENTUM),
                nn.ReLU(inplace=True))

    def _make_level(self, block, inplanes, planes, blocks, stride=1):
        """Stack `blocks` residual blocks with an optional pooled projection.

        NOTE(review): unused by __init__; blocks after the first are built
        with `inplanes` rather than `planes` — confirm before reusing.
        """
        downsample = None
        if stride != 1 or inplanes != planes:
            downsample = nn.Sequential(
                nn.MaxPool2d(stride, stride=stride),
                nn.Conv2d(inplanes, planes,
                          kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
            )
        layers = []
        layers.append(block(inplanes, planes, stride, downsample=downsample))
        for i in range(1, blocks):
            layers.append(block(inplanes, planes))
        return nn.Sequential(*layers)

    def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
        """Stack `convs` conv-BN-ReLU units; only the first may stride."""
        modules = []
        for i in range(convs):
            modules.extend([
                nn.Conv2d(inplanes, planes, kernel_size=3,
                          stride=stride if i == 0 else 1,
                          padding=dilation, bias=False, dilation=dilation),
                nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
                nn.ReLU(inplace=True)])
            inplanes = planes
        return nn.Sequential(*modules)

    def forward(self, x, pre_img=None, pre_hm=None):
        """Return the list of level0..level5 feature maps."""
        y = []
        x = self.base_layer(x)
        if pre_img is not None:
            x = x + self.pre_img_layer(pre_img)
        if pre_hm is not None:
            x = x + self.pre_hm_layer(pre_hm)
        for i in range(6):
            x = getattr(self, 'level{}'.format(i))(x)
            y.append(x)
        return y

    def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'):
        """Load classifier weights from a local .pth file or the DLA model zoo.

        Non-matching keys are skipped (strict=False).  Requires network
        access when `name` is not a local checkpoint path.
        """
        if name.endswith('.pth'):
            model_weights = torch.load(data + name)
        else:
            model_url = get_model_url(data, name, hash)
            model_weights = model_zoo.load_url(model_url)
        # infer the class count from the classifier bias/weight saved last
        num_classes = len(model_weights[list(model_weights.keys())[-1]])
        self.fc = nn.Conv2d(
            self.channels[-1], num_classes,
            kernel_size=1, stride=1, padding=0, bias=True)
        self.load_state_dict(model_weights, strict=False)
# Lookup table: model name -> (levels per stage, channels per stage,
# checkpoint hash) for the pretrained DLA variants defined in this file.
model_dict = {
    'dla34': (
        [1, 1, 1, 2, 2, 1],
        [16, 32, 64, 128, 256, 512],
        'ba72cf86'),
    'dla102': (
        [1, 1, 1, 3, 4, 1],
        [16, 32, 128, 256, 512, 1024],
        'd94d9790'),
    'dla46_c': (
        [1, 1, 1, 2, 2, 1],
        [16, 32, 64, 64, 128, 256],
        '2bfd52c3'),
    'dla46x_c': (
        [1, 1, 1, 2, 2, 1],
        [16, 32, 64, 64, 128, 256],
        'd761bae7'),
    'dla60x_c': (
        [1, 1, 1, 2, 3, 1],
        [16, 32, 64, 64, 128, 256],
        'b870c45c'),
    'dla60': (
        [1, 1, 1, 2, 3, 1],
        [16, 32, 128, 256, 512, 1024],
        '24839fc4'),
    'dla60x': (
        [1, 1, 1, 2, 3, 1],
        [16, 32, 128, 256, 512, 1024],
        'd15cacda'),
    'dla102x': (
        [1, 1, 1, 3, 4, 1],
        [16, 32, 128, 256, 512, 1024],
        'ad62be81'),
    'dla102x2': (
        [1, 1, 1, 3, 4, 1],
        [16, 32, 128, 256, 512, 1024],
        '262837b6'),
    'dla169': (
        [1, 1, 2, 3, 5, 1],
        [16, 32, 128, 256, 512, 1024],
        '0914e092'
    )
}
def dla34(pretrained=True, **kwargs):  # DLA-34
    """Build DLA-34, loading the ImageNet checkpoint when `pretrained`."""
    net = DLA([1, 1, 1, 2, 2, 1],
              [16, 32, 64, 128, 256, 512],
              block=BasicBlock, **kwargs)
    if not pretrained:
        print('Warning: No ImageNet pretrain!!')
    else:
        net.load_pretrained_model(
            data='imagenet', name='dla34', hash='ba72cf86')
    return net
def dla102(pretrained=None, **kwargs):  # DLA-102
    """Build DLA-102 (residual roots); load ImageNet weights if truthy."""
    Bottleneck.expansion = 2
    net = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
              block=Bottleneck, residual_root=True, **kwargs)
    if pretrained:
        net.load_pretrained_model(
            data='imagenet', name='dla102', hash='d94d9790')
    return net
# Bug fix for all factories below: the original used
# `if pretrained is not None:`, which loaded ImageNet weights even for an
# explicit `pretrained=False`.  A truthiness check matches dla34/dla102.

def dla46_c(pretrained=None, **kwargs):  # DLA-46-C
    """DLA-46-C; pass a truthy `pretrained` to load ImageNet weights."""
    Bottleneck.expansion = 2
    model = DLA([1, 1, 1, 2, 2, 1],
                [16, 32, 64, 64, 128, 256],
                block=Bottleneck, **kwargs)
    if pretrained:
        model.load_pretrained_model(
            data='imagenet', name='dla46_c', hash='2bfd52c3')
    return model

def dla46x_c(pretrained=None, **kwargs):  # DLA-X-46-C
    """DLA-X-46-C; pass a truthy `pretrained` to load ImageNet weights."""
    BottleneckX.expansion = 2
    model = DLA([1, 1, 1, 2, 2, 1],
                [16, 32, 64, 64, 128, 256],
                block=BottleneckX, **kwargs)
    if pretrained:
        model.load_pretrained_model(
            data='imagenet', name='dla46x_c', hash='d761bae7')
    return model

def dla60x_c(pretrained=None, **kwargs):  # DLA-X-60-C
    """DLA-X-60-C; pass a truthy `pretrained` to load ImageNet weights."""
    BottleneckX.expansion = 2
    model = DLA([1, 1, 1, 2, 3, 1],
                [16, 32, 64, 64, 128, 256],
                block=BottleneckX, **kwargs)
    if pretrained:
        model.load_pretrained_model(
            data='imagenet', name='dla60x_c', hash='b870c45c')
    return model

def dla60(pretrained=None, **kwargs):  # DLA-60
    """DLA-60; pass a truthy `pretrained` to load ImageNet weights."""
    Bottleneck.expansion = 2
    model = DLA([1, 1, 1, 2, 3, 1],
                [16, 32, 128, 256, 512, 1024],
                block=Bottleneck, **kwargs)
    if pretrained:
        model.load_pretrained_model(
            data='imagenet', name='dla60', hash='24839fc4')
    return model

def dla60x(pretrained=None, **kwargs):  # DLA-X-60
    """DLA-X-60; pass a truthy `pretrained` to load ImageNet weights."""
    BottleneckX.expansion = 2
    model = DLA([1, 1, 1, 2, 3, 1],
                [16, 32, 128, 256, 512, 1024],
                block=BottleneckX, **kwargs)
    if pretrained:
        model.load_pretrained_model(
            data='imagenet', name='dla60x', hash='d15cacda')
    return model

def dla102x(pretrained=None, **kwargs):  # DLA-X-102
    """DLA-X-102 (residual roots); truthy `pretrained` loads weights."""
    BottleneckX.expansion = 2
    model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
                block=BottleneckX, residual_root=True, **kwargs)
    if pretrained:
        model.load_pretrained_model(
            data='imagenet', name='dla102x', hash='ad62be81')
    return model

def dla102x2(pretrained=None, **kwargs):  # DLA-X-102 64
    """DLA-X-102 with cardinality 64; truthy `pretrained` loads weights."""
    BottleneckX.cardinality = 64
    model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
                block=BottleneckX, residual_root=True, **kwargs)
    if pretrained:
        model.load_pretrained_model(
            data='imagenet', name='dla102x2', hash='262837b6')
    return model

def dla169(pretrained=None, **kwargs):  # DLA-169
    """DLA-169 (residual roots); truthy `pretrained` loads weights."""
    Bottleneck.expansion = 2
    model = DLA([1, 1, 2, 3, 5, 1], [16, 32, 128, 256, 512, 1024],
                block=Bottleneck, residual_root=True, **kwargs)
    if pretrained:
        model.load_pretrained_model(
            data='imagenet', name='dla169', hash='0914e092')
    return model
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/model/networks/backbones/mobilenet.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import logging
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from torchvision.models.utils import load_state_dict_from_url
# BatchNorm momentum shared across this backbone.
BN_MOMENTUM = 0.1
# Torchvision checkpoint used to initialize MobileNetV2 at construction.
model_urls = {
    'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',
}
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class ConvBNReLU(nn.Sequential):
    """Conv (3x3 by default, 'same' padding) -> BatchNorm -> ReLU6."""

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
        pad = (kernel_size - 1) // 2
        layers = (
            nn.Conv2d(in_planes, out_planes, kernel_size, stride, pad,
                      groups=groups, bias=False),
            nn.BatchNorm2d(out_planes),
            nn.ReLU6(inplace=True),
        )
        super(ConvBNReLU, self).__init__(*layers)
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block: expand -> depthwise -> project."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        hidden_dim = int(round(inp * expand_ratio))
        # skip connection only when spatial size and channels are preserved
        self.use_res_connect = stride == 1 and inp == oup
        layers = []
        if expand_ratio != 1:
            # pointwise expansion
            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        # depthwise conv followed by the linear pointwise projection
        layers.append(ConvBNReLU(hidden_dim, hidden_dim, stride=stride,
                                 groups=hidden_dim))
        layers.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
        layers.append(nn.BatchNorm2d(oup))
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out
class MobileNetV2(nn.Module):
    def __init__(self, opt,
                 width_mult=1.0,
                 round_nearest=8,
                 block=None):
        """
        MobileNet V2 backbone that records multi-scale feature maps.

        Args:
            opt: options object; `opt.pre_img` / `opt.pre_hm` add stems for
                a previous-frame image / heatmap (assumed boolean flags —
                TODO confirm against the option parser)
            width_mult (float): Width multiplier - adjusts number of channels
                in each layer by this amount
            round_nearest (int): Round the number of channels in each layer
                to be a multiple of this number. Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for
                mobilenet

        NOTE(review): ImageNet weights are downloaded at construction time
        (requires network access).
        """
        super().__init__()
        if block is None:
            block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        # t = expansion factor, c = output channels, n = repeats, s = stride
        inverted_residual_setting = [
            # t, c, n, s
            [1, 16, 1, 1], # 1
            [6, 24, 2, 2], # 2
            [6, 32, 3, 2], # 3
            [6, 64, 4, 2], # 4
            [6, 96, 3, 1], # 5
            [6, 160, 3, 2],# 6
            [6, 320, 1, 1],# 7
        ]
        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError("inverted_residual_setting should be non-empty "
                             "or a 4-element list, got {}".format(inverted_residual_setting))
        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        # self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        if opt.pre_img:
            print('adding pre_img layer...')
            self.pre_img_layer = nn.Sequential(
                nn.Conv2d(3, input_channel, kernel_size=3, padding=1, stride=2, bias=False),
                nn.BatchNorm2d(input_channel))
        if opt.pre_hm:
            print('adding pre_hm layer...')
            self.pre_hm_layer = nn.Sequential(
                nn.Conv2d(1, input_channel, kernel_size=3, padding=1, stride=2, bias=False),
                nn.BatchNorm2d(input_channel))
        features = [ConvBNReLU(3, input_channel, stride=2)]
        # key_block[i] marks features[i] whose output should be returned by
        # forward(); it is first seeded at stride-2 layers then shifted below.
        self.key_block = [True]
        all_channels = [input_channel]
        self.channels = [input_channel]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
                if stride == 2:
                    self.key_block.append(True)
                else:
                    self.key_block.append(False)
                all_channels.append(output_channel)
        # building last several layers
        # features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
        # self.key_block.append(False)
        # all_channels.append(self.last_channel)
        # shift markers: a stride-2 layer marks its *predecessor* as the last
        # (key) layer of the previous resolution
        for i in range(len(self.key_block) - 1):
            if self.key_block[i + 1]:
                self.key_block[i] = True
                self.key_block[i + 1] = False
                self.channels.append(all_channels[i])
        self.key_block[-1] = True
        self.channels.append(all_channels[-1])
        print('channels', self.channels)
        # make it nn.Sequential
        self.features = nn.ModuleList(features)
        print('len(self.features)', len(self.features))
        # self.channels = [, ]
        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)
        # overwrite the random init with ImageNet weights; strict=False
        # skips keys that do not match this trimmed backbone
        state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'])
        self.load_state_dict(state_dict, strict=False)
    def forward(self, inputs, pre_img=None, pre_hm=None):
        """Return the key feature maps (one per spatial resolution)."""
        x = self.features[0](inputs)
        if pre_img is not None:
            x = x + self.pre_img_layer(pre_img)
        if pre_hm is not None:
            x = x + self.pre_hm_layer(pre_hm)
        y = [x]
        for i in range(1, len(self.features)):
            x = self.features[i](x)
            # print('i, shape, is_key', i, x.shape, self.key_block[i])
            if self.key_block[i]:
                y.append(x)
        return y
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/model/networks/backbones/resnet.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import logging
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
# BatchNorm momentum shared across this backbone.
BN_MOMENTUM = 0.1
# Torchvision ImageNet checkpoints used by Resnet._init_weights.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with 'same' padding."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=3, padding=1,
                     stride=stride, bias=False)
class BasicBlock(nn.Module):
    """Standard ResNet basic block: two 3x3 convs plus a shortcut."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + identity)
class Bottleneck(nn.Module):
    """ResNet bottleneck: 1x1 reduce, 3x3, 1x1 expand (x4), plus shortcut."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion,
                                  momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + identity)
# depth -> (block type, residual blocks per stage), torchvision layout
resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),
               34: (BasicBlock, [3, 4, 6, 3]),
               50: (Bottleneck, [3, 4, 6, 3]),
               101: (Bottleneck, [3, 4, 23, 3]),
               152: (Bottleneck, [3, 8, 36, 3])}
class Resnet(nn.Module):
    """torchvision-style ResNet backbone returning all six stage outputs."""
    def __init__(self, opt):
        super().__init__()
        # this backbone has no pre-image / pre-heatmap stems
        assert (not opt.pre_hm) and (not opt.pre_img)
        self.inplanes = 64
        block, layers = resnet_spec[opt.num_layers]
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # per-output channel counts, aligned with the list forward() returns
        self.channels = [64, 64,
                         64 * block.expansion,
                         128 * block.expansion,
                         256 * block.expansion,
                         512 * block.expansion]
        self._init_weights(opt.num_layers)
    def _make_layer(self, block, planes, blocks, stride=1):
        # a 1x1 projection aligns the shortcut when shape/channels change
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        # returns [stem, post-pool, layer1, layer2, layer3, layer4]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        y = [x]
        x = self.maxpool(x)
        y.append(x)
        x = self.layer1(x)
        y.append(x)
        x = self.layer2(x)
        y.append(x)
        x = self.layer3(x)
        y.append(x)
        x = self.layer4(x)
        y.append(x)
        return y
    def _init_weights(self, num_layers):
        # NOTE(review): downloads ImageNet weights (network access required);
        # strict=False skips keys absent from this trimmed backbone
        url = model_urls['resnet{}'.format(num_layers)]
        pretrained_state_dict = model_zoo.load_url(url)
        print('=> loading pretrained model {}'.format(url))
self.load_state_dict(pretrained_state_dict, strict=False) | xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/model/networks/base_model.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from torch import nn
def fill_fc_weights(layers):
    """Zero the bias of every Conv2d found under `layers`."""
    for module in layers.modules():
        if isinstance(module, nn.Conv2d) and module.bias is not None:
            nn.init.constant_(module.bias, 0)
class BaseModel(nn.Module):
    """Backbone-agnostic detector head container.

    Subclasses implement `img2feats` / `imgpre2feats`; this class builds one
    small conv head per entry in `heads` and applies every head to each of
    the `num_stacks` backbone feature maps.
    """

    def __init__(self, heads, head_convs, num_stacks, last_channel, opt=None):
        super(BaseModel, self).__init__()
        # Bug fix: `forward` reads `self.opt`, but the original never stored
        # the `opt` argument.
        self.opt = opt
        if opt is not None and opt.head_kernel != 3:
            print('Using head kernel:', opt.head_kernel)
            head_kernel = opt.head_kernel
        else:
            head_kernel = 3
        self.num_stacks = num_stacks
        self.heads = heads
        for head in self.heads:
            classes = self.heads[head]
            head_conv = head_convs[head]
            if len(head_conv) > 0:
                out = nn.Conv2d(head_conv[-1], classes,
                                kernel_size=1, stride=1, padding=0, bias=True)
                conv = nn.Conv2d(last_channel, head_conv[0],
                                 kernel_size=head_kernel,
                                 padding=head_kernel // 2, bias=True)
                convs = [conv]
                for k in range(1, len(head_conv)):
                    convs.append(nn.Conv2d(head_conv[k - 1], head_conv[k],
                                           kernel_size=1, bias=True))
                # Interleave each conv with a ReLU and finish with the 1x1
                # output conv.  This replaces the original hard-coded chain,
                # which only supported up to 4 convs (longer head_conv lists
                # left `fc` unbound and crashed).
                modules = []
                for c in convs:
                    modules.extend([c, nn.ReLU(inplace=True)])
                modules.append(out)
                fc = nn.Sequential(*modules)
                if 'hm' in head:
                    # bias the heatmap logits towards "no object"
                    fc[-1].bias.data.fill_(opt.prior_bias)
                else:
                    fill_fc_weights(fc)
            else:
                # no hidden convs: a single 1x1 conv maps features to outputs
                fc = nn.Conv2d(last_channel, classes,
                               kernel_size=1, stride=1, padding=0, bias=True)
                if 'hm' in head:
                    fc.bias.data.fill_(opt.prior_bias)
                else:
                    fill_fc_weights(fc)
            self.__setattr__(head, fc)

    def img2feats(self, x):
        """Return a list of feature maps for one image (subclass hook)."""
        raise NotImplementedError

    def imgpre2feats(self, x, pre_img=None, pre_hm=None):
        """Feature hook for tracking with previous-frame inputs."""
        raise NotImplementedError

    def forward(self, x, pre_img=None, pre_hm=None):
        """Run the backbone, then every head on each feature stack."""
        if (pre_hm is not None) or (pre_img is not None):
            feats = self.imgpre2feats(x, pre_img, pre_hm)
        else:
            feats = self.img2feats(x)
        out = []
        if self.opt is not None and self.opt.model_output_list:
            # export-friendly: list of tensors per stack, sorted head order
            for s in range(self.num_stacks):
                z = []
                for head in sorted(self.heads):
                    z.append(self.__getattr__(head)(feats[s]))
                out.append(z)
        else:
            for s in range(self.num_stacks):
                z = {}
                for head in self.heads:
                    z[head] = self.__getattr__(head)(feats[s])
                out.append(z)
        return out
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/model/networks/dla.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import logging
import numpy as np
from os.path import join
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
from .base_model import BaseModel
try:
from .DCNv2.dcn_v2 import DCN
except:
print('import DCN failed')
DCN = None
# BatchNorm momentum shared across this backbone.
BN_MOMENTUM = 0.1
# module-level logger (currently unused in this file)
logger = logging.getLogger(__name__)
def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):
    """Compose the model-zoo download URL for a pretrained DLA checkpoint."""
    filename = '{}-{}.pth'.format(name, hash)
    return join('http://dl.yf.io/dla/models', data, filename)
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with 'same' padding."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=3, padding=1,
                     stride=stride, bias=False)
class BasicBlock(nn.Module):
    """Two dilated 3x3 convs with BN/ReLU and an additive skip connection."""

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
                               stride=stride, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.stride = stride

    def forward(self, x, residual=None):
        skip = x if residual is None else residual
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + skip)
class Bottleneck(nn.Module):
    """1x1 reduce -> 3x3 -> 1x1 expand bottleneck with an additive skip."""
    expansion = 2

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(Bottleneck, self).__init__()
        bottle_planes = planes // Bottleneck.expansion
        self.conv1 = nn.Conv2d(inplanes, bottle_planes, kernel_size=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
                               stride=stride, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(bottle_planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(bottle_planes, planes, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

    def forward(self, x, residual=None):
        skip = x if residual is None else residual
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + skip)
class BottleneckX(nn.Module):
    """Aggregated-residual (ResNeXt-style) bottleneck used by DLA."""
    expansion = 2
    cardinality = 32

    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(BottleneckX, self).__init__()
        groups = BottleneckX.cardinality
        # width of the grouped 3x3 stage scales with the cardinality
        mid = planes * groups // 32
        self.conv1 = nn.Conv2d(inplanes, mid, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(mid, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(mid, mid, kernel_size=3, stride=stride,
                               padding=dilation, dilation=dilation,
                               groups=groups, bias=False)
        self.bn2 = nn.BatchNorm2d(mid, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(mid, planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride

    def forward(self, x, residual=None):
        skip = x if residual is None else residual
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + skip)
class Root(nn.Module):
    """Aggregation node: concatenate children along channels, 1x1 conv + BN,
    optionally add the first child back, then ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size, residual):
        super(Root, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, 1, stride=1,
                              bias=False, padding=(kernel_size - 1) // 2)
        self.bn = nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.residual = residual

    def forward(self, *x):
        children = x
        out = self.bn(self.conv(torch.cat(children, 1)))
        if self.residual:
            out = out + children[0]
        return self.relu(out)
class Tree(nn.Module):
    # Recursive DLA aggregation tree: two sub-trees (plain blocks at the
    # leaves) whose outputs are merged by a Root node.  `level_root` also
    # feeds the (downsampled) input into the root; `root_dim` tracks how
    # many channels the root concatenates.
    def __init__(self, levels, block, in_channels, out_channels, stride=1,
                 level_root=False, root_dim=0, root_kernel_size=1,
                 dilation=1, root_residual=False):
        super(Tree, self).__init__()
        # default root input: the two sub-tree outputs
        if root_dim == 0:
            root_dim = 2 * out_channels
        if level_root:
            root_dim += in_channels
        if levels == 1:
            self.tree1 = block(in_channels, out_channels, stride,
                               dilation=dilation)
            self.tree2 = block(out_channels, out_channels, 1,
                               dilation=dilation)
        else:
            # recurse; only the innermost level owns a Root
            self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
                              stride, root_dim=0,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
            self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
                              root_dim=root_dim + out_channels,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
        if levels == 1:
            self.root = Root(root_dim, out_channels, root_kernel_size,
                             root_residual)
        self.level_root = level_root
        self.root_dim = root_dim
        self.downsample = None
        self.project = None
        self.levels = levels
        if stride > 1:
            self.downsample = nn.MaxPool2d(stride, stride=stride)
        if in_channels != out_channels:
            # 1x1 projection so the skip path matches the output channels
            self.project = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(out_channels, momentum=BN_MOMENTUM)
            )
    def forward(self, x, residual=None, children=None):
        # `children` accumulates the feature maps aggregated at the root
        children = [] if children is None else children
        bottom = self.downsample(x) if self.downsample else x
        # NOTE: the incoming `residual` argument is overwritten here
        residual = self.project(bottom) if self.project else bottom
        if self.level_root:
            children.append(bottom)
        x1 = self.tree1(x, residual)
        if self.levels == 1:
            x2 = self.tree2(x1)
            x = self.root(x2, x1, *children)
        else:
            children.append(x1)
            x = self.tree2(x1, children=children)
        return x
class DLA(nn.Module):
    """Deep Layer Aggregation backbone.

    `forward` returns the outputs of all six levels as a list so decoder
    necks can aggregate multi-scale features.  When `opt.pre_img` /
    `opt.pre_hm` are set, extra stems for a previous-frame image / heatmap
    are created and fused into the base features.
    """

    def __init__(self, levels, channels, num_classes=1000,
                 block=BasicBlock, residual_root=False, linear_root=False,
                 opt=None):
        super(DLA, self).__init__()
        self.channels = channels
        self.num_classes = num_classes
        self.base_layer = nn.Sequential(
            nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
                      padding=3, bias=False),
            nn.BatchNorm2d(channels[0], momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True))
        self.level0 = self._make_conv_level(
            channels[0], channels[0], levels[0])
        self.level1 = self._make_conv_level(
            channels[0], channels[1], levels[1], stride=2)
        self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
                           level_root=False,
                           root_residual=residual_root)
        self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
                           level_root=True, root_residual=residual_root)
        self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
                           level_root=True, root_residual=residual_root)
        self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
                           level_root=True, root_residual=residual_root)
        # Bug fix: `opt` defaults to None, so it must be checked before its
        # attributes are read (the original dereferenced `opt`
        # unconditionally and raised AttributeError without an opt object).
        if opt is not None and opt.pre_img:
            self.pre_img_layer = nn.Sequential(
                nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
                          padding=3, bias=False),
                nn.BatchNorm2d(channels[0], momentum=BN_MOMENTUM),
                nn.ReLU(inplace=True))
        if opt is not None and opt.pre_hm:
            self.pre_hm_layer = nn.Sequential(
                nn.Conv2d(1, channels[0], kernel_size=7, stride=1,
                          padding=3, bias=False),
                nn.BatchNorm2d(channels[0], momentum=BN_MOMENTUM),
                nn.ReLU(inplace=True))

    def _make_level(self, block, inplanes, planes, blocks, stride=1):
        """Stack `blocks` residual blocks with an optional pooled projection.

        NOTE(review): unused by __init__; blocks after the first are built
        with `inplanes` rather than `planes` — confirm before reusing.
        """
        downsample = None
        if stride != 1 or inplanes != planes:
            downsample = nn.Sequential(
                nn.MaxPool2d(stride, stride=stride),
                nn.Conv2d(inplanes, planes,
                          kernel_size=1, stride=1, bias=False),
                nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
            )
        layers = []
        layers.append(block(inplanes, planes, stride, downsample=downsample))
        for i in range(1, blocks):
            layers.append(block(inplanes, planes))
        return nn.Sequential(*layers)

    def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
        """Stack `convs` conv-BN-ReLU units; only the first may stride."""
        modules = []
        for i in range(convs):
            modules.extend([
                nn.Conv2d(inplanes, planes, kernel_size=3,
                          stride=stride if i == 0 else 1,
                          padding=dilation, bias=False, dilation=dilation),
                nn.BatchNorm2d(planes, momentum=BN_MOMENTUM),
                nn.ReLU(inplace=True)])
            inplanes = planes
        return nn.Sequential(*modules)

    def forward(self, x, pre_img=None, pre_hm=None):
        """Return the list of level0..level5 feature maps."""
        y = []
        x = self.base_layer(x)
        if pre_img is not None:
            x = x + self.pre_img_layer(pre_img)
        if pre_hm is not None:
            x = x + self.pre_hm_layer(pre_hm)
        for i in range(6):
            x = getattr(self, 'level{}'.format(i))(x)
            y.append(x)
        return y

    def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'):
        """Load classifier weights from a local .pth file or the DLA model zoo.

        Non-matching keys are skipped (strict=False).  Requires network
        access when `name` is not a local checkpoint path.
        """
        if name.endswith('.pth'):
            model_weights = torch.load(data + name)
        else:
            model_url = get_model_url(data, name, hash)
            model_weights = model_zoo.load_url(model_url)
        # infer the class count from the classifier bias/weight saved last
        num_classes = len(model_weights[list(model_weights.keys())[-1]])
        self.fc = nn.Conv2d(
            self.channels[-1], num_classes,
            kernel_size=1, stride=1, padding=0, bias=True)
        self.load_state_dict(model_weights, strict=False)
def dla34(pretrained=True, **kwargs):  # DLA-34
    """Build DLA-34, loading the ImageNet checkpoint when `pretrained`."""
    net = DLA([1, 1, 1, 2, 2, 1],
              [16, 32, 64, 128, 256, 512],
              block=BasicBlock, **kwargs)
    if not pretrained:
        print('Warning: No ImageNet pretrain!!')
    else:
        net.load_pretrained_model(
            data='imagenet', name='dla34', hash='ba72cf86')
    return net
def dla102(pretrained=None, **kwargs):  # DLA-102
    """Build DLA-102 (residual roots); load ImageNet weights if truthy."""
    Bottleneck.expansion = 2
    net = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
              block=Bottleneck, residual_root=True, **kwargs)
    if pretrained:
        net.load_pretrained_model(
            data='imagenet', name='dla102', hash='d94d9790')
    return net
def dla46_c(pretrained=None, **kwargs): # DLA-46-C
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 64, 128, 256],
block=Bottleneck, **kwargs)
if pretrained is not None:
model.load_pretrained_model(
data='imagenet', name='dla46_c', hash='2bfd52c3')
return model
def dla46x_c(pretrained=None, **kwargs): # DLA-X-46-C
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 2, 1],
[16, 32, 64, 64, 128, 256],
block=BottleneckX, **kwargs)
if pretrained is not None:
model.load_pretrained_model(
data='imagenet', name='dla46x_c', hash='d761bae7')
return model
def dla60x_c(pretrained=None, **kwargs): # DLA-X-60-C
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 64, 64, 128, 256],
block=BottleneckX, **kwargs)
if pretrained is not None:
model.load_pretrained_model(
data='imagenet', name='dla60x_c', hash='b870c45c')
return model
def dla60(pretrained=None, **kwargs): # DLA-60
Bottleneck.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 128, 256, 512, 1024],
block=Bottleneck, **kwargs)
if pretrained is not None:
model.load_pretrained_model(
data='imagenet', name='dla60', hash='24839fc4')
return model
def dla60x(pretrained=None, **kwargs): # DLA-X-60
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 2, 3, 1],
[16, 32, 128, 256, 512, 1024],
block=BottleneckX, **kwargs)
if pretrained is not None:
model.load_pretrained_model(
data='imagenet', name='dla60x', hash='d15cacda')
return model
def dla102x(pretrained=None, **kwargs): # DLA-X-102
BottleneckX.expansion = 2
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=BottleneckX, residual_root=True, **kwargs)
if pretrained is not None:
model.load_pretrained_model(
data='imagenet', name='dla102x', hash='ad62be81')
return model
def dla102x2(pretrained=None, **kwargs): # DLA-X-102 64
BottleneckX.cardinality = 64
model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
block=BottleneckX, residual_root=True, **kwargs)
if pretrained is not None:
model.load_pretrained_model(
data='imagenet', name='dla102x2', hash='262837b6')
return model
def dla169(pretrained=None, **kwargs): # DLA-169
Bottleneck.expansion = 2
model = DLA([1, 1, 2, 3, 5, 1], [16, 32, 128, 256, 512, 1024],
block=Bottleneck, residual_root=True, **kwargs)
if pretrained is not None:
model.load_pretrained_model(
data='imagenet', name='dla169', hash='0914e092')
return model
class Identity(nn.Module):
    """No-op module: forward returns its input unchanged."""
    def __init__(self):
        super().__init__()
    def forward(self, x):
        return x
def fill_fc_weights(layers):
    """Zero the bias of every Conv2d found under `layers` (weights untouched)."""
    for module in layers.modules():
        if isinstance(module, nn.Conv2d) and module.bias is not None:
            nn.init.constant_(module.bias, 0)
def fill_up_weights(up):
    """Initialize a depthwise ConvTranspose2d as a bilinear-upsampling kernel.

    The bilinear filter is written into channel 0 and then replicated to
    every other channel (weights stay trainable afterwards).
    """
    w = up.weight.data
    f = math.ceil(w.size(2) / 2)
    center = (2 * f - 1 - f % 2) / (2. * f)
    for r in range(w.size(2)):
        for s in range(w.size(3)):
            w[0, 0, r, s] = ((1 - math.fabs(r / f - center)) *
                             (1 - math.fabs(s / f - center)))
    for ch in range(1, w.size(0)):
        w[ch, 0, :, :] = w[0, 0, :, :]
class Conv(nn.Module):
    """1x1 convolution -> BatchNorm -> ReLU projection block."""
    def __init__(self, chi, cho):
        # chi / cho: input / output channel counts.
        super(Conv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(chi, cho, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True))
    def forward(self, x):
        return self.conv(x)
class GlobalConv(nn.Module):
    """Large-kernel separable convolution ('gcn' node type).

    Two branches — (k x 1 then 1 x k) and (1 x k then k x 1) — are summed,
    then normalized and activated. Padding keeps spatial size.
    """
    def __init__(self, chi, cho, k=7, d=1):
        super(GlobalConv, self).__init__()
        gcl = nn.Sequential(
            nn.Conv2d(chi, cho, kernel_size=(k, 1), stride=1, bias=False,
                      dilation=d, padding=(d * (k // 2), 0)),
            nn.Conv2d(cho, cho, kernel_size=(1, k), stride=1, bias=False,
                      dilation=d, padding=(0, d * (k // 2))))
        gcr = nn.Sequential(
            nn.Conv2d(chi, cho, kernel_size=(1, k), stride=1, bias=False,
                      dilation=d, padding=(0, d * (k // 2))),
            nn.Conv2d(cho, cho, kernel_size=(k, 1), stride=1, bias=False,
                      dilation=d, padding=(d * (k // 2), 0)))
        # Zero conv biases (no-op here: the convs are bias=False).
        fill_fc_weights(gcl)
        fill_fc_weights(gcr)
        self.gcl = gcl
        self.gcr = gcr
        self.act = nn.Sequential(
            nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True)
        )
    def forward(self, x):
        x = self.gcl(x) + self.gcr(x)
        x = self.act(x)
        return x
class DeformConv(nn.Module):
    """3x3 deformable convolution (DCNv2) followed by BatchNorm + ReLU.

    Note: if the DCNv2 extension failed to import, the module-level DCN is
    None and constructing this class raises a TypeError.
    """
    def __init__(self, chi, cho):
        super(DeformConv, self).__init__()
        self.actf = nn.Sequential(
            nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True)
        )
        self.conv = DCN(chi, cho, kernel_size=(3,3), stride=1, padding=1, dilation=1, deformable_groups=1)
    def forward(self, x):
        x = self.conv(x)
        x = self.actf(x)
        return x
class IDAUp(nn.Module):
    """Iterative Deep Aggregation upsampling.

    For each input scale i >= 1: project channels[i] to `o` channels,
    upsample by up_f[i] with a depthwise transposed conv initialized to
    bilinear upsampling, and fuse with the previous level through a `node`
    module. forward() mutates `layers` in place.
    """
    def __init__(self, o, channels, up_f, node_type=(DeformConv, DeformConv)):
        super(IDAUp, self).__init__()
        for i in range(1, len(channels)):
            c = channels[i]
            f = int(up_f[i])
            proj = node_type[0](c, o)
            node = node_type[1](o, o)
            up = nn.ConvTranspose2d(o, o, f * 2, stride=f,
                                    padding=f // 2, output_padding=0,
                                    groups=o, bias=False)
            fill_up_weights(up)  # bilinear init; weights remain trainable
            setattr(self, 'proj_' + str(i), proj)
            setattr(self, 'up_' + str(i), up)
            setattr(self, 'node_' + str(i), node)
    def forward(self, layers, startp, endp):
        # Processes layers[startp+1 : endp]; submodule indices are relative
        # to startp, matching the 1-based indices used in __init__.
        for i in range(startp + 1, endp):
            upsample = getattr(self, 'up_' + str(i - startp))
            project = getattr(self, 'proj_' + str(i - startp))
            layers[i] = upsample(project(layers[i]))
            node = getattr(self, 'node_' + str(i - startp))
            layers[i] = node(layers[i] + layers[i - 1])
class DLAUp(nn.Module):
    """Cascade of IDAUp stages merging deep features into shallower scales.

    NOTE(review): when `in_channels` is None it aliases the caller's
    `channels` list and is mutated below; callers here pass a fresh slice,
    so this is harmless — but verify before reusing elsewhere.
    """
    def __init__(self, startp, channels, scales, in_channels=None,
                 node_type=DeformConv):
        super(DLAUp, self).__init__()
        self.startp = startp
        if in_channels is None:
            in_channels = channels
        self.channels = channels
        channels = list(channels)
        scales = np.array(scales, dtype=int)
        for i in range(len(channels) - 1):
            j = -i - 2
            # ida_i merges the deepest i+2 levels down to channels[j] width.
            setattr(self, 'ida_{}'.format(i),
                    IDAUp(channels[j], in_channels[j:],
                          scales[j:] // scales[j],
                          node_type=node_type))
            scales[j + 1:] = scales[j]
            in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]
    def forward(self, layers):
        out = [layers[-1]] # start with 32
        for i in range(len(layers) - self.startp - 1):
            ida = getattr(self, 'ida_{}'.format(i))
            # ida mutates `layers` in place; the refined deepest map is
            # collected after every stage, shallowest first.
            ida(layers, len(layers) -i - 2, len(layers))
            out.insert(0, layers[-1])
        return out
class Interpolate(nn.Module):
    """Module wrapper around F.interpolate with fixed scale factor and mode."""
    def __init__(self, scale, mode):
        super().__init__()
        self.scale = scale  # forwarded as scale_factor
        self.mode = mode
    def forward(self, x):
        return F.interpolate(
            x, scale_factor=self.scale, mode=self.mode, align_corners=False)
# Maps opt.dla_node to a (projection_module, aggregation_node_module) pair
# consumed by IDAUp; 'dcn' requires the DCNv2 extension to have imported.
DLA_NODE = {
    'dcn': (DeformConv, DeformConv),
    'gcn': (Conv, GlobalConv),
    'conv': (Conv, Conv),
}
class DLASeg(BaseModel):
    """DLA backbone + DLAUp/IDAUp neck producing one stride-4 feature map.

    Output heads are constructed and applied by the BaseModel parent.
    """
    def __init__(self, num_layers, heads, head_convs, opt):
        super(DLASeg, self).__init__(
            heads, head_convs, 1, 64 if num_layers == 34 else 128, opt=opt)
        down_ratio=4
        self.opt = opt
        self.node_type = DLA_NODE[opt.dla_node]
        print('Using node type:', self.node_type)
        self.first_level = int(np.log2(down_ratio))  # 2 for down_ratio == 4
        self.last_level = 5
        # Skip ImageNet pretraining when resuming from a checkpoint.
        self.base = globals()['dla{}'.format(num_layers)](
            pretrained=(opt.load_model == ''), opt=opt)
        channels = self.base.channels
        scales = [2 ** i for i in range(len(channels[self.first_level:]))]
        self.dla_up = DLAUp(
            self.first_level, channels[self.first_level:], scales,
            node_type=self.node_type)
        out_channel = channels[self.first_level]
        self.ida_up = IDAUp(
            out_channel, channels[self.first_level:self.last_level],
            [2 ** i for i in range(self.last_level - self.first_level)],
            node_type=self.node_type)
    def img2feats(self, x):
        # Single-image path: backbone -> DLAUp -> IDAUp -> finest map.
        x = self.base(x)
        x = self.dla_up(x)
        y = []
        for i in range(self.last_level - self.first_level):
            y.append(x[i].clone())  # clone: ida_up mutates its input list
        self.ida_up(y, 0, len(y))
        return [y[-1]]
    def imgpre2feats(self, x, pre_img=None, pre_hm=None):
        # Same as img2feats, with extra previous-frame image/heatmap inputs.
        x = self.base(x, pre_img, pre_hm)
        x = self.dla_up(x)
        y = []
        for i in range(self.last_level - self.first_level):
            y.append(x[i].clone())
        self.ida_up(y, 0, len(y))
        return [y[-1]]
# (dataset artifact) repo: xingyizhou/CenterTrack — end of previous file
# --- file: src/lib/model/networks/dlav0.py ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from os.path import join
import torch
from torch import nn
import torch.utils.model_zoo as model_zoo
from .base_model import BaseModel
import numpy as np
BatchNorm = nn.BatchNorm2d
def get_model_url(data='imagenet', name='dla34', hash='ba72cf86'):
    """Return the URL of a pretrained DLA checkpoint in the yf.io model zoo."""
    filename = '{}-{}.pth'.format(name, hash)
    return join('http://dl.yf.io/dla/models', data, filename)
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias."""
    cfg = dict(kernel_size=3, stride=stride, padding=1, bias=False)
    return nn.Conv2d(in_planes, out_planes, **cfg)
class BasicBlock(nn.Module):
    """ResNet-style basic block (two 3x3 convs) using the module-global BatchNorm.

    forward() accepts an external `residual`; when None, the input itself is
    the shortcut (Tree passes a projected/downsampled residual in).
    """
    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3,
                               stride=stride, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn1 = BatchNorm(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn2 = BatchNorm(planes)
        self.stride = stride
    def forward(self, x, residual=None):
        if residual is None:
            residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """Bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand.

    The internal width is planes // expansion; `expansion` is a mutable
    class attribute (the dla* constructors set it before building).
    """
    expansion = 2
    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(Bottleneck, self).__init__()
        expansion = Bottleneck.expansion
        bottle_planes = planes // expansion
        self.conv1 = nn.Conv2d(inplanes, bottle_planes,
                               kernel_size=1, bias=False)
        self.bn1 = BatchNorm(bottle_planes)
        self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
                               stride=stride, padding=dilation,
                               bias=False, dilation=dilation)
        self.bn2 = BatchNorm(bottle_planes)
        self.conv3 = nn.Conv2d(bottle_planes, planes,
                               kernel_size=1, bias=False)
        self.bn3 = BatchNorm(planes)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride
    def forward(self, x, residual=None):
        if residual is None:
            residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        out += residual
        out = self.relu(out)
        return out
class BottleneckX(nn.Module):
    """Grouped (ResNeXt-style) bottleneck; the 3x3 conv uses `cardinality` groups.

    `expansion` and `cardinality` are mutable class attributes set by the
    dla*x* constructors before model construction.
    """
    expansion = 2
    cardinality = 32
    def __init__(self, inplanes, planes, stride=1, dilation=1):
        super(BottleneckX, self).__init__()
        cardinality = BottleneckX.cardinality
        # dim = int(math.floor(planes * (BottleneckV5.expansion / 64.0)))
        # bottle_planes = dim * cardinality
        bottle_planes = planes * cardinality // 32
        self.conv1 = nn.Conv2d(inplanes, bottle_planes,
                               kernel_size=1, bias=False)
        self.bn1 = BatchNorm(bottle_planes)
        self.conv2 = nn.Conv2d(bottle_planes, bottle_planes, kernel_size=3,
                               stride=stride, padding=dilation, bias=False,
                               dilation=dilation, groups=cardinality)
        self.bn2 = BatchNorm(bottle_planes)
        self.conv3 = nn.Conv2d(bottle_planes, planes,
                               kernel_size=1, bias=False)
        self.bn3 = BatchNorm(planes)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride
    def forward(self, x, residual=None):
        if residual is None:
            residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        out += residual
        out = self.relu(out)
        return out
class Root(nn.Module):
    """Aggregation node: concat children, 1x1 conv + BN, optional residual, ReLU.

    NOTE(review): the conv kernel is hard-coded to 1 while `kernel_size`
    only influences the padding; this mirrors upstream DLA, where
    root_kernel_size is 1 in practice — confirm before changing.
    """
    def __init__(self, in_channels, out_channels, kernel_size, residual):
        super(Root, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels, 1,
            stride=1, bias=False, padding=(kernel_size - 1) // 2)
        self.bn = BatchNorm(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.residual = residual
    def forward(self, *x):
        children = x
        x = self.conv(torch.cat(x, 1))
        x = self.bn(x)
        if self.residual:
            x += children[0]  # skip connection from the first child
        x = self.relu(x)
        return x
class Tree(nn.Module):
    """Recursive DLA aggregation tree.

    levels == 1: two `block`s whose outputs are merged by a Root.
    levels > 1: two child Trees; only the innermost level owns a Root.
    `level_root` additionally feeds the (downsampled) input into the root.
    """
    def __init__(self, levels, block, in_channels, out_channels, stride=1,
                 level_root=False, root_dim=0, root_kernel_size=1,
                 dilation=1, root_residual=False):
        super(Tree, self).__init__()
        if root_dim == 0:
            root_dim = 2 * out_channels
        if level_root:
            root_dim += in_channels
        if levels == 1:
            self.tree1 = block(in_channels, out_channels, stride,
                               dilation=dilation)
            self.tree2 = block(out_channels, out_channels, 1,
                               dilation=dilation)
        else:
            self.tree1 = Tree(levels - 1, block, in_channels, out_channels,
                              stride, root_dim=0,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
            self.tree2 = Tree(levels - 1, block, out_channels, out_channels,
                              root_dim=root_dim + out_channels,
                              root_kernel_size=root_kernel_size,
                              dilation=dilation, root_residual=root_residual)
        if levels == 1:
            self.root = Root(root_dim, out_channels, root_kernel_size,
                             root_residual)
        self.level_root = level_root
        self.root_dim = root_dim
        self.downsample = None
        self.project = None
        self.levels = levels
        if stride > 1:
            self.downsample = nn.MaxPool2d(stride, stride=stride)
        if in_channels != out_channels:
            self.project = nn.Sequential(
                nn.Conv2d(in_channels, out_channels,
                          kernel_size=1, stride=1, bias=False),
                BatchNorm(out_channels)
            )
    def forward(self, x, residual=None, children=None):
        # `children` accumulates skip inputs handed down to the root.
        children = [] if children is None else children
        bottom = self.downsample(x) if self.downsample else x
        # NOTE: the incoming `residual` argument is always overwritten here.
        residual = self.project(bottom) if self.project else bottom
        if self.level_root:
            children.append(bottom)
        x1 = self.tree1(x, residual)
        if self.levels == 1:
            x2 = self.tree2(x1)
            x = self.root(x2, x1, *children)
        else:
            children.append(x1)
            x = self.tree2(x1, children=children)
        return x
class DLA(nn.Module):
    """DLA (Deep Layer Aggregation) backbone, dlav0 variant.

    forward() returns the list of 6 per-level feature maps. The
    `return_levels` flag is hard-coded True; the constructor arguments
    `return_levels`, `pool_size` and `linear_root` are accepted for
    interface compatibility but unused here.
    """
    def __init__(self, levels, channels, opt, num_classes=1000,
                 block=BasicBlock, residual_root=False, return_levels=True,
                 pool_size=7, linear_root=False):
        super(DLA, self).__init__()
        self.channels = channels
        self.return_levels = True  # always return per-level features
        self.num_classes = num_classes
        # Stem: 7x7 conv that keeps spatial resolution.
        self.base_layer = nn.Sequential(
            nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
                      padding=3, bias=False),
            BatchNorm(channels[0]),
            nn.ReLU(inplace=True))
        self.level0 = self._make_conv_level(
            channels[0], channels[0], levels[0])
        self.level1 = self._make_conv_level(
            channels[0], channels[1], levels[1], stride=2)
        self.level2 = Tree(levels[2], block, channels[1], channels[2], 2,
                           level_root=False,
                           root_residual=residual_root)
        self.level3 = Tree(levels[3], block, channels[2], channels[3], 2,
                           level_root=True, root_residual=residual_root)
        self.level4 = Tree(levels[4], block, channels[3], channels[4], 2,
                           level_root=True, root_residual=residual_root)
        self.level5 = Tree(levels[5], block, channels[4], channels[5], 2,
                           level_root=True, root_residual=residual_root)
        # Optional extra-input branches, fused additively in forward()
        # (presumably previous-frame image / heatmap, CenterTrack-style).
        if opt.pre_img:
            self.pre_img_layer = nn.Sequential(
                nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
                          padding=3, bias=False),
                BatchNorm(channels[0]),
                nn.ReLU(inplace=True))
        if opt.pre_hm:
            self.pre_hm_layer = nn.Sequential(
                nn.Conv2d(1, channels[0], kernel_size=7, stride=1,
                          padding=3, bias=False),
                BatchNorm(channels[0]),
                nn.ReLU(inplace=True))
        # He-style init for convs; BatchNorm to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, BatchNorm):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_level(self, block, inplanes, planes, blocks, stride=1):
        """Stack `blocks` residual blocks with an optional pooled projection."""
        downsample = None
        if stride != 1 or inplanes != planes:
            downsample = nn.Sequential(
                nn.MaxPool2d(stride, stride=stride),
                nn.Conv2d(inplanes, planes,
                          kernel_size=1, stride=1, bias=False),
                BatchNorm(planes),
            )
        layers = []
        layers.append(block(inplanes, planes, stride, downsample=downsample))
        for i in range(1, blocks):
            layers.append(block(inplanes, planes))
        return nn.Sequential(*layers)

    def _make_conv_level(self, inplanes, planes, convs, stride=1, dilation=1):
        """Stack `convs` Conv3x3-BN-ReLU units; only the first applies stride."""
        modules = []
        for i in range(convs):
            modules.extend([
                nn.Conv2d(inplanes, planes, kernel_size=3,
                          stride=stride if i == 0 else 1,
                          padding=dilation, bias=False, dilation=dilation),
                BatchNorm(planes),
                nn.ReLU(inplace=True)])
            inplanes = planes
        return nn.Sequential(*modules)

    def forward(self, x, pre_img=None, pre_hm=None):
        """Return the list of 6 per-level feature maps."""
        y = []
        x = self.base_layer(x)
        if pre_img is not None:
            x = x + self.pre_img_layer(pre_img)
        if pre_hm is not None:
            x = x + self.pre_hm_layer(pre_hm)
        for i in range(6):
            x = getattr(self, 'level{}'.format(i))(x)
            y.append(x)
        return y

    def load_pretrained_model(self, data='imagenet', name='dla34', hash='ba72cf86'):
        """Fetch pretrained weights and apply them to this model.

        Fix: the original version downloaded `model_weights` but never
        applied them. Load with strict=False so classifier keys absent from
        this backbone (e.g. 'fc.*') are silently ignored.
        """
        if name.endswith('.pth'):
            # Local checkpoint; `data` acts as a path prefix string.
            model_weights = torch.load(data + name)
        else:
            model_url = get_model_url(data, name, hash)
            model_weights = model_zoo.load_url(model_url)
        self.load_state_dict(model_weights, strict=False)
# ---------------------------------------------------------------------------
# dlav0 variant constructors.
# NOTE(review): most variants call `model.load_pretrained_model(pretrained,
# 'name')`, binding `pretrained` to the `data` parameter and the variant name
# to `name` positionally with the *default* hash — inconsistent with the
# signature load_pretrained_model(data=..., name=..., hash=...) and likely a
# leftover from upstream dlav0 (where the signature differed). Only dla34 and
# dla60x_c use the keyword form; presumably only those are exercised.
# ---------------------------------------------------------------------------
def dla34(pretrained, **kwargs):  # DLA-34
    model = DLA([1, 1, 1, 2, 2, 1],
                [16, 32, 64, 128, 256, 512],
                block=BasicBlock, **kwargs)
    if pretrained:
        model.load_pretrained_model(data='imagenet', name='dla34', hash='ba72cf86')
    return model
def dla46_c(pretrained=None, **kwargs):  # DLA-46-C
    Bottleneck.expansion = 2
    model = DLA([1, 1, 1, 2, 2, 1],
                [16, 32, 64, 64, 128, 256],
                block=Bottleneck, **kwargs)
    if pretrained is not None:
        model.load_pretrained_model(pretrained, 'dla46_c')
    return model
def dla46x_c(pretrained=None, **kwargs):  # DLA-X-46-C
    BottleneckX.expansion = 2
    model = DLA([1, 1, 1, 2, 2, 1],
                [16, 32, 64, 64, 128, 256],
                block=BottleneckX, **kwargs)
    if pretrained is not None:
        model.load_pretrained_model(pretrained, 'dla46x_c')
    return model
def dla60x_c(pretrained, **kwargs):  # DLA-X-60-C
    BottleneckX.expansion = 2
    model = DLA([1, 1, 1, 2, 3, 1],
                [16, 32, 64, 64, 128, 256],
                block=BottleneckX, **kwargs)
    if pretrained:
        model.load_pretrained_model(data='imagenet', name='dla60x_c', hash='b870c45c')
    return model
def dla60(pretrained=None, **kwargs):  # DLA-60
    Bottleneck.expansion = 2
    model = DLA([1, 1, 1, 2, 3, 1],
                [16, 32, 128, 256, 512, 1024],
                block=Bottleneck, **kwargs)
    if pretrained is not None:
        model.load_pretrained_model(pretrained, 'dla60')
    return model
def dla60x(pretrained=None, **kwargs):  # DLA-X-60
    BottleneckX.expansion = 2
    model = DLA([1, 1, 1, 2, 3, 1],
                [16, 32, 128, 256, 512, 1024],
                block=BottleneckX, **kwargs)
    if pretrained is not None:
        model.load_pretrained_model(pretrained, 'dla60x')
    return model
def dla102(pretrained=None, **kwargs):  # DLA-102
    Bottleneck.expansion = 2
    model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
                block=Bottleneck, residual_root=True, **kwargs)
    if pretrained is not None:
        model.load_pretrained_model(pretrained, 'dla102')
    return model
def dla102x(pretrained=None, **kwargs):  # DLA-X-102
    BottleneckX.expansion = 2
    model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
                block=BottleneckX, residual_root=True, **kwargs)
    if pretrained is not None:
        model.load_pretrained_model(pretrained, 'dla102x')
    return model
def dla102x2(pretrained=None, **kwargs):  # DLA-X-102 64
    BottleneckX.cardinality = 64
    model = DLA([1, 1, 1, 3, 4, 1], [16, 32, 128, 256, 512, 1024],
                block=BottleneckX, residual_root=True, **kwargs)
    if pretrained is not None:
        model.load_pretrained_model(pretrained, 'dla102x2')
    return model
def dla169(pretrained=None, **kwargs):  # DLA-169
    Bottleneck.expansion = 2
    model = DLA([1, 1, 2, 3, 5, 1], [16, 32, 128, 256, 512, 1024],
                block=Bottleneck, residual_root=True, **kwargs)
    if pretrained is not None:
        model.load_pretrained_model(pretrained, 'dla169')
    return model
def set_bn(bn):
    """Globally swap the BatchNorm class used by subsequently built modules.

    Fix: the original also executed ``dla.BatchNorm = bn``, which raised
    NameError — no ``dla`` name exists in this module (a leftover from
    upstream code where this helper lived outside the dla module).
    """
    global BatchNorm
    BatchNorm = bn
class Identity(nn.Module):
    """No-op module: forward returns its input unchanged."""
    def __init__(self):
        super().__init__()
    def forward(self, x):
        return x
def fill_up_weights(up):
    """Initialize a depthwise ConvTranspose2d as a bilinear-upsampling kernel.

    The bilinear filter is written into channel 0 and replicated to every
    other channel; the weights remain trainable afterwards.
    """
    w = up.weight.data
    f = math.ceil(w.size(2) / 2)
    center = (2 * f - 1 - f % 2) / (2. * f)
    for r in range(w.size(2)):
        for s in range(w.size(3)):
            w[0, 0, r, s] = ((1 - math.fabs(r / f - center)) *
                             (1 - math.fabs(s / f - center)))
    for ch in range(1, w.size(0)):
        w[ch, 0, :, :] = w[0, 0, :, :]
class IDAUp(nn.Module):
    """dlav0 IDA upsampling: per-scale project + upsample, then chained fusion.

    Unlike the dla.py version, this one returns (final_map, intermediate
    list) instead of mutating its input, and fuses by channel concat through
    a `node_kernel` conv rather than by addition.
    """
    def __init__(self, node_kernel, out_dim, channels, up_factors):
        super(IDAUp, self).__init__()
        self.channels = channels
        self.out_dim = out_dim
        for i, c in enumerate(channels):
            if c == out_dim:
                proj = Identity()  # no projection needed
            else:
                proj = nn.Sequential(
                    nn.Conv2d(c, out_dim,
                              kernel_size=1, stride=1, bias=False),
                    BatchNorm(out_dim),
                    nn.ReLU(inplace=True))
            f = int(up_factors[i])
            if f == 1:
                up = Identity()  # already at the target scale
            else:
                up = nn.ConvTranspose2d(
                    out_dim, out_dim, f * 2, stride=f, padding=f // 2,
                    output_padding=0, groups=out_dim, bias=False)
                fill_up_weights(up)  # bilinear-upsampling init
            setattr(self, 'proj_' + str(i), proj)
            setattr(self, 'up_' + str(i), up)
        for i in range(1, len(channels)):
            node = nn.Sequential(
                nn.Conv2d(out_dim * 2, out_dim,
                          kernel_size=node_kernel, stride=1,
                          padding=node_kernel // 2, bias=False),
                BatchNorm(out_dim),
                nn.ReLU(inplace=True))
            setattr(self, 'node_' + str(i), node)
        # He init for convs, identity init for BN. The bilinear-filled
        # ConvTranspose2d layers are not nn.Conv2d instances, so they are
        # not re-initialized here.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, BatchNorm):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def forward(self, layers):
        assert len(self.channels) == len(layers), \
            '{} vs {} layers'.format(len(self.channels), len(layers))
        layers = list(layers)
        # Bring every scale to out_dim channels and a common resolution.
        for i, l in enumerate(layers):
            upsample = getattr(self, 'up_' + str(i))
            project = getattr(self, 'proj_' + str(i))
            layers[i] = upsample(project(l))
        x = layers[0]
        y = []
        # Chain-fuse: concat running result with the next scale at each step.
        for i in range(1, len(layers)):
            node = getattr(self, 'node_' + str(i))
            x = node(torch.cat([x, layers[i]], 1))
            y.append(x)
        return x, y
class DLAUp(nn.Module):
    """dlav0 DLA upsampling: cascade of IDAUp stages; returns the finest map.

    NOTE(review): when `in_channels` is None it aliases the caller's
    `channels` list and is mutated below — callers here pass a fresh slice,
    so this is harmless; verify before reusing elsewhere.
    """
    def __init__(self, channels, scales=(1, 2, 4, 8, 16), in_channels=None):
        super(DLAUp, self).__init__()
        if in_channels is None:
            in_channels = channels
        self.channels = channels
        channels = list(channels)
        scales = np.array(scales, dtype=int)
        for i in range(len(channels) - 1):
            j = -i - 2
            # ida_i merges the deepest i+2 levels with a 3x3 fusion node.
            setattr(self, 'ida_{}'.format(i),
                    IDAUp(3, channels[j], in_channels[j:],
                          scales[j:] // scales[j]))
            scales[j + 1:] = scales[j]
            in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]
    def forward(self, layers):
        layers = list(layers)
        assert len(layers) > 1
        for i in range(len(layers) - 1):
            ida = getattr(self, 'ida_{}'.format(i))
            # Refine the deepest i+2 levels; their fused versions replace
            # the tail of `layers` for the next stage.
            x, y = ida(layers[-i - 2:])
            layers[-i - 1:] = y
        return x
def fill_fc_weights(layers):
    """Init every Conv2d under `layers`: N(0, 0.001) weights, zero bias."""
    for module in layers.modules():
        if not isinstance(module, nn.Conv2d):
            continue
        nn.init.normal_(module.weight, std=0.001)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0)
class DLASegv0(BaseModel):
    """dlav0 segmentation network: DLA backbone + DLAUp neck, one stride-4 map.

    Heads are constructed and applied by the BaseModel parent.
    """
    def __init__(self, num_layers, heads, head_convs, opt):
        super(DLASegv0, self).__init__(heads, head_convs, 1, 64, opt=opt)
        down_ratio=4
        self.opt = opt
        self.heads = heads
        self.first_level = int(np.log2(down_ratio))  # 2 for down_ratio == 4
        # Always loads ImageNet-pretrained weights for the chosen variant.
        self.base = globals()['dla{}'.format(num_layers)](
            pretrained=True, opt=opt)
        channels = self.base.channels
        scales = [2 ** i for i in range(len(channels[self.first_level:]))]
        self.dla_up = DLAUp(channels[self.first_level:], scales=scales)
    def img2feats(self, x):
        # Single-image path; only levels >= first_level feed the neck.
        x = self.base(x)
        x = self.dla_up(x[self.first_level:])
        return [x]
    def imgpre2feats(self, x, pre_img=None, pre_hm=None):
        # Same path with extra previous-frame image/heatmap inputs.
        x = self.base(x, pre_img, pre_hm)
        x = self.dla_up(x[self.first_level:])
        return [x]
# (dataset artifact) repo: xingyizhou/CenterTrack — end of dlav0.py
# --- file: src/lib/model/networks/generic_network.py ---
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from torch import nn
from .backbones.dla import dla34
from .backbones.resnet import Resnet
from .backbones.mobilenet import MobileNetV2
from .necks.dlaup import DLASeg
from .necks.msraup import MSRAUp
# Registries mapping opt.backbone / opt.neck strings to their constructors.
backbone_factory = {
    'dla34': dla34,
    'resnet': Resnet,
    'mobilenet': MobileNetV2
}
neck_factory = {
    'dlaup': DLASeg,
    'msraup': MSRAUp
}
def fill_fc_weights(layers):
    """Zero the bias of every Conv2d found under `layers` (weights untouched)."""
    for module in layers.modules():
        if isinstance(module, nn.Conv2d) and module.bias is not None:
            nn.init.constant_(module.bias, 0)
class GenericNetwork(nn.Module):
    """Configurable backbone + neck + per-task output heads.

    heads: dict head_name -> number of output channels.
    head_convs: dict head_name -> list of intermediate conv widths (may be
    empty for a direct 1x1 output).
    """
    def __init__(self, num_layers, heads, head_convs, num_stacks=1, opt=None):
        super(GenericNetwork, self).__init__()
        print('Using generic model with backbone {} and neck {}'.format(
            opt.backbone, opt.neck))
        # assert (not opt.pre_hm) and (not opt.pre_img)
        if opt is not None and opt.head_kernel != 3:
            print('Using head kernel:', opt.head_kernel)
            head_kernel = opt.head_kernel
        else:
            head_kernel = 3
        self.opt = opt
        self.backbone = backbone_factory[opt.backbone](opt=opt)
        channels = self.backbone.channels
        self.neck = neck_factory[opt.neck](opt=opt, channels=channels)
        last_channel = self.neck.out_channel
        self.num_stacks = num_stacks
        self.heads = heads
        for head in self.heads:
            classes = self.heads[head]
            head_conv = head_convs[head]
            if len(head_conv) > 0:
                # Final 1x1 projection to the head's output channels.
                out = nn.Conv2d(head_conv[-1], classes,
                                kernel_size=1, stride=1, padding=0, bias=True)
                # First conv uses the configurable head kernel; the rest 1x1.
                convs = [nn.Conv2d(last_channel, head_conv[0],
                                   kernel_size=head_kernel,
                                   padding=head_kernel // 2, bias=True)]
                for k in range(1, len(head_conv)):
                    convs.append(nn.Conv2d(head_conv[k - 1], head_conv[k],
                                           kernel_size=1, bias=True))
                # Interleave every conv with ReLU, then append the output
                # layer. (Replaces the original if/elif chain, which only
                # supported up to 4 convs and raised NameError beyond that.)
                modules = []
                for conv in convs:
                    modules.extend([conv, nn.ReLU(inplace=True)])
                modules.append(out)
                fc = nn.Sequential(*modules)
                if 'hm' in head:
                    # Heatmap heads: bias the final layer toward low scores.
                    fc[-1].bias.data.fill_(opt.prior_bias)
                else:
                    fill_fc_weights(fc)
            else:
                # No intermediate convs: direct 1x1 output head.
                fc = nn.Conv2d(last_channel, classes,
                               kernel_size=1, stride=1, padding=0, bias=True)
                if 'hm' in head:
                    fc.bias.data.fill_(opt.prior_bias)
                else:
                    fill_fc_weights(fc)
            self.__setattr__(head, fc)

    def forward(self, x, pre_img=None, pre_hm=None):
        """Run backbone + neck, then apply every head to each output stack.

        Returns a list with one entry per stack: a dict head -> tensor, or —
        when opt.model_output_list is set — a list ordered by sorted head
        name (presumably for export; confirm against the deployment code).
        """
        y = self.backbone(x, pre_img, pre_hm)
        feats = self.neck(y)
        out = []
        if self.opt.model_output_list:
            for s in range(self.num_stacks):
                z = []
                for head in sorted(self.heads):
                    z.append(self.__getattr__(head)(feats[s]))
                out.append(z)
        else:
            for s in range(self.num_stacks):
                z = {}
                for head in self.heads:
                    z[head] = self.__getattr__(head)(feats[s])
                out.append(z)
        return out
# (dataset artifact) repo: xingyizhou/CenterTrack — end of generic_network.py
# --- file: src/lib/model/networks/necks/dlaup.py ---
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import logging
import numpy as np
from os.path import join
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
try:
from ..DCNv2.dcn_v2 import DCN
except:
print('import DCN failed')
DCN = None
BN_MOMENTUM = 0.1
class Identity(nn.Module):
    """No-op module: forward returns its input unchanged."""
    def __init__(self):
        super().__init__()
    def forward(self, x):
        return x
def fill_fc_weights(layers):
    """Zero the bias of every Conv2d found under `layers` (weights untouched)."""
    for module in layers.modules():
        if isinstance(module, nn.Conv2d) and module.bias is not None:
            nn.init.constant_(module.bias, 0)
def fill_up_weights(up):
    """Initialize a depthwise ConvTranspose2d as a bilinear-upsampling kernel.

    Channel 0 receives the bilinear filter, which is then replicated to all
    remaining channels; weights stay trainable.
    """
    w = up.weight.data
    f = math.ceil(w.size(2) / 2)
    center = (2 * f - 1 - f % 2) / (2. * f)
    for r in range(w.size(2)):
        for s in range(w.size(3)):
            w[0, 0, r, s] = ((1 - math.fabs(r / f - center)) *
                             (1 - math.fabs(s / f - center)))
    for ch in range(1, w.size(0)):
        w[ch, 0, :, :] = w[0, 0, :, :]
class Conv(nn.Module):
    """1x1 convolution -> BatchNorm -> ReLU projection block."""
    def __init__(self, chi, cho):
        # chi / cho: input / output channel counts.
        super(Conv, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(chi, cho, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True))
    def forward(self, x):
        return self.conv(x)
class GlobalConv(nn.Module):
    """Large-kernel separable convolution ('gcn' node type).

    Two branches — (k x 1 then 1 x k) and (1 x k then k x 1) — are summed,
    then normalized and activated. Padding keeps spatial size.
    """
    def __init__(self, chi, cho, k=7, d=1):
        super(GlobalConv, self).__init__()
        gcl = nn.Sequential(
            nn.Conv2d(chi, cho, kernel_size=(k, 1), stride=1, bias=False,
                      dilation=d, padding=(d * (k // 2), 0)),
            nn.Conv2d(cho, cho, kernel_size=(1, k), stride=1, bias=False,
                      dilation=d, padding=(0, d * (k // 2))))
        gcr = nn.Sequential(
            nn.Conv2d(chi, cho, kernel_size=(1, k), stride=1, bias=False,
                      dilation=d, padding=(0, d * (k // 2))),
            nn.Conv2d(cho, cho, kernel_size=(k, 1), stride=1, bias=False,
                      dilation=d, padding=(d * (k // 2), 0)))
        # Zero conv biases (no-op here: the convs are bias=False).
        fill_fc_weights(gcl)
        fill_fc_weights(gcr)
        self.gcl = gcl
        self.gcr = gcr
        self.act = nn.Sequential(
            nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True)
        )
    def forward(self, x):
        x = self.gcl(x) + self.gcr(x)
        x = self.act(x)
        return x
class DeformConv(nn.Module):
    """3x3 deformable convolution (DCNv2) followed by BatchNorm + ReLU.

    Note: if the DCNv2 extension failed to import, the module-level DCN is
    None and constructing this class raises a TypeError.
    """
    def __init__(self, chi, cho):
        super(DeformConv, self).__init__()
        self.actf = nn.Sequential(
            nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),
            nn.ReLU(inplace=True)
        )
        self.conv = DCN(chi, cho, kernel_size=(3,3), stride=1, padding=1, dilation=1, deformable_groups=1)
    def forward(self, x):
        x = self.conv(x)
        x = self.actf(x)
        return x
class IDAUp(nn.Module):
    """Iterative Deep Aggregation upsampling.

    For each input scale i >= 1: project channels[i] to `o` channels,
    upsample by up_f[i] with a depthwise transposed conv initialized to
    bilinear upsampling, and fuse with the previous level through a `node`
    module. forward() mutates `layers` in place.
    """
    def __init__(self, o, channels, up_f, node_type=(DeformConv, DeformConv)):
        super(IDAUp, self).__init__()
        for i in range(1, len(channels)):
            c = channels[i]
            f = int(up_f[i])
            proj = node_type[0](c, o)
            node = node_type[1](o, o)
            up = nn.ConvTranspose2d(o, o, f * 2, stride=f,
                                    padding=f // 2, output_padding=0,
                                    groups=o, bias=False)
            fill_up_weights(up)  # bilinear init; weights remain trainable
            setattr(self, 'proj_' + str(i), proj)
            setattr(self, 'up_' + str(i), up)
            setattr(self, 'node_' + str(i), node)
    def forward(self, layers, startp, endp):
        # Processes layers[startp+1 : endp]; submodule indices are relative
        # to startp, matching the 1-based indices used in __init__.
        for i in range(startp + 1, endp):
            upsample = getattr(self, 'up_' + str(i - startp))
            project = getattr(self, 'proj_' + str(i - startp))
            layers[i] = upsample(project(layers[i]))
            node = getattr(self, 'node_' + str(i - startp))
            layers[i] = node(layers[i] + layers[i - 1])
class DLAUp(nn.Module):
    """Cascade of IDAUp stages merging deep features into shallower scales.

    NOTE(review): when `in_channels` is None it aliases the caller's
    `channels` list and is mutated below; callers here pass a fresh slice,
    so this is harmless — but verify before reusing elsewhere.
    """
    def __init__(self, startp, channels, scales, in_channels=None,
                 node_type=DeformConv):
        super(DLAUp, self).__init__()
        self.startp = startp
        if in_channels is None:
            in_channels = channels
        self.channels = channels
        channels = list(channels)
        scales = np.array(scales, dtype=int)
        for i in range(len(channels) - 1):
            j = -i - 2
            # ida_i merges the deepest i+2 levels down to channels[j] width.
            setattr(self, 'ida_{}'.format(i),
                    IDAUp(channels[j], in_channels[j:],
                          scales[j:] // scales[j],
                          node_type=node_type))
            scales[j + 1:] = scales[j]
            in_channels[j + 1:] = [channels[j] for _ in channels[j + 1:]]
    def forward(self, layers):
        out = [layers[-1]] # start with 32
        for i in range(len(layers) - self.startp - 1):
            ida = getattr(self, 'ida_{}'.format(i))
            # ida mutates `layers` in place; the refined deepest map is
            # collected after every stage, shallowest first.
            ida(layers, len(layers) -i - 2, len(layers))
            out.insert(0, layers[-1])
        return out
# Maps opt.dla_node to a (projection_module, aggregation_node_module) pair
# consumed by IDAUp; 'dcn' requires the DCNv2 extension to have imported.
DLA_NODE = {
    'dcn': (DeformConv, DeformConv),
    'gcn': (Conv, GlobalConv),
    'conv': (Conv, Conv),
}
class DLASeg(nn.Module):
    """Neck module: DLAUp + IDAUp over backbone features; one stride-4 output.

    Unlike the dla.py DLASeg, this variant owns no backbone — forward()
    receives the backbone's per-level feature list directly.
    """
    def __init__(self, opt, channels):
        super().__init__()
        self.opt = opt
        self.channels = channels
        self.node_type = DLA_NODE[opt.dla_node]
        print('Using node type:', self.node_type)
        down_ratio = 4
        self.first_level = int(np.log2(down_ratio))  # 2 for down_ratio == 4
        self.last_level = 5
        scales = [2 ** i for i in range(len(channels[self.first_level:]))]
        self.dla_up = DLAUp(
            self.first_level, channels[self.first_level:], scales,
            node_type=self.node_type)
        # Exposed so callers can size their heads (see GenericNetwork).
        self.out_channel = channels[self.first_level]
        self.ida_up = IDAUp(
            self.out_channel, channels[self.first_level:self.last_level],
            [2 ** i for i in range(self.last_level - self.first_level)],
            node_type=self.node_type)
    def forward(self, x):
        x = self.dla_up(x)
        y = []
        for i in range(self.last_level - self.first_level):
            y.append(x[i].clone())  # clone: ida_up mutates its input list
        self.ida_up(y, 0, len(y))
        return [y[-1]]
# (dataset artifact) repo: xingyizhou/CenterTrack — end of dlaup.py
# --- file: src/lib/model/networks/necks/msraup.py ---
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Dequan Wang and Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import logging
import torch
import torch.nn as nn
try:
from ..DCNv2.dcn_v2 import DCN
except:
print('import DCN failed')
DCN = None
BN_MOMENTUM = 0.1
def fill_up_weights(up):
    """Initialize a ConvTranspose2d as a bilinear-upsampling kernel.

    The bilinear filter is written into channel 0 and replicated to every
    other channel; weights stay trainable.
    """
    w = up.weight.data
    f = math.ceil(w.size(2) / 2)
    center = (2 * f - 1 - f % 2) / (2. * f)
    for r in range(w.size(2)):
        for s in range(w.size(3)):
            w[0, 0, r, s] = ((1 - math.fabs(r / f - center)) *
                             (1 - math.fabs(s / f - center)))
    for ch in range(1, w.size(0)):
        w[ch, 0, :, :] = w[0, 0, :, :]
def fill_fc_weights(layers):
    """Init every Conv2d under `layers`: N(0, 0.001) weights, zero bias."""
    for module in layers.modules():
        if not isinstance(module, nn.Conv2d):
            continue
        nn.init.normal_(module.weight, std=0.001)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0)
class MSRAUp(nn.Module):
    """MSRA 'Simple Baselines' upsampling neck.

    Upsamples the deepest backbone feature map with three
    DCN + ConvTranspose2d stages (stride 2 each, 8x total) and returns a
    single feature map with ``opt.msra_outchannel`` channels.
    """

    def __init__(self, opt, channels):
        # opt: options namespace; only opt.msra_outchannel (64 or 256) is
        #     read here.
        # channels: backbone channel counts; channels[-1] feeds the tower.
        super().__init__()
        self.opt = opt
        assert self.opt.msra_outchannel in [64, 256]
        # Deconvs are bias-free because each one is followed by BatchNorm.
        self.deconv_with_bias = False
        self.inplanes = channels[-1]
        self.out_channel = self.opt.msra_outchannel
        # used for deconv layers
        if self.opt.msra_outchannel == 64:
            print('Using slimed resnet: 256 128 64 up channels.')
            self.deconv_layers = self._make_deconv_layer(
                3,
                [256, 128, 64],
                [4, 4, 4],
            )
        else:
            print('Using original resnet: 256 256 256 up channels.')
            print('Using 256 deconvs')
            self.deconv_layers = self._make_deconv_layer(
                3,
                [256, 256, 256],
                [4, 4, 4],
            )
        self.init_weights()

    def forward(self, x):
        # x: list of backbone feature maps; only the deepest one is used.
        x = self.deconv_layers(x[-1])
        return [x]

    def _get_deconv_cfg(self, deconv_kernel, index):
        # Map kernel size -> (padding, output_padding) so a stride-2 deconv
        # exactly doubles the spatial size.
        # NOTE(review): any other kernel size raises UnboundLocalError.
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        return deconv_kernel, padding, output_padding

    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        # Build [DCN -> BN -> ReLU -> deconv -> BN -> ReLU] * num_layers.
        # NOTE(review): DCN is None if the DCNv2 import failed at module
        # load; construction would then raise TypeError.
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)
            planes = num_filters[i]
            fc = DCN(self.inplanes, planes,
                     kernel_size=(3,3), stride=1,
                     padding=1, dilation=1, deformable_groups=1)
            # fc = nn.Conv2d(self.inplanes, planes,
            #     kernel_size=3, stride=1,
            #     padding=1, dilation=1, bias=False)
            # fill_fc_weights(fc)
            up = nn.ConvTranspose2d(
                in_channels=planes,
                out_channels=planes,
                kernel_size=kernel,
                stride=2,
                padding=padding,
                output_padding=output_padding,
                bias=self.deconv_with_bias)
            # Start the deconv as exact bilinear upsampling.
            fill_up_weights(up)
            layers.append(fc)
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            layers.append(up)
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            self.inplanes = planes
        return nn.Sequential(*layers)

    def init_weights(self):
        # BatchNorms start as identity (weight 1, bias 0); convs/DCNs keep
        # their default initialization.
        for name, m in self.deconv_layers.named_modules():
            if isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/model/networks/resdcn.py | Python | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Dequan Wang and Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import logging
import torch
import torch.nn as nn
try:
from .DCNv2.dcn_v2 import DCN
except:
print('Import DCN failed')
DCN = None
import torch.utils.model_zoo as model_zoo
from .base_model import BaseModel
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
# Torchvision-hosted ImageNet-pretrained checkpoints keyed by architecture
# name; used by init_weights() to seed the backbone via model_zoo.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Bias-free 3x3 convolution with padding 1 (spatial size preserved
    when stride is 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Standard ResNet basic residual block: two 3x3 convs + shortcut."""

    expansion = 1  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        # Optional projection applied to the shortcut when shape changes.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce, 3x3, 1x1 expand (4x)."""

    expansion = 4  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion,
                                  momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        # Optional projection applied to the shortcut when shape changes.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
def fill_up_weights(up):
    """Initialize a ConvTranspose2d as bilinear upsampling.

    Writes a bilinear interpolation kernel into channel (0, 0) and copies
    it to every other input channel, giving a per-channel upsampler.
    """
    weight = up.weight.data
    f = math.ceil(weight.size(2) / 2)
    center = (2 * f - 1 - f % 2) / (2. * f)
    for row in range(weight.size(2)):
        for col in range(weight.size(3)):
            weight[0, 0, row, col] = (
                (1 - math.fabs(row / f - center)) *
                (1 - math.fabs(col / f - center)))
    for ch in range(1, weight.size(0)):
        weight[ch, 0, :, :] = weight[0, 0, :, :]
def fill_fc_weights(layers):
    """Initialize every Conv2d in `layers`: N(0, 0.001) weights, zero bias."""
    for module in layers.modules():
        if not isinstance(module, nn.Conv2d):
            continue
        nn.init.normal_(module.weight, std=0.001)
        if module.bias is not None:
            nn.init.constant_(module.bias, 0)
# (residual block class, per-stage block counts) for each supported depth.
resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),
               34: (BasicBlock, [3, 4, 6, 3]),
               50: (Bottleneck, [3, 4, 6, 3]),
               101: (Bottleneck, [3, 4, 23, 3]),
               152: (Bottleneck, [3, 8, 36, 3])}
class PoseResDCN(BaseModel):
    """ResNet backbone with a DCN deconvolution upsampling tower.

    Builds the standard ResNet stem + 4 stages (total stride 32), then
    three DCN + ConvTranspose2d stages (stride 2 each), so ``img2feats``
    returns a single stride-4 feature map for BaseModel's output heads.
    """

    def __init__(self, num_layers, heads, head_convs, _):
        """
        Args:
            num_layers: ResNet depth (18/34/50/101/152), keys resnet_spec.
            heads, head_convs: output-head config forwarded to BaseModel;
                head_convs['hm'][0] (64 or 256) also selects the slim or
                full deconv tower width.
            _: the opt namespace; only `_.rgb` is read here.
        """
        assert head_convs['hm'][0] in [64, 256]
        super(PoseResDCN, self).__init__(
            heads, head_convs, 1, head_convs['hm'][0], opt=_)
        block, layers = resnet_spec[num_layers]
        self.inplanes = 64
        # Deconvs are bias-free because each one is followed by BatchNorm.
        self.deconv_with_bias = False
        # Standard ResNet stem + four stages.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # used for deconv layers
        if head_convs['hm'][0] == 64:
            print('Using slimed resnet: 256 128 64 up channels.')
            self.deconv_layers = self._make_deconv_layer(
                3,
                [256, 128, 64],
                [4, 4, 4],
            )
        else:
            print('Using original resnet: 256 256 256 up channels.')
            print('Using 256 deconvs')
            self.deconv_layers = self._make_deconv_layer(
                3,
                [256, 256, 256],
                [4, 4, 4],
            )
        self.init_weights(num_layers, _.rgb)

    def img2feats(self, x):
        """BaseModel hook: image tensor -> [single upsampled feature map]."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.deconv_layers(x)
        return [x]

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one ResNet stage; adds a 1x1 projection shortcut for the
        first block when the shape changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _get_deconv_cfg(self, deconv_kernel, index):
        """Return (kernel, padding, output_padding) so a stride-2 deconv
        exactly doubles the spatial size."""
        if deconv_kernel == 4:
            padding, output_padding = 1, 0
        elif deconv_kernel == 3:
            padding, output_padding = 1, 1
        elif deconv_kernel == 2:
            padding, output_padding = 0, 0
        else:
            # Previously fell through and raised UnboundLocalError.
            raise ValueError(
                'unsupported deconv kernel size: {}'.format(deconv_kernel))
        return deconv_kernel, padding, output_padding

    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        """Build [DCN -> BN -> ReLU -> deconv -> BN -> ReLU] * num_layers.

        NOTE(review): DCN is None if the DCNv2 import failed at module
        load; construction would then raise TypeError.
        """
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)
            planes = num_filters[i]
            fc = DCN(self.inplanes, planes,
                     kernel_size=(3,3), stride=1,
                     padding=1, dilation=1, deformable_groups=1)
            up = nn.ConvTranspose2d(
                in_channels=planes,
                out_channels=planes,
                kernel_size=kernel,
                stride=2,
                padding=padding,
                output_padding=output_padding,
                bias=self.deconv_with_bias)
            # Start each deconv as exact bilinear upsampling.
            fill_up_weights(up)
            layers.append(fc)
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            layers.append(up)
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            self.inplanes = planes
        return nn.Sequential(*layers)

    def init_weights(self, num_layers, rgb=False):
        """Load ImageNet-pretrained backbone weights; re-init deconv BNs.

        Args:
            rgb: if True, swap the stem conv's R and B input channels so a
                model pretrained on RGB input can consume BGR images.
        """
        url = model_urls['resnet{}'.format(num_layers)]
        pretrained_state_dict = model_zoo.load_url(url)
        print('=> loading pretrained model {}'.format(url))
        self.load_state_dict(pretrained_state_dict, strict=False)
        if rgb:
            print('shuffle ImageNet pretrained model from RGB to BGR')
            # BUGFIX: this class has no `self.base`; the stem conv is
            # `self.conv1`. The old `self.base.base_layer[0]` (copied from
            # the DLA wrapper) raised AttributeError here.
            self.conv1.weight.data[:, 0], \
                self.conv1.weight.data[:, 2] = \
                self.conv1.weight.data[:, 2].clone(), \
                self.conv1.weight.data[:, 0].clone()
        print('=> init deconv weights from normal distribution')
        for name, m in self.deconv_layers.named_modules():
            if isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/model/networks/resnet.py | Python | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from .base_model import BaseModel
BN_MOMENTUM = 0.1
# Torchvision-hosted ImageNet-pretrained checkpoints keyed by architecture
# name; used by init_weights() to seed the backbone via model_zoo.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Bias-free 3x3 convolution with padding 1 (spatial size preserved
    when stride is 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Standard ResNet basic residual block: two 3x3 convs + shortcut."""

    expansion = 1  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        # Optional projection applied to the shortcut when shape changes.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce, 3x3, 1x1 expand (4x)."""

    expansion = 4  # output channels == planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion,
                                  momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        # Optional projection applied to the shortcut when shape changes.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
# (residual block class, per-stage block counts) for each supported depth.
resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),
               34: (BasicBlock, [3, 4, 6, 3]),
               50: (Bottleneck, [3, 4, 6, 3]),
               101: (Bottleneck, [3, 4, 23, 3]),
               152: (Bottleneck, [3, 8, 36, 3])}
class PoseResNet(nn.Module):
    """ResNet backbone with a plain deconvolution upsampling tower.

    ``img2feats`` maps an image to a single stride-4 feature map.
    NOTE(review): init_weights iterates `self.heads` and looks up one
    sub-module per head via `self.__getattr__(head)`; those modules are
    presumably attached elsewhere (BaseModel does this in the DCN
    variant) — confirm before using this class stand-alone.
    """

    def __init__(self, num_layers, heads, head_convs, _):
        """
        Args:
            num_layers: ResNet depth (18/34/50/101/152), keys resnet_spec.
            heads: dict of output heads (presumably head name ->
                number of output channels — confirm against callers).
            head_convs: head conv config; not used in this class.
            _: the opt namespace; not used in this class.
        """
        # BUGFIX: the old code first called
        #     super(PoseResNet, self).__init__(heads, head_convs, 1, 64)
        # which passed BaseModel-style arguments to nn.Module.__init__
        # (a TypeError at runtime), and then called super().__init__() a
        # second time. A single argument-less call is correct here.
        super(PoseResNet, self).__init__()
        block, layers = resnet_spec[num_layers]
        self.inplanes = 64
        # Deconvs are bias-free because each one is followed by BatchNorm.
        self.deconv_with_bias = False
        self.heads = heads
        # Standard ResNet stem + four stages.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # used for deconv layers
        self.deconv_layers = self._make_deconv_layer(
            3,
            [256, 256, 256],
            [4, 4, 4],
        )
        self.init_weights(num_layers, pretrained=True)

    def img2feats(self, x):
        """Image tensor -> [single upsampled feature map]."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.deconv_layers(x)
        return [x]

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one ResNet stage; adds a 1x1 projection shortcut for the
        first block when the shape changes."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _get_deconv_cfg(self, deconv_kernel, index):
        """Return (kernel, padding, output_padding) so a stride-2 deconv
        exactly doubles the spatial size."""
        if deconv_kernel == 4:
            padding, output_padding = 1, 0
        elif deconv_kernel == 3:
            padding, output_padding = 1, 1
        elif deconv_kernel == 2:
            padding, output_padding = 0, 0
        else:
            # Previously fell through and raised UnboundLocalError.
            raise ValueError(
                'unsupported deconv kernel size: {}'.format(deconv_kernel))
        return deconv_kernel, padding, output_padding

    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        """Build [deconv -> BN -> ReLU] * num_layers (stride 2 each)."""
        assert num_layers == len(num_filters), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        assert num_layers == len(num_kernels), \
            'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        layers = []
        for i in range(num_layers):
            kernel, padding, output_padding = \
                self._get_deconv_cfg(num_kernels[i], i)
            planes = num_filters[i]
            layers.append(
                nn.ConvTranspose2d(
                    in_channels=self.inplanes,
                    out_channels=planes,
                    kernel_size=kernel,
                    stride=2,
                    padding=padding,
                    output_padding=output_padding,
                    bias=self.deconv_with_bias))
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            self.inplanes = planes
        return nn.Sequential(*layers)

    def init_weights(self, num_layers, pretrained=True):
        """Initialize deconv tower and heads, then load ImageNet weights.

        Raises:
            ValueError: if `pretrained` is False (training from scratch is
                not supported by this class).
        """
        if pretrained:
            for _, m in self.deconv_layers.named_modules():
                if isinstance(m, nn.ConvTranspose2d):
                    nn.init.normal_(m.weight, std=0.001)
                    if self.deconv_with_bias:
                        nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.BatchNorm2d):
                    # BatchNorms start as identity (weight 1, bias 0).
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
            # NOTE(review): assumes a sub-module exists per head name and
            # that self.heads[head] is that head's output-channel count.
            for head in self.heads:
                final_layer = self.__getattr__(head)
                for i, m in enumerate(final_layer.modules()):
                    if isinstance(m, nn.Conv2d):
                        if m.weight.shape[0] == self.heads[head]:
                            if 'hm' in head:
                                # Focal-loss prior: start heatmaps near 0.
                                nn.init.constant_(m.bias, -2.19)
                            else:
                                nn.init.normal_(m.weight, std=0.001)
                                nn.init.constant_(m.bias, 0)
            url = model_urls['resnet{}'.format(num_layers)]
            pretrained_state_dict = model_zoo.load_url(url)
            print('=> loading pretrained model {}'.format(url))
            self.load_state_dict(pretrained_state_dict, strict=False)
        else:
            print('=> imagenet pretrained model does not exist')
            print('=> please download it first')
            raise ValueError('imagenet pretrained model does not exist')
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/model/scatter_gather.py | Python | import torch
from torch.autograd import Variable
from torch.nn.parallel._functions import Scatter, Gather
def scatter(inputs, target_gpus, dim=0, chunk_sizes=None):
    r"""Recursively split `inputs` across `target_gpus`.

    Variables are chunked (with the given per-device `chunk_sizes`) via
    Scatter; tuples, lists and dicts are scattered element-wise; any other
    object is replicated by reference, one copy per target GPU. Raw
    (non-Variable) tensors are not supported.
    """
    def _spread(obj):
        # Variables (tensors) get physically chunked across devices.
        if isinstance(obj, Variable):
            return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
        assert not torch.is_tensor(obj), "Tensors not supported in scatter."
        if isinstance(obj, tuple):
            return list(zip(*map(_spread, obj)))
        if isinstance(obj, list):
            return list(map(list, zip(*map(_spread, obj))))
        if isinstance(obj, dict):
            return list(map(type(obj), zip(*map(_spread, obj.items()))))
        # Non-container, non-tensor: same reference for every device.
        return [obj for _ in target_gpus]
    return _spread(inputs)
def scatter_kwargs(inputs, kwargs, target_gpus, dim=0, chunk_sizes=None):
    r"""Scatter positional and keyword arguments together.

    Pads the shorter side (empty tuple / empty dict) so that both results
    have one entry per target device, then returns them as tuples.
    """
    scattered_args = \
        scatter(inputs, target_gpus, dim, chunk_sizes) if inputs else []
    scattered_kwargs = \
        scatter(kwargs, target_gpus, dim, chunk_sizes) if kwargs else []
    gap = len(scattered_kwargs) - len(scattered_args)
    if gap > 0:
        scattered_args.extend(() for _ in range(gap))
    elif gap < 0:
        scattered_kwargs.extend({} for _ in range(-gap))
    return tuple(scattered_args), tuple(scattered_kwargs)
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/model/utils.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
def _sigmoid(x):
y = torch.clamp(x.sigmoid_(), min=1e-4, max=1-1e-4)
return y
def _sigmoid12(x):
y = torch.clamp(x.sigmoid_(), 1e-12)
return y
def _gather_feat(feat, ind):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
return feat
def _tranpose_and_gather_feat(feat, ind):
    """Pick per-pixel feature vectors: (B, C, H, W) + (B, K) -> (B, K, C).

    `ind` holds flattened spatial indices (y * W + x). The name keeps the
    historical 'tranpose' typo because callers use it.
    """
    batch, channels = feat.size(0), feat.size(1)
    flat = feat.permute(0, 2, 3, 1).contiguous().view(batch, -1, channels)
    return _gather_feat(flat, ind)
def flip_tensor(x):
    """Flip a (B, C, H, W) tensor horizontally (along the width axis)."""
    return torch.flip(x, [3])
def flip_lr(x, flip_idx):
    """Horizontally flip a channel-per-keypoint tensor and swap mirrored
    channels.

    `flip_idx` lists channel-index pairs (e.g. left/right joints) to
    exchange after the spatial flip. Runs on CPU via numpy and returns a
    tensor on `x`'s device.
    """
    flipped = x.detach().cpu().numpy()[..., ::-1].copy()
    orig_shape = flipped.shape
    for left, right in flip_idx:
        flipped[:, left, ...], flipped[:, right, ...] = \
            flipped[:, right, ...].copy(), flipped[:, left, ...].copy()
    return torch.from_numpy(flipped.reshape(orig_shape)).to(x.device)
def flip_lr_off(x, flip_idx):
    """Horizontally flip dense keypoint-offset maps.

    x: (B, 2*J, H, W) tensor holding per-keypoint (x, y) offset pairs in
    channel order [x0, y0, x1, y1, ...]. After the spatial flip the x
    components change sign and the mirrored keypoint pairs listed in
    `flip_idx` are swapped. Returns a tensor on `x`'s device.

    Generalized from the original hard-coded 17 (COCO) keypoints to any
    even channel count; behavior is unchanged for 34-channel inputs.
    """
    flipped = x.detach().cpu().numpy()[..., ::-1].copy()
    shape = flipped.shape
    num_kpts = shape[1] // 2  # was hard-coded to 17 (COCO)
    flipped = flipped.reshape(shape[0], num_kpts, 2,
                              shape[2], shape[3])
    # The x offset changes sign under a horizontal flip.
    flipped[:, :, 0, :, :] *= -1
    for left, right in flip_idx:
        flipped[:, left, ...], flipped[:, right, ...] = \
            flipped[:, right, ...].copy(), flipped[:, left, ...].copy()
    return torch.from_numpy(flipped.reshape(shape)).to(x.device)
def _nms(heat, kernel=3):
pad = (kernel - 1) // 2
hmax = nn.functional.max_pool2d(
heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == heat).float()
return heat * keep
def _topk_channel(scores, K=100):
batch, cat, height, width = scores.size()
topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
return topk_scores, topk_inds, topk_ys, topk_xs
def _topk(scores, K=100):
batch, cat, height, width = scores.size()
topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K)
topk_clses = (topk_ind / K).int()
topk_inds = _gather_feat(
topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
return topk_score, topk_inds, topk_clses, topk_ys, topk_xs
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/opts.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
class opts(object):
def __init__(self):
self.parser = argparse.ArgumentParser()
# basic experiment setting
self.parser.add_argument('task', default='',
help='ctdet | ddd | multi_pose '
'| tracking or combined with ,')
self.parser.add_argument('--dataset', default='coco',
help='see lib/dataset/dataset_facotry for ' +
'available datasets')
self.parser.add_argument('--test_dataset', default='',
help='coco | kitti | coco_hp | pascal')
self.parser.add_argument('--exp_id', default='default')
self.parser.add_argument('--test', action='store_true')
self.parser.add_argument('--debug', type=int, default=0,
help='level of visualization.'
'1: only show the final detection results'
'2: show the network output features'
'3: use matplot to display' # useful when lunching training with ipython notebook
'4: save all visualizations to disk')
self.parser.add_argument('--no_pause', action='store_true')
self.parser.add_argument('--demo', default='',
help='path to image/ image folders/ video. '
'or "webcam"')
self.parser.add_argument('--load_model', default='',
help='path to pretrained model')
self.parser.add_argument('--resume', action='store_true',
help='resume an experiment. '
'Reloaded the optimizer parameter and '
'set load_model to model_last.pth '
'in the exp dir if load_model is empty.')
# system
self.parser.add_argument('--gpus', default='0',
help='-1 for CPU, use comma for multiple gpus')
self.parser.add_argument('--num_workers', type=int, default=4,
help='dataloader threads. 0 for single-thread.')
self.parser.add_argument('--not_cuda_benchmark', action='store_true',
help='disable when the input size is not fixed.')
self.parser.add_argument('--seed', type=int, default=317,
help='random seed') # from CornerNet
self.parser.add_argument('--not_set_cuda_env', action='store_true',
help='used when training in slurm clusters.')
# log
self.parser.add_argument('--print_iter', type=int, default=0,
help='disable progress bar and print to screen.')
self.parser.add_argument('--save_all', action='store_true',
help='save model to disk every 5 epochs.')
self.parser.add_argument('--vis_thresh', type=float, default=0.3,
help='visualization threshold.')
self.parser.add_argument('--debugger_theme', default='white',
choices=['white', 'black'])
self.parser.add_argument('--eval_val', action='store_true')
self.parser.add_argument('--save_imgs', default='', help='')
self.parser.add_argument('--save_img_suffix', default='', help='')
self.parser.add_argument('--skip_first', type=int, default=-1, help='')
self.parser.add_argument('--save_video', action='store_true')
self.parser.add_argument('--save_framerate', type=int, default=30)
self.parser.add_argument('--resize_video', action='store_true')
self.parser.add_argument('--video_h', type=int, default=512, help='')
self.parser.add_argument('--video_w', type=int, default=512, help='')
self.parser.add_argument('--transpose_video', action='store_true')
self.parser.add_argument('--show_track_color', action='store_true')
self.parser.add_argument('--not_show_bbox', action='store_true')
self.parser.add_argument('--not_show_number', action='store_true')
self.parser.add_argument('--not_show_txt', action='store_true')
self.parser.add_argument('--qualitative', action='store_true')
self.parser.add_argument('--tango_color', action='store_true')
self.parser.add_argument('--only_show_dots', action='store_true')
self.parser.add_argument('--show_trace', action='store_true')
# model
self.parser.add_argument('--arch', default='dla_34',
help='model architecture. Currently tested'
'res_18 | res_101 | resdcn_18 | resdcn_101 |'
'dlav0_34 | dla_34 | hourglass')
self.parser.add_argument('--dla_node', default='dcn')
self.parser.add_argument('--head_conv', type=int, default=-1,
help='conv layer channels for output head'
'0 for no conv layer'
'-1 for default setting: '
'64 for resnets and 256 for dla.')
self.parser.add_argument('--num_head_conv', type=int, default=1)
self.parser.add_argument('--head_kernel', type=int, default=3, help='')
self.parser.add_argument('--down_ratio', type=int, default=4,
help='output stride. Currently only supports 4.')
self.parser.add_argument('--not_idaup', action='store_true')
self.parser.add_argument('--num_classes', type=int, default=-1)
self.parser.add_argument('--num_layers', type=int, default=101)
self.parser.add_argument('--backbone', default='dla34')
self.parser.add_argument('--neck', default='dlaup')
self.parser.add_argument('--msra_outchannel', type=int, default=256)
self.parser.add_argument('--efficient_level', type=int, default=0)
self.parser.add_argument('--prior_bias', type=float, default=-4.6) # -2.19
# input
self.parser.add_argument('--input_res', type=int, default=-1,
help='input height and width. -1 for default from '
'dataset. Will be overriden by input_h | input_w')
self.parser.add_argument('--input_h', type=int, default=-1,
help='input height. -1 for default from dataset.')
self.parser.add_argument('--input_w', type=int, default=-1,
help='input width. -1 for default from dataset.')
self.parser.add_argument('--dataset_version', default='')
# train
self.parser.add_argument('--optim', default='adam')
self.parser.add_argument('--lr', type=float, default=1.25e-4,
help='learning rate for batch size 32.')
self.parser.add_argument('--lr_step', type=str, default='60',
help='drop learning rate by 10.')
self.parser.add_argument('--save_point', type=str, default='90',
help='when to save the model to disk.')
self.parser.add_argument('--num_epochs', type=int, default=70,
help='total training epochs.')
self.parser.add_argument('--batch_size', type=int, default=32,
help='batch size')
self.parser.add_argument('--master_batch_size', type=int, default=-1,
help='batch size on the master gpu.')
self.parser.add_argument('--num_iters', type=int, default=-1,
help='default: #samples / batch_size.')
self.parser.add_argument('--val_intervals', type=int, default=10000,
help='number of epochs to run validation.')
self.parser.add_argument('--trainval', action='store_true',
help='include validation in training and '
'test on test set')
self.parser.add_argument('--ltrb', action='store_true',
help='')
self.parser.add_argument('--ltrb_weight', type=float, default=0.1,
help='')
self.parser.add_argument('--reset_hm', action='store_true')
self.parser.add_argument('--reuse_hm', action='store_true')
self.parser.add_argument('--use_kpt_center', action='store_true')
self.parser.add_argument('--add_05', action='store_true')
self.parser.add_argument('--dense_reg', type=int, default=1, help='')
# test
self.parser.add_argument('--flip_test', action='store_true',
help='flip data augmentation.')
self.parser.add_argument('--test_scales', type=str, default='1',
help='multi scale test augmentation.')
self.parser.add_argument('--nms', action='store_true',
help='run nms in testing.')
self.parser.add_argument('--K', type=int, default=100,
help='max number of output objects.')
self.parser.add_argument('--not_prefetch_test', action='store_true',
help='not use parallal data pre-processing.')
self.parser.add_argument('--fix_short', type=int, default=-1)
self.parser.add_argument('--keep_res', action='store_true',
help='keep the original resolution'
' during validation.')
self.parser.add_argument('--map_argoverse_id', action='store_true',
help='if trained on nuscenes and eval on kitti')
self.parser.add_argument('--out_thresh', type=float, default=-1,
help='')
self.parser.add_argument('--depth_scale', type=float, default=1,
help='')
self.parser.add_argument('--save_results', action='store_true')
self.parser.add_argument('--load_results', default='')
self.parser.add_argument('--use_loaded_results', action='store_true')
self.parser.add_argument('--ignore_loaded_cats', default='')
self.parser.add_argument('--model_output_list', action='store_true',
help='Used when convert to onnx')
self.parser.add_argument('--non_block_test', action='store_true')
self.parser.add_argument('--vis_gt_bev', default='', help='')
self.parser.add_argument('--kitti_split', default='3dop',
help='different validation split for kitti: '
'3dop | subcnn')
self.parser.add_argument('--test_focal_length', type=int, default=-1)
# dataset
self.parser.add_argument('--not_rand_crop', action='store_true',
help='not use the random crop data augmentation'
'from CornerNet.')
self.parser.add_argument('--not_max_crop', action='store_true',
help='used when the training dataset has'
'inbalanced aspect ratios.')
self.parser.add_argument('--shift', type=float, default=0,
help='when not using random crop, 0.1'
'apply shift augmentation.')
self.parser.add_argument('--scale', type=float, default=0,
help='when not using random crop, 0.4'
'apply scale augmentation.')
self.parser.add_argument('--aug_rot', type=float, default=0,
help='probability of applying '
'rotation augmentation.')
self.parser.add_argument('--rotate', type=float, default=0,
help='when not using random crop'
'apply rotation augmentation.')
self.parser.add_argument('--flip', type=float, default=0.5,
help='probability of applying flip augmentation.')
self.parser.add_argument('--no_color_aug', action='store_true',
help='not use the color augmenation '
'from CornerNet')
# Tracking
self.parser.add_argument('--tracking', action='store_true')
self.parser.add_argument('--pre_hm', action='store_true')
self.parser.add_argument('--same_aug_pre', action='store_true')
self.parser.add_argument('--zero_pre_hm', action='store_true')
self.parser.add_argument('--hm_disturb', type=float, default=0)
self.parser.add_argument('--lost_disturb', type=float, default=0)
self.parser.add_argument('--fp_disturb', type=float, default=0)
self.parser.add_argument('--pre_thresh', type=float, default=-1)
self.parser.add_argument('--track_thresh', type=float, default=0.3)
self.parser.add_argument('--new_thresh', type=float, default=0.3)
self.parser.add_argument('--max_frame_dist', type=int, default=3)
self.parser.add_argument('--ltrb_amodal', action='store_true')
self.parser.add_argument('--ltrb_amodal_weight', type=float, default=0.1)
self.parser.add_argument('--public_det', action='store_true')
self.parser.add_argument('--no_pre_img', action='store_true')
self.parser.add_argument('--zero_tracking', action='store_true')
self.parser.add_argument('--hungarian', action='store_true')
self.parser.add_argument('--max_age', type=int, default=-1)
# loss
self.parser.add_argument('--tracking_weight', type=float, default=1)
self.parser.add_argument('--reg_loss', default='l1',
help='regression loss: sl1 | l1 | l2')
self.parser.add_argument('--hm_weight', type=float, default=1,
help='loss weight for keypoint heatmaps.')
self.parser.add_argument('--off_weight', type=float, default=1,
help='loss weight for keypoint local offsets.')
self.parser.add_argument('--wh_weight', type=float, default=0.1,
help='loss weight for bounding box size.')
self.parser.add_argument('--hp_weight', type=float, default=1,
help='loss weight for human pose offset.')
self.parser.add_argument('--hm_hp_weight', type=float, default=1,
help='loss weight for human keypoint heatmap.')
self.parser.add_argument('--amodel_offset_weight', type=float, default=1,
help='Please forgive the typo.')
self.parser.add_argument('--dep_weight', type=float, default=1,
help='loss weight for depth.')
self.parser.add_argument('--dim_weight', type=float, default=1,
help='loss weight for 3d bounding box size.')
self.parser.add_argument('--rot_weight', type=float, default=1,
help='loss weight for orientation.')
self.parser.add_argument('--nuscenes_att', action='store_true')
self.parser.add_argument('--nuscenes_att_weight', type=float, default=1)
self.parser.add_argument('--velocity', action='store_true')
self.parser.add_argument('--velocity_weight', type=float, default=1)
# custom dataset
self.parser.add_argument('--custom_dataset_img_path', default='')
self.parser.add_argument('--custom_dataset_ann_path', default='')
  def parse(self, args=''):
    """Parse command-line options and derive all dependent settings.

    args: optional list of argument strings; '' means parse sys.argv.
    Returns the populated opt namespace (the namespace is also mutated
    in place: comma-separated string options become lists, thresholds
    are reconciled, and log/data directories are attached).
    """
    if args == '':
      opt = self.parser.parse_args()
    else:
      opt = self.parser.parse_args(args)
    # fall back to the training dataset when no test dataset was given
    if opt.test_dataset == '':
      opt.test_dataset = opt.dataset
    # keep the raw string (e.g. '0,1') before converting to a list
    opt.gpus_str = opt.gpus
    opt.gpus = [int(gpu) for gpu in opt.gpus.split(',')]
    # re-index visible GPUs to 0..n-1; a negative first id means CPU-only
    opt.gpus = [i for i in range(len(opt.gpus))] if opt.gpus[0] >=0 else [-1]
    opt.lr_step = [int(i) for i in opt.lr_step.split(',')]
    opt.save_point = [int(i) for i in opt.save_point.split(',')]
    opt.test_scales = [float(i) for i in opt.test_scales.split(',')]
    opt.save_imgs = [i for i in opt.save_imgs.split(',')] \
      if opt.save_imgs != '' else []
    opt.ignore_loaded_cats = \
      [int(i) for i in opt.ignore_loaded_cats.split(',')] \
      if opt.ignore_loaded_cats != '' else []
    # keep at least two dataloader workers per GPU
    opt.num_workers = max(opt.num_workers, 2 * len(opt.gpus))
    opt.pre_img = False
    if 'tracking' in opt.task:
      print('Running tracking')
      opt.tracking = True
      # tracking never keeps detections below the association threshold
      opt.out_thresh = max(opt.track_thresh, opt.out_thresh)
      opt.pre_thresh = max(opt.track_thresh, opt.pre_thresh)
      opt.new_thresh = max(opt.track_thresh, opt.new_thresh)
      opt.pre_img = not opt.no_pre_img
      print('Using tracking threshold for out threshold!', opt.track_thresh)
    if 'ddd' in opt.task:
      opt.show_track_color = True
    opt.fix_res = not opt.keep_res
    print('Fix size testing.' if opt.fix_res else 'Keep resolution testing.')
    if opt.head_conv == -1: # init default head_conv
      opt.head_conv = 256 if 'dla' in opt.arch else 64
    # input padding: 127 for hourglass backbones, 31 otherwise
    opt.pad = 127 if 'hourglass' in opt.arch else 31
    opt.num_stacks = 2 if opt.arch == 'hourglass' else 1
    if opt.master_batch_size == -1:
      opt.master_batch_size = opt.batch_size // len(opt.gpus)
    rest_batch_size = (opt.batch_size - opt.master_batch_size)
    opt.chunk_sizes = [opt.master_batch_size]
    # spread the remaining samples as evenly as possible over the other GPUs
    for i in range(len(opt.gpus) - 1):
      slave_chunk_size = rest_batch_size // (len(opt.gpus) - 1)
      if i < rest_batch_size % (len(opt.gpus) - 1):
        slave_chunk_size += 1
      opt.chunk_sizes.append(slave_chunk_size)
    print('training chunk_sizes:', opt.chunk_sizes)
    if opt.debug > 0:
      # debugging runs single-process, single-GPU, one sample per batch
      opt.num_workers = 0
      opt.batch_size = 1
      opt.gpus = [opt.gpus[0]]
      opt.master_batch_size = -1
    # log dirs
    opt.root_dir = os.path.join(os.path.dirname(__file__), '..', '..')
    opt.data_dir = os.path.join(opt.root_dir, 'data')
    opt.exp_dir = os.path.join(opt.root_dir, 'exp', opt.task)
    opt.save_dir = os.path.join(opt.exp_dir, opt.exp_id)
    opt.debug_dir = os.path.join(opt.save_dir, 'debug')
    if opt.resume and opt.load_model == '':
      opt.load_model = os.path.join(opt.save_dir, 'model_last.pth')
    return opt
def update_dataset_info_and_set_heads(self, opt, dataset):
opt.num_classes = dataset.num_categories \
if opt.num_classes < 0 else opt.num_classes
# input_h(w): opt.input_h overrides opt.input_res overrides dataset default
input_h, input_w = dataset.default_resolution
input_h = opt.input_res if opt.input_res > 0 else input_h
input_w = opt.input_res if opt.input_res > 0 else input_w
opt.input_h = opt.input_h if opt.input_h > 0 else input_h
opt.input_w = opt.input_w if opt.input_w > 0 else input_w
opt.output_h = opt.input_h // opt.down_ratio
opt.output_w = opt.input_w // opt.down_ratio
opt.input_res = max(opt.input_h, opt.input_w)
opt.output_res = max(opt.output_h, opt.output_w)
opt.heads = {'hm': opt.num_classes, 'reg': 2, 'wh': 2}
if 'tracking' in opt.task:
opt.heads.update({'tracking': 2})
if 'ddd' in opt.task:
opt.heads.update({'dep': 1, 'rot': 8, 'dim': 3, 'amodel_offset': 2})
if 'multi_pose' in opt.task:
opt.heads.update({
'hps': dataset.num_joints * 2, 'hm_hp': dataset.num_joints,
'hp_offset': 2})
if opt.ltrb:
opt.heads.update({'ltrb': 4})
if opt.ltrb_amodal:
opt.heads.update({'ltrb_amodal': 4})
if opt.nuscenes_att:
opt.heads.update({'nuscenes_att': 8})
if opt.velocity:
opt.heads.update({'velocity': 3})
weight_dict = {'hm': opt.hm_weight, 'wh': opt.wh_weight,
'reg': opt.off_weight, 'hps': opt.hp_weight,
'hm_hp': opt.hm_hp_weight, 'hp_offset': opt.off_weight,
'dep': opt.dep_weight, 'rot': opt.rot_weight,
'dim': opt.dim_weight,
'amodel_offset': opt.amodel_offset_weight,
'ltrb': opt.ltrb_weight,
'tracking': opt.tracking_weight,
'ltrb_amodal': opt.ltrb_amodal_weight,
'nuscenes_att': opt.nuscenes_att_weight,
'velocity': opt.velocity_weight}
opt.weights = {head: weight_dict[head] for head in opt.heads}
for head in opt.weights:
if opt.weights[head] == 0:
del opt.heads[head]
opt.head_conv = {head: [opt.head_conv \
for i in range(opt.num_head_conv if head != 'reg' else 1)] for head in opt.heads}
print('input h w:', opt.input_h, opt.input_w)
print('heads', opt.heads)
print('weights', opt.weights)
print('head conv', opt.head_conv)
return opt
def init(self, args=''):
# only used in demo
default_dataset_info = {
'ctdet': 'coco', 'multi_pose': 'coco_hp', 'ddd': 'nuscenes',
'tracking,ctdet': 'coco', 'tracking,multi_pose': 'coco_hp',
'tracking,ddd': 'nuscenes'
}
opt = self.parse()
from dataset.dataset_factory import dataset_factory
train_dataset = default_dataset_info[opt.task] \
if opt.task in default_dataset_info else 'coco'
dataset = dataset_factory[train_dataset]
opt = self.update_dataset_info_and_set_heads(opt, dataset)
return opt
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/trainer.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import torch
import numpy as np
from progress.bar import Bar
from model.data_parallel import DataParallel
from utils.utils import AverageMeter
from model.losses import FastFocalLoss, RegWeightedL1Loss
from model.losses import BinRotLoss, WeightedBCELoss
from model.decode import generic_decode
from model.utils import _sigmoid, flip_tensor, flip_lr_off, flip_lr
from utils.debugger import Debugger
from utils.post_process import generic_post_process
class GenericLoss(torch.nn.Module):
  """Weighted multi-head loss for CenterTrack-style outputs.

  Combines a focal loss on heatmaps, an L1 loss on regression heads, a
  bin-rotation loss and a weighted BCE for nuScenes attributes, averaged
  over the network stacks and weighted per head via opt.weights.
  """

  def __init__(self, opt):
    super(GenericLoss, self).__init__()
    self.opt = opt
    self.crit = FastFocalLoss(opt=opt)
    self.crit_reg = RegWeightedL1Loss()
    if 'rot' in opt.heads:
      self.crit_rot = BinRotLoss()
    if 'nuscenes_att' in opt.heads:
      self.crit_nuscenes_att = WeightedBCELoss()

  def _sigmoid_output(self, output):
    """Apply output activations in place and return the same dict."""
    for hm_key in ('hm', 'hm_hp'):
      if hm_key in output:
        output[hm_key] = _sigmoid(output[hm_key])
    if 'dep' in output:
      # depth is predicted in inverse-sigmoid space: d = 1/sigmoid(x) - 1
      output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.
    return output

  def forward(self, outputs, batch):
    """Return (total weighted loss, per-head loss dict including 'tot').

    outputs: list of per-stack head dicts; batch: ground-truth tensors
    (masks, indices, targets) keyed to match the heads.
    """
    opt = self.opt
    losses = dict.fromkeys(opt.heads, 0)
    reg_heads = (
      'reg', 'wh', 'tracking', 'ltrb', 'ltrb_amodal', 'hps',
      'dep', 'dim', 'amodel_offset', 'velocity')
    for stack_idx in range(opt.num_stacks):
      out = self._sigmoid_output(outputs[stack_idx])
      if 'hm' in out:
        losses['hm'] += self.crit(
          out['hm'], batch['hm'], batch['ind'],
          batch['mask'], batch['cat']) / opt.num_stacks
      for head in reg_heads:
        if head in out:
          losses[head] += self.crit_reg(
            out[head], batch[head + '_mask'],
            batch['ind'], batch[head]) / opt.num_stacks
      if 'hm_hp' in out:
        losses['hm_hp'] += self.crit(
          out['hm_hp'], batch['hm_hp'], batch['hp_ind'],
          batch['hm_hp_mask'], batch['joint']) / opt.num_stacks
      if 'hp_offset' in out:
        losses['hp_offset'] += self.crit_reg(
          out['hp_offset'], batch['hp_offset_mask'],
          batch['hp_ind'], batch['hp_offset']) / opt.num_stacks
      if 'rot' in out:
        losses['rot'] += self.crit_rot(
          out['rot'], batch['rot_mask'], batch['ind'], batch['rotbin'],
          batch['rotres']) / opt.num_stacks
      if 'nuscenes_att' in out:
        losses['nuscenes_att'] += self.crit_nuscenes_att(
          out['nuscenes_att'], batch['nuscenes_att_mask'],
          batch['ind'], batch['nuscenes_att']) / opt.num_stacks
    total = 0
    for head in opt.heads:
      total += opt.weights[head] * losses[head]
    losses['tot'] = total
    return losses['tot'], losses
class ModleWithLoss(torch.nn.Module):
  """Bundle a network and its loss so DataParallel scatters both together.

  (The class name keeps the upstream typo: Trainer refers to it by name.)
  """

  def __init__(self, model, loss):
    super(ModleWithLoss, self).__init__()
    self.model = model
    self.loss = loss

  def forward(self, batch):
    """Run the model on a batch dict; return (last-stack output, loss, stats).

    'pre_img' / 'pre_hm' are optional prior-frame inputs; absent keys are
    passed to the model as None.
    """
    outputs = self.model(
      batch['image'], batch.get('pre_img'), batch.get('pre_hm'))
    loss, loss_stats = self.loss(outputs, batch)
    return outputs[-1], loss, loss_stats
class Trainer(object):
  """Drives training/validation epochs for a model wrapped with GenericLoss."""

  def __init__(
    self, opt, model, optimizer=None):
    self.opt = opt
    self.optimizer = optimizer
    self.loss_stats, self.loss = self._get_losses(opt)
    self.model_with_loss = ModleWithLoss(model, self.loss)

  def set_device(self, gpus, chunk_sizes, device):
    """Move model (DataParallel if multi-GPU) and optimizer state to device."""
    if len(gpus) > 1:
      self.model_with_loss = DataParallel(
        self.model_with_loss, device_ids=gpus,
        chunk_sizes=chunk_sizes).to(device)
    else:
      self.model_with_loss = self.model_with_loss.to(device)
    # optimizer state loaded from a checkpoint may live on another device
    for state in self.optimizer.state.values():
      for k, v in state.items():
        if isinstance(v, torch.Tensor):
          state[k] = v.to(device=device, non_blocking=True)

  def run_epoch(self, phase, epoch, data_loader):
    """Run one epoch; phase is 'train' or 'val'.

    Returns (dict of averaged loss stats plus elapsed 'time' in minutes,
    results dict — unused here, always empty).
    """
    model_with_loss = self.model_with_loss
    if phase == 'train':
      model_with_loss.train()
    else:
      if len(self.opt.gpus) > 1:
        # unwrap DataParallel so evaluation runs on a single device
        model_with_loss = self.model_with_loss.module
      model_with_loss.eval()
      torch.cuda.empty_cache()
    opt = self.opt
    results = {}
    data_time, batch_time = AverageMeter(), AverageMeter()
    # only track losses with a non-zero weight (plus the total)
    avg_loss_stats = {l: AverageMeter() for l in self.loss_stats \
                      if l == 'tot' or opt.weights[l] > 0}
    num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters
    bar = Bar('{}/{}'.format(opt.task, opt.exp_id), max=num_iters)
    end = time.time()
    for iter_id, batch in enumerate(data_loader):
      if iter_id >= num_iters:
        break
      data_time.update(time.time() - end)
      # 'meta' holds non-tensor bookkeeping and stays on the CPU
      for k in batch:
        if k != 'meta':
          batch[k] = batch[k].to(device=opt.device, non_blocking=True)
      output, loss, loss_stats = model_with_loss(batch)
      loss = loss.mean()
      if phase == 'train':
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
      batch_time.update(time.time() - end)
      end = time.time()
      Bar.suffix = '{phase}: [{0}][{1}/{2}]|Tot: {total:} |ETA: {eta:} '.format(
        epoch, iter_id, num_iters, phase=phase,
        total=bar.elapsed_td, eta=bar.eta_td)
      for l in avg_loss_stats:
        avg_loss_stats[l].update(
          loss_stats[l].mean().item(), batch['image'].size(0))
        Bar.suffix = Bar.suffix + '|{} {:.4f} '.format(l, avg_loss_stats[l].avg)
      Bar.suffix = Bar.suffix + '|Data {dt.val:.3f}s({dt.avg:.3f}s) ' \
        '|Net {bt.avg:.3f}s'.format(dt=data_time, bt=batch_time)
      if opt.print_iter > 0: # If not using progress bar
        if iter_id % opt.print_iter == 0:
          print('{}/{}| {}'.format(opt.task, opt.exp_id, Bar.suffix))
      else:
        bar.next()
      if opt.debug > 0:
        self.debug(batch, output, iter_id, dataset=data_loader.dataset)
      del output, loss, loss_stats
    bar.finish()
    ret = {k: v.avg for k, v in avg_loss_stats.items()}
    ret['time'] = bar.elapsed_td.total_seconds() / 60.
    return ret, results

  def _get_losses(self, opt):
    """Return (ordered loss-stat names for active heads, GenericLoss instance)."""
    loss_order = ['hm', 'wh', 'reg', 'ltrb', 'hps', 'hm_hp', \
      'hp_offset', 'dep', 'dim', 'rot', 'amodel_offset', \
      'ltrb_amodal', 'tracking', 'nuscenes_att', 'velocity']
    loss_states = ['tot'] + [k for k in loss_order if k in opt.heads]
    loss = GenericLoss(opt)
    return loss_states, loss

  def debug(self, batch, output, iter_id, dataset):
    """Visualize predictions vs. ground truth for one batch (opt.debug > 0).

    Saves images when opt.debug == 4, otherwise shows them interactively.
    """
    opt = self.opt
    if 'pre_hm' in batch:
      output.update({'pre_hm': batch['pre_hm']})
    dets = generic_decode(output, K=opt.K, opt=opt)
    for k in dets:
      dets[k] = dets[k].detach().cpu().numpy()
    dets_gt = batch['meta']['gt_det']
    for i in range(1):  # only the first sample of the batch is visualized
      debugger = Debugger(opt=opt, dataset=dataset)
      img = batch['image'][i].detach().cpu().numpy().transpose(1, 2, 0)
      # undo dataset normalization back to a displayable uint8 image
      img = np.clip(((
        img * dataset.std + dataset.mean) * 255.), 0, 255).astype(np.uint8)
      pred = debugger.gen_colormap(output['hm'][i].detach().cpu().numpy())
      gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())
      debugger.add_blend_img(img, pred, 'pred_hm')
      debugger.add_blend_img(img, gt, 'gt_hm')
      if 'pre_img' in batch:
        pre_img = batch['pre_img'][i].detach().cpu().numpy().transpose(1, 2, 0)
        pre_img = np.clip(((
          pre_img * dataset.std + dataset.mean) * 255), 0, 255).astype(np.uint8)
        debugger.add_img(pre_img, 'pre_img_pred')
        debugger.add_img(pre_img, 'pre_img_gt')
        if 'pre_hm' in batch:
          pre_hm = debugger.gen_colormap(
            batch['pre_hm'][i].detach().cpu().numpy())
          debugger.add_blend_img(pre_img, pre_hm, 'pre_hm')
      debugger.add_img(img, img_id='out_pred')
      if 'ltrb_amodal' in opt.heads:
        debugger.add_img(img, img_id='out_pred_amodal')
        debugger.add_img(img, img_id='out_gt_amodal')
      # Predictions
      for k in range(len(dets['scores'][i])):
        if dets['scores'][i, k] > opt.vis_thresh:
          debugger.add_coco_bbox(
            dets['bboxes'][i, k] * opt.down_ratio, dets['clses'][i, k],
            dets['scores'][i, k], img_id='out_pred')
          if 'ltrb_amodal' in opt.heads:
            debugger.add_coco_bbox(
              dets['bboxes_amodal'][i, k] * opt.down_ratio, dets['clses'][i, k],
              dets['scores'][i, k], img_id='out_pred_amodal')
          if 'hps' in opt.heads and int(dets['clses'][i, k]) == 0:
            debugger.add_coco_hp(
              dets['hps'][i, k] * opt.down_ratio, img_id='out_pred')
          if 'tracking' in opt.heads:
            debugger.add_arrow(
              dets['cts'][i][k] * opt.down_ratio,
              dets['tracking'][i][k] * opt.down_ratio, img_id='out_pred')
            debugger.add_arrow(
              dets['cts'][i][k] * opt.down_ratio,
              dets['tracking'][i][k] * opt.down_ratio, img_id='pre_img_pred')
      # Ground truth
      debugger.add_img(img, img_id='out_gt')
      for k in range(len(dets_gt['scores'][i])):
        if dets_gt['scores'][i][k] > opt.vis_thresh:
          debugger.add_coco_bbox(
            dets_gt['bboxes'][i][k] * opt.down_ratio, dets_gt['clses'][i][k],
            dets_gt['scores'][i][k], img_id='out_gt')
          if 'ltrb_amodal' in opt.heads:
            debugger.add_coco_bbox(
              dets_gt['bboxes_amodal'][i, k] * opt.down_ratio,
              dets_gt['clses'][i, k],
              dets_gt['scores'][i, k], img_id='out_gt_amodal')
          if 'hps' in opt.heads and \
            (int(dets['clses'][i, k]) == 0):
            debugger.add_coco_hp(
              dets_gt['hps'][i][k] * opt.down_ratio, img_id='out_gt')
          if 'tracking' in opt.heads:
            debugger.add_arrow(
              dets_gt['cts'][i][k] * opt.down_ratio,
              dets_gt['tracking'][i][k] * opt.down_ratio, img_id='out_gt')
            debugger.add_arrow(
              dets_gt['cts'][i][k] * opt.down_ratio,
              dets_gt['tracking'][i][k] * opt.down_ratio, img_id='pre_img_gt')
      if 'hm_hp' in opt.heads:
        pred = debugger.gen_colormap_hp(
          output['hm_hp'][i].detach().cpu().numpy())
        gt = debugger.gen_colormap_hp(batch['hm_hp'][i].detach().cpu().numpy())
        debugger.add_blend_img(img, pred, 'pred_hmhp')
        debugger.add_blend_img(img, gt, 'gt_hmhp')
      if 'rot' in opt.heads and 'dim' in opt.heads and 'dep' in opt.heads:
        # 3D task: post-process to camera coordinates and draw 3D/BEV views
        dets_gt = {k: dets_gt[k].cpu().numpy() for k in dets_gt}
        calib = batch['meta']['calib'].detach().numpy() \
          if 'calib' in batch['meta'] else None
        det_pred = generic_post_process(opt, dets,
          batch['meta']['c'].cpu().numpy(), batch['meta']['s'].cpu().numpy(),
          output['hm'].shape[2], output['hm'].shape[3], self.opt.num_classes,
          calib)
        det_gt = generic_post_process(opt, dets_gt,
          batch['meta']['c'].cpu().numpy(), batch['meta']['s'].cpu().numpy(),
          output['hm'].shape[2], output['hm'].shape[3], self.opt.num_classes,
          calib)
        debugger.add_3d_detection(
          batch['meta']['img_path'][i], batch['meta']['flipped'][i],
          det_pred[i], calib[i],
          vis_thresh=opt.vis_thresh, img_id='add_pred')
        debugger.add_3d_detection(
          batch['meta']['img_path'][i], batch['meta']['flipped'][i],
          det_gt[i], calib[i],
          vis_thresh=opt.vis_thresh, img_id='add_gt')
        debugger.add_bird_views(det_pred[i], det_gt[i],
          vis_thresh=opt.vis_thresh, img_id='bird_pred_gt')
      if opt.debug == 4:
        debugger.save_all_imgs(opt.debug_dir, prefix='{}'.format(iter_id))
      else:
        debugger.show_all_imgs(pause=True)

  def val(self, epoch, data_loader):
    """Run one validation epoch."""
    return self.run_epoch('val', epoch, data_loader)

  def train(self, epoch, data_loader):
    """Run one training epoch."""
    return self.run_epoch('train', epoch, data_loader)
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/utils/ddd_utils.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
def comput_corners_3d(dim, rotation_y):
  """Return the 8 x 3 corners of a 3D box centered at the origin.

  dim is (h, w, l); rotation_y is the yaw around the Y axis. The bottom
  face sits at y = 0 and the top at y = -h. (Function name keeps the
  upstream typo: callers refer to it by this name.)
  """
  cos_r, sin_r = np.cos(rotation_y), np.sin(rotation_y)
  rot = np.array(
    [[cos_r, 0, sin_r], [0, 1, 0], [-sin_r, 0, cos_r]], dtype=np.float32)
  h, w, l = dim[0], dim[1], dim[2]
  xs = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
  ys = [0, 0, 0, 0, -h, -h, -h, -h]
  zs = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
  local = np.array([xs, ys, zs], dtype=np.float32)
  return np.dot(rot, local).transpose(1, 0)
def compute_box_3d(dim, location, rotation_y):
  """Return the 8 x 3 corners of a box of size `dim`, yawed by
  `rotation_y` and translated to `location`."""
  offset = np.array(location, dtype=np.float32).reshape(1, 3)
  return comput_corners_3d(dim, rotation_y) + offset
def project_to_image(pts_3d, P):
  """Project n x 3 camera-frame points through a 3 x 4 matrix P.

  Returns n x 2 pixel coordinates (perspective division by the third
  homogeneous coordinate).
  """
  ones = np.ones((pts_3d.shape[0], 1), dtype=np.float32)
  pts_homo = np.concatenate([pts_3d, ones], axis=1)
  proj = pts_homo.dot(P.T)  # same as np.dot(P, pts_homo.T).T
  return proj[:, :2] / proj[:, 2:]
def compute_orientation_3d(dim, location, rotation_y):
  """Return a 2 x 3 heading segment from `location` along the box length.

  The segment runs from the box origin to dim[2] (= l) units along the
  yawed forward direction.
  """
  cos_r, sin_r = np.cos(rotation_y), np.sin(rotation_y)
  rot = np.array(
    [[cos_r, 0, sin_r], [0, 1, 0], [-sin_r, 0, cos_r]], dtype=np.float32)
  arrow = np.array([[0, dim[2]], [0, 0], [0, 0]], dtype=np.float32)
  arrow = np.dot(rot, arrow)
  arrow = arrow + np.array(location, dtype=np.float32).reshape(3, 1)
  return arrow.transpose(1, 0)
def draw_box_3d(image, corners, c=(255, 0, 255), same_color=False):
  """Draw a projected 3D box (8 x 2 `corners`) onto `image`.

  Faces are drawn back to front; unless same_color, edges fully on the
  left/right side get fixed blue/red colors. face_idx[0] additionally
  gets a thin diagonal cross (presumably marking the front face — the
  corners with +l/2 x in comput_corners_3d). Returns the mutated image.
  """
  face_idx = [[0,1,5,4],
              [1,2,6, 5],
              [3,0,4,7],
              [2,3,7,6]]
  right_corners = [1, 2, 6, 5] if not same_color else []
  left_corners = [0, 3, 7, 4] if not same_color else []
  thickness = 4 if same_color else 2
  corners = corners.astype(np.int32)
  for ind_f in range(3, -1, -1):
    f = face_idx[ind_f]
    for j in range(4):
      # print('corners', corners)
      cc = c
      if (f[j] in left_corners) and (f[(j+1)%4] in left_corners):
        cc = (255, 0, 0)
      if (f[j] in right_corners) and (f[(j+1)%4] in right_corners):
        cc = (0, 0, 255)
      try:
        # best-effort: out-of-range coordinates make cv2.line raise; skip them
        cv2.line(image, (corners[f[j], 0], corners[f[j], 1]),
                 (corners[f[(j+1)%4], 0], corners[f[(j+1)%4], 1]), cc, thickness, lineType=cv2.LINE_AA)
      except:
        pass
    if ind_f == 0:
      try:
        cv2.line(image, (corners[f[0], 0], corners[f[0], 1]),
                 (corners[f[2], 0], corners[f[2], 1]), c, 1, lineType=cv2.LINE_AA)
        cv2.line(image, (corners[f[1], 0], corners[f[1], 1]),
                 (corners[f[3], 0], corners[f[3], 1]), c, 1, lineType=cv2.LINE_AA)
      except:
        pass
  # top_idx = [0, 1, 2, 3]
  return image
def unproject_2d_to_3d(pt_2d, depth, P):
  """Back-project a pixel `pt_2d` at `depth` through the 3 x 4 matrix P.

  Returns a length-3 float32 array (x, y, z) in the camera frame.
  """
  cam_z = depth - P[2, 3]
  cam_x = (pt_2d[0] * depth - P[0, 3] - P[0, 2] * cam_z) / P[0, 0]
  cam_y = (pt_2d[1] * depth - P[1, 3] - P[1, 2] * cam_z) / P[1, 1]
  return np.array([cam_x, cam_y, cam_z], dtype=np.float32).reshape(3)
def alpha2rot_y(alpha, x, cx, fx):
  """Convert observation angle alpha to global yaw rotation_y.

  alpha: observation angle in [-pi, pi]; x: object center x in pixels;
  cx, fx: principal point x and focal length. The result is wrapped back
  into [-pi, pi].
  """
  ray_angle = np.arctan2(x - cx, fx)
  rot_y = alpha + ray_angle
  if rot_y > np.pi:
    rot_y -= 2 * np.pi
  elif rot_y < -np.pi:
    rot_y += 2 * np.pi
  return rot_y
def rot_y2alpha(rot_y, x, cx, fx):
  """Inverse of alpha2rot_y: global yaw -> observation angle.

  rot_y: yaw around the Y axis in [-pi, pi]; x: object center x in
  pixels; cx, fx: principal point x and focal length. The result is
  wrapped back into [-pi, pi].
  """
  ray_angle = np.arctan2(x - cx, fx)
  alpha = rot_y - ray_angle
  if alpha > np.pi:
    alpha -= 2 * np.pi
  elif alpha < -np.pi:
    alpha += 2 * np.pi
  return alpha
def ddd2locrot(center, alpha, dim, depth, calib):
  """Recover (3D location, yaw) from a 2D center, alpha and depth.

  Operates on a single image; calib is the 3 x 4 projection matrix.
  """
  location = unproject_2d_to_3d(center, depth, calib)
  location[1] += dim[0] / 2  # shift y by half the box height (dim[0] = h)
  yaw = alpha2rot_y(alpha, center[0], calib[0, 2], calib[0, 0])
  return location, yaw
def project_3d_bbox(location, dim, rotation_y, calib):
  """Project the 8 corners of a 3D box into the image (8 x 2 pixels)."""
  corners = compute_box_3d(dim, location, rotation_y)
  return project_to_image(corners, calib)
if __name__ == '__main__':
  # smoke test: sample camera calibration matrix and a sample box;
  # the recovered yaw should come out close to the reference rotation_y
  calib = np.array(
    [[7.070493000000e+02, 0.000000000000e+00, 6.040814000000e+02, 4.575831000000e+01],
     [0.000000000000e+00, 7.070493000000e+02, 1.805066000000e+02, -3.454157000000e-01],
     [0.000000000000e+00, 0.000000000000e+00, 1.000000000000e+00, 4.981016000000e-03]],
    dtype=np.float32)
  alpha = -0.20
  tl = np.array([712.40, 143.00], dtype=np.float32)
  br = np.array([810.73, 307.92], dtype=np.float32)
  ct = (tl + br) / 2  # 2D box center
  rotation_y = 0.01
  print('alpha2rot_y', alpha2rot_y(alpha, ct[0], calib[0, 2], calib[0, 0]))
  print('rotation_y', rotation_y)
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/utils/debugger.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
from mpl_toolkits.mplot3d import Axes3D
import time
import numpy as np
import cv2
from .ddd_utils import compute_box_3d, project_to_image, draw_box_3d
class Debugger(object):
  def __init__(self, opt, dataset):
    """Visualization helper: stores named images and draws boxes/poses/BEV.

    opt supplies theme, down_ratio and display flags; dataset supplies
    class names. `color_list` and `tango_color_dark` are module-level
    palettes defined elsewhere in this file.
    """
    self.opt = opt
    self.imgs = {}  # img_id -> image array
    self.theme = opt.debugger_theme
    self.plt = plt
    self.with_3d = False
    self.names = dataset.class_name
    self.out_size = 384 if opt.dataset == 'kitti' else 512
    self.cnt = 0
    colors = [(color_list[i]).astype(np.uint8) for i in range(len(color_list))]
    # repeat the palette until there is at least one color per class
    while len(colors) < len(self.names):
      colors = colors + colors[:min(len(colors), len(self.names) - len(colors))]
    self.colors = np.array(colors, dtype=np.uint8).reshape(len(colors), 1, 1, 3)
    if self.theme == 'white':
      # reverse the flattened palette and cap brightness for the light theme
      self.colors = self.colors.reshape(-1)[::-1].reshape(len(colors), 1, 1, 3)
      self.colors = np.clip(self.colors, 0., 0.6 * 255).astype(np.uint8)
    self.num_joints = 17
    # COCO 17-keypoint skeleton as pairs of joint indices
    self.edges = [[0, 1], [0, 2], [1, 3], [2, 4],
                  [3, 5], [4, 6], [5, 6],
                  [5, 7], [7, 9], [6, 8], [8, 10],
                  [5, 11], [6, 12], [11, 12],
                  [11, 13], [13, 15], [12, 14], [14, 16]]
    # per-edge BGR colors, aligned with self.edges
    self.ec = [(255, 0, 0), (0, 0, 255), (255, 0, 0), (0, 0, 255),
               (255, 0, 0), (0, 0, 255), (255, 0, 255),
               (255, 0, 0), (255, 0, 0), (0, 0, 255), (0, 0, 255),
               (255, 0, 0), (0, 0, 255), (255, 0, 255),
               (255, 0, 0), (255, 0, 0), (0, 0, 255), (0, 0, 255)]
    # per-joint colors for keypoint dots
    self.colors_hp = [(128, 0, 128), (128, 0, 0), (0, 0, 128),
      (128, 0, 0), (0, 0, 128), (128, 0, 0), (0, 0, 128),
      (128, 0, 0), (0, 0, 128), (128, 0, 0), (0, 0, 128),
      (128, 0, 0), (0, 0, 128), (128, 0, 0), (0, 0, 128),
      (128, 0, 0), (0, 0, 128)]
    self.track_color = {}  # track_id -> random display color
    self.trace = {}  # track_id -> recent center points (only_show_dots mode)
    # print('names', self.names)
    self.down_ratio=opt.down_ratio
    # for bird view
    self.world_size = 64
def add_img(self, img, img_id='default', revert_color=False):
if revert_color:
img = 255 - img
self.imgs[img_id] = img.copy()
def add_mask(self, mask, bg, imgId = 'default', trans = 0.8):
self.imgs[imgId] = (mask.reshape(
mask.shape[0], mask.shape[1], 1) * 255 * trans + \
bg * (1 - trans)).astype(np.uint8)
def show_img(self, pause = False, imgId = 'default'):
cv2.imshow('{}'.format(imgId), self.imgs[imgId])
if pause:
cv2.waitKey()
def add_blend_img(self, back, fore, img_id='blend', trans=0.7):
if self.theme == 'white':
fore = 255 - fore
if fore.shape[0] != back.shape[0] or fore.shape[0] != back.shape[1]:
fore = cv2.resize(fore, (back.shape[1], back.shape[0]))
if len(fore.shape) == 2:
fore = fore.reshape(fore.shape[0], fore.shape[1], 1)
self.imgs[img_id] = (back * (1. - trans) + fore * trans)
self.imgs[img_id][self.imgs[img_id] > 255] = 255
self.imgs[img_id][self.imgs[img_id] < 0] = 0
self.imgs[img_id] = self.imgs[img_id].astype(np.uint8).copy()
def gen_colormap(self, img, output_res=None):
img = img.copy()
# ignore region
img[img == 1] = 0.5
c, h, w = img.shape[0], img.shape[1], img.shape[2]
if output_res is None:
output_res = (h * self.down_ratio, w * self.down_ratio)
img = img.transpose(1, 2, 0).reshape(h, w, c, 1).astype(np.float32)
colors = np.array(
self.colors, dtype=np.float32).reshape(-1, 3)[:c].reshape(1, 1, c, 3)
if self.theme == 'white':
colors = 255 - colors
if self.opt.tango_color:
colors = tango_color_dark[:c].reshape(1, 1, c, 3)
color_map = (img * colors).max(axis=2).astype(np.uint8)
color_map = cv2.resize(color_map, (output_res[1], output_res[0]))
return color_map
def gen_colormap_hp(self, img, output_res=None):
img = img.copy()
img[img == 1] = 0.5
c, h, w = img.shape[0], img.shape[1], img.shape[2]
if output_res is None:
output_res = (h * self.down_ratio, w * self.down_ratio)
img = img.transpose(1, 2, 0).reshape(h, w, c, 1).astype(np.float32)
colors = np.array(
self.colors_hp, dtype=np.float32).reshape(-1, 3)[:c].reshape(1, 1, c, 3)
if self.theme == 'white':
colors = 255 - colors
color_map = (img * colors).max(axis=2).astype(np.uint8)
color_map = cv2.resize(color_map, (output_res[0], output_res[1]))
return color_map
def _get_rand_color(self):
c = ((np.random.random((3)) * 0.6 + 0.2) * 255).astype(np.int32).tolist()
return c
  def add_coco_bbox(self, bbox, cat, conf=1, show_txt=True,
    no_bbox=False, img_id='default'):
    """Draw a labeled box on self.imgs[img_id].

    bbox is (x1, y1, x2, y2); cat is the class index. In tracking display
    mode (opt.show_track_color) `conf` carries the integer track id and
    selects a per-track color; otherwise conf is the detection score shown
    in the label. With opt.only_show_dots only the center dot (and an
    optional fading trace of past centers) is drawn.
    """
    if self.opt.show_track_color:
      track_id = int(conf)  # conf doubles as the track id in this mode
      if not (track_id in self.track_color):
        self.track_color[track_id] = self._get_rand_color()
      c = self.track_color[track_id]
      # thickness = 4
      # fontsize = 0.8
    if self.opt.only_show_dots:
      # draw only the box center as a filled dot
      ct = (int((bbox[0] + bbox[2]) / 2), int((bbox[1] + bbox[3]) / 2))
      cv2.circle(
        self.imgs[img_id], ct ,8, c, -1, lineType=cv2.LINE_AA)
      if self.opt.show_trace:
        if track_id in self.trace:
          trace = self.trace[track_id]
          cnt = 0
          t_pre = ct
          # up to 3 most recent past centers, shrinking/fading backwards
          for t in trace[::-1]:
            cv2.circle(
              self.imgs[img_id], t ,6-cnt*2, c, -1, lineType=cv2.LINE_AA)
            cv2.line(self.imgs[img_id], t, t_pre, c, max(6-cnt*2, 1), lineType=cv2.LINE_AA)
            t_pre = t
            cnt = cnt + 1
            if cnt >= 3:
              break
          self.trace[track_id].append(ct)
        else:
          self.trace[track_id] = [ct]
      return
    bbox = np.array(bbox, dtype=np.int32)
    cat = int(cat)
    c = self.colors[cat][0][0].tolist()
    if self.theme == 'white':
      c = (255 - np.array(c)).tolist()
    if self.opt.tango_color:
      c = (255 - tango_color_dark[cat][0][0]).tolist()
    if conf >= 1:
      # integer conf is treated as a track/object id, not a score
      ID = int(conf) if not self.opt.not_show_number else ''
      txt = '{}{}'.format(self.names[cat], ID)
    else:
      txt = '{}{:.1f}'.format(self.names[cat], conf)
    thickness = 2
    fontsize = 0.8 if self.opt.qualitative else 0.5
    if not self.opt.not_show_bbox:
      font = cv2.FONT_HERSHEY_SIMPLEX
      cat_size = cv2.getTextSize(txt, font, fontsize, thickness)[0]
      if not no_bbox:
        cv2.rectangle(
          self.imgs[img_id], (bbox[0], bbox[1]), (bbox[2], bbox[3]),
          c, thickness)
      if show_txt:
        # filled label background above the box's top-left corner
        cv2.rectangle(self.imgs[img_id],
                      (bbox[0], bbox[1] - cat_size[1] - thickness),
                      (bbox[0] + cat_size[0], bbox[1]), c, -1)
        cv2.putText(self.imgs[img_id], txt, (bbox[0], bbox[1] - thickness - 1),
                    font, fontsize, (0, 0, 0), thickness=1, lineType=cv2.LINE_AA)
def add_tracking_id(self, ct, tracking_id, img_id='default'):
txt = '{}'.format(tracking_id)
fontsize = 0.5
cv2.putText(self.imgs[img_id], txt, (int(ct[0]), int(ct[1])),
cv2.FONT_HERSHEY_SIMPLEX, fontsize,
(255, 0, 255), thickness=1, lineType=cv2.LINE_AA)
  def add_coco_hp(self, points, tracking_id=0, img_id='default'):
    """Draw a 17-joint pose: joint dots plus the colored skeleton edges.

    points is flattened (x, y) pairs; in track-color mode edges take the
    track's color (and joint dots are skipped).
    """
    points = np.array(points, dtype=np.int32).reshape(self.num_joints, 2)
    if not self.opt.show_track_color:
      for j in range(self.num_joints):
        cv2.circle(self.imgs[img_id],
                   (points[j, 0], points[j, 1]), 3, self.colors_hp[j], -1)
    h, w = self.imgs[img_id].shape[0], self.imgs[img_id].shape[1]
    for j, e in enumerate(self.edges):
      # only draw edges whose endpoints lie inside the image
      if points[e].min() > 0 and points[e, 0].max() < w and \
        points[e, 1].max() < h:
        c = self.ec[j] if not self.opt.show_track_color else \
          self.track_color[tracking_id]
        cv2.line(self.imgs[img_id], (points[e[0], 0], points[e[0], 1]),
                 (points[e[1], 0], points[e[1], 1]), c, 2,
                 lineType=cv2.LINE_AA)
def clear(self):
return
  def show_all_imgs(self, pause=False, Time=0):
    """Show every stored image in OpenCV windows.

    The `if 1` keeps the OpenCV path always active; the `else` branch (a
    matplotlib grid) is unreachable upstream code kept as-is. The 3D
    branch uses self.ax / self.xmin etc., which are presumably set
    elsewhere when 3D plotting is enabled (with_3d starts False) —
    TODO confirm.
    """
    if 1:
      for i, v in self.imgs.items():
        cv2.imshow('{}'.format(i), v)
      if not self.with_3d:
        cv2.waitKey(0 if pause else 1)
      else:
        # pad the 3D axes out to a cube that encloses the data
        max_range = np.array([
          self.xmax-self.xmin, self.ymax-self.ymin, self.zmax-self.zmin]).max()
        Xb = 0.5*max_range*np.mgrid[
          -1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(self.xmax+self.xmin)
        Yb = 0.5*max_range*np.mgrid[
          -1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(self.ymax+self.ymin)
        Zb = 0.5*max_range*np.mgrid[
          -1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(self.zmax+self.zmin)
        for xb, yb, zb in zip(Xb, Yb, Zb):
          self.ax.plot([xb], [yb], [zb], 'w')
        if self.opt.debug == 9:
          self.plt.pause(1e-27)
        else:
          self.plt.show()
    else:
      # unreachable: matplotlib grid of all stored images
      self.ax = None
      nImgs = len(self.imgs)
      fig=plt.figure(figsize=(nImgs * 10,10))
      nCols = nImgs
      nRows = nImgs // nCols
      for i, (k, v) in enumerate(self.imgs.items()):
        fig.add_subplot(1, nImgs, i + 1)
        if len(v.shape) == 3:
          plt.imshow(cv2.cvtColor(v, cv2.COLOR_BGR2RGB))
        else:
          plt.imshow(v)
      plt.show()
def save_img(self, imgId='default', path='./cache/debug/'):
cv2.imwrite(path + '{}.png'.format(imgId), self.imgs[imgId])
  def save_all_imgs(self, path='./cache/debug/', prefix='', genID=False):
    """Write stored images to `path` as PNG files.

    With genID, a running counter persisted in `<path>/id.txt` is used as
    the prefix and incremented. Only images whose id is in
    opt.save_imgs are written; an empty opt.save_imgs list means save all.
    """
    if genID:
      try:
        idx = int(np.loadtxt(path + '/id.txt'))
      except:
        idx = 0  # first run or unreadable counter file
      prefix=idx
      np.savetxt(path + '/id.txt', np.ones(1) * (idx + 1), fmt='%d')
    for i, v in self.imgs.items():
      if i in self.opt.save_imgs or self.opt.save_imgs == []:
        cv2.imwrite(
          path + '/{}{}{}.png'.format(prefix, i, self.opt.save_img_suffix), v)
def remove_side(self, img_id, img):
if not (img_id in self.imgs):
return
ws = img.sum(axis=2).sum(axis=0)
l = 0
while ws[l] == 0 and l < len(ws):
l+= 1
r = ws.shape[0] - 1
while ws[r] == 0 and r > 0:
r -= 1
hs = img.sum(axis=2).sum(axis=1)
t = 0
while hs[t] == 0 and t < len(hs):
t += 1
b = hs.shape[0] - 1
while hs[b] == 0 and b > 0:
b -= 1
self.imgs[img_id] = self.imgs[img_id][t:b+1, l:r+1].copy()
def project_3d_to_bird(self, pt):
pt[0] += self.world_size / 2
pt[1] = self.world_size - pt[1]
pt = pt * self.out_size / self.world_size
return pt.astype(np.int32)
  def add_3d_detection(
    self, image_or_path, flipped, dets, calib, show_txt=False,
    vis_thresh=0.3, img_id='det'):
    """Draw projected 3D boxes for `dets` onto an image (array or file path).

    Each det needs 'score', 'dim', 'loc', 'rot_y', 'class'; in
    track-color mode also 'tracking_id' / 'tracking'. calib is the 3 x 4
    projection matrix. No-op when opt.only_show_dots.
    """
    if self.opt.only_show_dots:
      return
    if isinstance(image_or_path, np.ndarray):
      self.imgs[img_id] = image_or_path.copy()
    else:
      self.imgs[img_id] = cv2.imread(image_or_path)
    # thickness = 1
    if self.opt.show_track_color:
      # self.imgs[img_id] = (self.imgs[img_id] * 0.5 + \
      #   np.ones_like(self.imgs[img_id]) * 255 * 0.5).astype(np.uint8)
      # thickness = 3
      pass
    if flipped:
      self.imgs[img_id] = self.imgs[img_id][:, ::-1].copy()
    for item in dets:
      if item['score'] > vis_thresh \
        and 'dim' in item and 'loc' in item and 'rot_y' in item:
        # per-class color, or per-track color in tracking display mode
        cl = (self.colors[int(item['class']) - 1, 0, 0]).tolist() \
          if not self.opt.show_track_color else \
          self.track_color[int(item['tracking_id'])]
        if self.theme == 'white' and not self.opt.show_track_color:
          cl = (255 - np.array(cl)).tolist()
        if self.opt.tango_color:
          cl = (255 - tango_color_dark[int(item['class']) - 1, 0, 0]).tolist()
        dim = item['dim']
        loc = item['loc']
        rot_y = item['rot_y']
        if loc[2] > 1:  # only boxes clearly in front of the camera
          box_3d = compute_box_3d(dim, loc, rot_y)
          box_2d = project_to_image(box_3d, calib)
          self.imgs[img_id] = draw_box_3d(
            self.imgs[img_id], box_2d.astype(np.int32), cl,
            same_color=self.opt.show_track_color or self.opt.qualitative)
          if self.opt.show_track_color or self.opt.qualitative:
            # axis-aligned hull of the projected corners carries the label
            bbox = [box_2d[:,0].min(), box_2d[:,1].min(),
                    box_2d[:,0].max(), box_2d[:,1].max()]
            sc = int(item['tracking_id']) if self.opt.show_track_color else \
              item['score']
            self.add_coco_bbox(
              bbox, item['class'] - 1, sc, show_txt=not self.opt.not_show_txt,
              no_bbox=True, img_id=img_id)
          if self.opt.show_track_color:
            self.add_arrow([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2],
              item['tracking'], img_id=img_id)
    # print('===========================')
def compose_vis_ddd(
self, img_path, flipped, dets, calib,
vis_thresh, pred, bev, img_id='out'):
self.imgs[img_id] = cv2.imread(img_path)
if flipped:
self.imgs[img_id] = self.imgs[img_id][:, ::-1].copy()
h, w = pred.shape[:2]
hs, ws = self.imgs[img_id].shape[0] / h, self.imgs[img_id].shape[1] / w
self.imgs[img_id] = cv2.resize(self.imgs[img_id], (w, h))
self.add_blend_img(self.imgs[img_id], pred, img_id)
for item in dets:
if item['score'] > vis_thresh:
dim = item['dim']
loc = item['loc']
rot_y = item['rot_y']
cl = (self.colors[int(item['class']) - 1, 0, 0]).tolist()
if loc[2] > 1:
box_3d = compute_box_3d(dim, loc, rot_y)
box_2d = project_to_image(box_3d, calib)
box_2d[:, 0] /= hs
box_2d[:, 1] /= ws
self.imgs[img_id] = draw_box_3d(self.imgs[img_id], box_2d, cl)
self.imgs[img_id] = np.concatenate(
[self.imgs[img_id], self.imgs[bev]], axis=1)
  def add_bird_view(self, dets, vis_thresh=0.3, img_id='bird', cnt=0):
    """Render detections as ground-plane rectangles in a bird's-eye view.

    Each detection's 3D box footprint (x/z plane) is projected with
    ``project_3d_to_bird`` and drawn as an orange outline; the edge
    between corners 0 and 1 is drawn thicker (presumably the heading
    edge -- confirm against compute_box_3d's corner order). The result
    is stored in ``self.imgs[img_id]``.

    Args:
      dets: detection dicts with 'class', 'score', 'dim', 'loc', 'rot_y'.
      vis_thresh: minimum score for a detection to be drawn.
      img_id: key under which the BEV image is stored.
      cnt: frame counter, used to locate a pre-rendered background image
        when ``opt.vis_gt_bev`` is set.
    """
    if self.opt.vis_gt_bev:
      # Draw on top of a previously saved GT/pred bird-view image.
      bird_view = cv2.imread(
        self.opt.vis_gt_bev + '/{}bird_pred_gt.png'.format(cnt))
    else:
      # Fresh light-gray canvas.
      bird_view = np.ones((self.out_size, self.out_size, 3), dtype=np.uint8) * 230
    for item in dets:
      cl = (self.colors[int(item['class']) - 1, 0, 0]).tolist()
      lc = (250, 152, 12)  # fixed orange line color (`cl` is unused here)
      if item['score'] > vis_thresh:
        dim = item['dim']
        loc = item['loc']
        rot_y = item['rot_y']
        # Ground-plane footprint: first 4 corners, keep (x, z) columns.
        rect = compute_box_3d(dim, loc, rot_y)[:4, [0, 2]]
        for k in range(4):
          rect[k] = self.project_3d_to_bird(rect[k])
        cv2.polylines(
            bird_view,[rect.reshape(-1, 1, 2).astype(np.int32)],
            True,lc,2,lineType=cv2.LINE_AA)
        # Redraw one edge thicker to indicate orientation.
        for e in [[0, 1]]:
          t = 4 if e == [0, 1] else 1
          cv2.line(bird_view, (rect[e[0]][0], rect[e[0]][1]),
                  (rect[e[1]][0], rect[e[1]][1]), lc, t,
                  lineType=cv2.LINE_AA)
    self.imgs[img_id] = bird_view
  def add_bird_views(self, dets_dt, dets_gt, vis_thresh=0.3, img_id='bird'):
    """Render ground truth and predictions in a single bird's-eye view.

    Ground truth is drawn first as filled blue polygons, predictions on
    top as orange outlines. For both, the edge between corners 0 and 1 is
    drawn thicker (presumably the heading edge -- confirm against
    compute_box_3d's corner order). Result stored in ``self.imgs[img_id]``.

    Args:
      dets_dt: predicted detection dicts.
      dets_gt: ground-truth detection dicts.
      vis_thresh: minimum score for a detection to be drawn.
      img_id: key under which the BEV image is stored.
    """
    bird_view = np.ones((self.out_size, self.out_size, 3), dtype=np.uint8) * 230
    # ii == 0 -> GT (filled), ii == 1 -> predictions (outline). `cc` is unused.
    for ii, (dets, lc, cc) in enumerate(
      [(dets_gt, (12, 49, 250), (0, 0, 255)),
        (dets_dt, (250, 152, 12), (255, 0, 0))]):
      for item in dets:
        if item['score'] > vis_thresh \
          and 'dim' in item and 'loc' in item and 'rot_y' in item:
          dim = item['dim']
          loc = item['loc']
          rot_y = item['rot_y']
          # Ground-plane footprint: first 4 corners, keep (x, z) columns.
          rect = compute_box_3d(dim, loc, rot_y)[:4, [0, 2]]
          for k in range(4):
            rect[k] = self.project_3d_to_bird(rect[k])
          if ii == 0:
            # Ground truth: filled polygon.
            cv2.fillPoly(
              bird_view,[rect.reshape(-1, 1, 2).astype(np.int32)],
              lc,lineType=cv2.LINE_AA)
          else:
            # Prediction: outline only.
            cv2.polylines(
              bird_view,[rect.reshape(-1, 1, 2).astype(np.int32)],
              True,lc,2,lineType=cv2.LINE_AA)
          # for e in [[0, 1], [1, 2], [2, 3], [3, 0]]:
          for e in [[0, 1]]:
            t = 4 if e == [0, 1] else 1
            cv2.line(bird_view, (rect[e[0]][0], rect[e[0]][1]),
                    (rect[e[1]][0], rect[e[1]][1]), lc, t,
                    lineType=cv2.LINE_AA)
    self.imgs[img_id] = bird_view
def add_arrow(self, st, ed, img_id, c=(255, 0, 255), w=2):
if self.opt.only_show_dots:
return
cv2.arrowedLine(
self.imgs[img_id], (int(st[0]), int(st[1])),
(int(ed[0] + st[0]), int(ed[1] + st[1])), c, 2,
line_type=cv2.LINE_AA, tipLength=0.3)
# Default per-class palette: flattened RGB triplets in [0, 1], reshaped to
# (N, 3) and scaled to [0, 255] below. Consumed via `self.colors` in the
# Debugger drawing methods above.
color_list = np.array(
        [1.000, 1.000, 1.000,
         0.850, 0.325, 0.098,
         0.929, 0.694, 0.125,
         0.494, 0.184, 0.556,
         0.466, 0.674, 0.188,
         0.301, 0.745, 0.933,
         0.635, 0.078, 0.184,
         0.300, 0.300, 0.300,
         0.600, 0.600, 0.600,
         1.000, 0.000, 0.000,
         1.000, 0.500, 0.000,
         0.749, 0.749, 0.000,
         0.000, 1.000, 0.000,
         0.000, 0.000, 1.000,
         0.667, 0.000, 1.000,
         0.333, 0.333, 0.000,
         0.333, 0.667, 0.000,
         0.333, 1.000, 0.000,
         0.667, 0.333, 0.000,
         0.667, 0.667, 0.000,
         0.667, 1.000, 0.000,
         1.000, 0.333, 0.000,
         1.000, 0.667, 0.000,
         1.000, 1.000, 0.000,
         0.000, 0.333, 0.500,
         0.000, 0.667, 0.500,
         0.000, 1.000, 0.500,
         0.333, 0.000, 0.500,
         0.333, 0.333, 0.500,
         0.333, 0.667, 0.500,
         0.333, 1.000, 0.500,
         0.667, 0.000, 0.500,
         0.667, 0.333, 0.500,
         0.667, 0.667, 0.500,
         0.667, 1.000, 0.500,
         1.000, 0.000, 0.500,
         1.000, 0.333, 0.500,
         1.000, 0.667, 0.500,
         1.000, 1.000, 0.500,
         0.000, 0.333, 1.000,
         0.000, 0.667, 1.000,
         0.000, 1.000, 1.000,
         0.333, 0.000, 1.000,
         0.333, 0.333, 1.000,
         0.333, 0.667, 1.000,
         0.333, 1.000, 1.000,
         0.667, 0.000, 1.000,
         0.667, 0.333, 1.000,
         0.667, 0.667, 1.000,
         0.667, 1.000, 1.000,
         1.000, 0.000, 1.000,
         1.000, 0.333, 1.000,
         1.000, 0.667, 1.000,
         0.167, 0.000, 0.000,
         0.333, 0.000, 0.000,
         0.500, 0.000, 0.000,
         0.667, 0.000, 0.000,
         0.833, 0.000, 0.000,
         1.000, 0.000, 0.000,
         0.000, 0.167, 0.000,
         0.000, 0.333, 0.000,
         0.000, 0.500, 0.000,
         0.000, 0.667, 0.000,
         0.000, 0.833, 0.000,
         0.000, 1.000, 0.000,
         0.000, 0.000, 0.000,
         0.000, 0.000, 0.167,
         0.000, 0.000, 0.333,
         0.000, 0.000, 0.500,
         0.000, 0.000, 0.667,
         0.000, 0.000, 0.833,
         0.000, 0.000, 1.000,
         0.333, 0.000, 0.500,
         0.143, 0.143, 0.143,
         0.286, 0.286, 0.286,
         0.429, 0.429, 0.429,
         0.571, 0.571, 0.571,
         0.714, 0.714, 0.714,
         0.857, 0.857, 0.857,
         0.000, 0.447, 0.741,
         0.50, 0.5, 0
         ]
    ).astype(np.float32)
# One row per color, scaled from [0, 1] to [0, 255].
color_list = color_list.reshape((-1, 3)) * 255
# Tango desktop-project palette (RGB, 0-255), reshaped to (N, 1, 1, 3) so a
# single index yields a broadcastable color, matching `self.colors` indexing.
tango_color = [[252, 233, 79], # Butter 1
  [237, 212, 0], # Butter 2
  [196, 160, 0], # Butter 3
  [138, 226, 52], # Chameleon 1
  [115, 210, 22], # Chameleon 2
  [ 78, 154, 6], # Chameleon 3
  [252, 175, 62], # Orange 1
  [245, 121, 0], # Orange 2
  [206, 92, 0], # Orange 3
  [114, 159, 207], # Sky Blue 1
  [ 52, 101, 164], # Sky Blue 2
  [ 32, 74, 135], # Sky Blue 3
  [173, 127, 168], # Plum 1
  [117, 80, 123], # Plum 2
  [ 92, 53, 102], # Plum 3
  [233, 185, 110], # Chocolate 1
  [193, 125, 17], # Chocolate 2
  [143, 89, 2], # Chocolate 3
  [239, 41, 41], # Scarlet Red 1
  [204, 0, 0], # Scarlet Red 2
  [164, 0, 0], # Scarlet Red 3
  [238, 238, 236], # Aluminium 1
  [211, 215, 207], # Aluminium 2
  [186, 189, 182], # Aluminium 3
  [136, 138, 133], # Aluminium 4
  [ 85, 87, 83], # Aluminium 5
  [ 46, 52, 54], # Aluminium 6
]
tango_color = np.array(tango_color, np.uint8).reshape((-1, 1, 1, 3))
# Darker Tango subset used for per-class colors when `opt.tango_color` is set
# (see the drawing code above); same (N, 1, 1, 3) layout as `tango_color`.
tango_color_dark = [
  [114, 159, 207], # Sky Blue 1
  [196, 160, 0], # Butter 3
  [ 78, 154, 6], # Chameleon 3
  [206, 92, 0], # Orange 3
  [164, 0, 0], # Scarlet Red 3
  [ 32, 74, 135], # Sky Blue 3
  [ 92, 53, 102], # Plum 3
  [143, 89, 2], # Chocolate 3
  [ 85, 87, 83], # Aluminium 5
  [186, 189, 182], # Aluminium 3
]
tango_color_dark = np.array(tango_color_dark, np.uint8).reshape((-1, 1, 1, 3))
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/utils/image.py | Python | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
import random
def flip(img):
  """Return a copy of ``img`` with its third axis reversed.

  The input must have at least three dimensions; the returned array owns
  its own memory (no view into ``img``).
  """
  reversed_view = img[:, :, ::-1]
  return reversed_view.copy()
# @numba.jit(nopython=True, nogil=True)
def transform_preds_with_trans(coords, trans):
  """Apply a 2x3 affine matrix to an (N, 2) array of points.

  Args:
    coords: (N, 2) array of x/y coordinates.
    trans: 2x3 affine matrix (e.g. from ``get_affine_transform``).

  Returns:
    (N, 2) array of transformed coordinates.
  """
  # Lift to homogeneous coordinates, then apply the transform in one shot.
  homogeneous = np.ones((coords.shape[0], 3), np.float32)
  homogeneous[:, :2] = coords
  return (homogeneous @ trans.T)[:, :2]
def transform_preds(coords, center, scale, output_size):
  """Map network-output coordinates back to the original image frame.

  Uses the inverse of the affine transform that produced the network
  input from the original image. Only the first two columns of each row
  are transformed; any extra columns of the output stay zero.
  """
  inv_trans = get_affine_transform(center, scale, 0, output_size, inv=1)
  restored = np.zeros(coords.shape)
  for idx, pt in enumerate(coords):
    restored[idx, 0:2] = affine_transform(pt[0:2], inv_trans)
  return restored
def get_affine_transform(center,
                         scale,
                         rot,
                         output_size,
                         shift=np.array([0, 0], dtype=np.float32),
                         inv=0):
  """Build the 2x3 affine matrix mapping a crop of the source image to output_size.

  Three corresponding point pairs are constructed (center, a rotated
  "direction" point above the center, and a third point completing a
  right angle) and fed to cv2.getAffineTransform.

  Args:
    center: (x, y) crop center in the source image.
    scale: scalar or (w, h) crop size in source pixels.
    rot: rotation in degrees.
    output_size: (w, h) of the destination image.
    shift: relative (x, y) shift, in units of ``scale``.
      NOTE(review): mutable np.array default -- safe only because it is
      never mutated in this function.
    inv: if nonzero, return the inverse mapping (output -> source).

  Returns:
    2x3 float affine matrix from cv2.getAffineTransform.
  """
  if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
    scale = np.array([scale, scale], dtype=np.float32)
  scale_tmp = scale
  src_w = scale_tmp[0]
  dst_w = output_size[0]
  dst_h = output_size[1]
  rot_rad = np.pi * rot / 180
  # Second reference point: half the crop width "upwards", rotated by rot.
  src_dir = get_dir([0, src_w * -0.5], rot_rad)
  dst_dir = np.array([0, dst_w * -0.5], np.float32)
  src = np.zeros((3, 2), dtype=np.float32)
  dst = np.zeros((3, 2), dtype=np.float32)
  src[0, :] = center + scale_tmp * shift
  src[1, :] = center + src_dir + scale_tmp * shift
  dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
  dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
  # Third point completes a right angle so the three points are never collinear.
  src[2:, :] = get_3rd_point(src[0, :], src[1, :])
  dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
  if inv:
    trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
  else:
    trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
  return trans
def affine_transform(pt, t):
  """Apply the 2x3 affine matrix ``t`` to a single 2D point ``pt``."""
  homogeneous = np.float32([pt[0], pt[1], 1.])
  return (t @ homogeneous)[:2]
def get_3rd_point(a, b):
  """Return the point completing a right angle at ``b``.

  The result is ``b`` plus the vector (a - b) rotated by 90 degrees,
  giving cv2.getAffineTransform a third, non-collinear reference point.
  """
  delta = a - b
  # Rotating (dx, dy) by 90 degrees yields (-dy, dx).
  return b + np.array([-delta[1], delta[0]], dtype=np.float32)
def get_dir(src_point, rot_rad):
  """Rotate a 2D point by ``rot_rad`` radians (counter-clockwise).

  Returns a 2-element list [x', y'].
  """
  sin_r, cos_r = np.sin(rot_rad), np.cos(rot_rad)
  return [
      src_point[0] * cos_r - src_point[1] * sin_r,
      src_point[0] * sin_r + src_point[1] * cos_r,
  ]
def crop(img, center, scale, output_size, rot=0):
  """Crop ``img`` to ``output_size`` via an affine warp around ``center``.

  Thin wrapper around get_affine_transform + cv2.warpAffine with
  bilinear interpolation.
  """
  trans = get_affine_transform(center, scale, rot, output_size)
  warped = cv2.warpAffine(
      img, trans, (int(output_size[0]), int(output_size[1])),
      flags=cv2.INTER_LINEAR)
  return warped
# @numba.jit(nopython=True, nogil=True)
def gaussian_radius(det_size, min_overlap=0.7):
  """Compute a Gaussian splat radius for a box of size ``det_size``.

  Follows the CornerNet recipe: solve three quadratic constraints on how
  far the box corners may shift while the shifted box keeps an IoU of at
  least ``min_overlap`` with the original, and take the tightest bound.

  NOTE(review): each root uses ``(b + sqrt(disc)) / 2`` exactly as in the
  original CornerNet release; whether the ``-`` root was intended is a
  long-standing upstream debate, so behavior is deliberately unchanged.
  """
  height, width = det_size

  # Case 1: both corners shift towards the inside of the box.
  coef_b1 = height + width
  coef_c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
  root1 = (coef_b1 + np.sqrt(coef_b1 ** 2 - 4 * 1 * coef_c1)) / 2

  # Case 2: both corners shift towards the outside of the box.
  coef_b2 = 2 * (height + width)
  coef_c2 = (1 - min_overlap) * width * height
  root2 = (coef_b2 + np.sqrt(coef_b2 ** 2 - 4 * 4 * coef_c2)) / 2

  # Case 3: one corner shifts in, the other out.
  coef_a3 = 4 * min_overlap
  coef_b3 = -2 * min_overlap * (height + width)
  coef_c3 = (min_overlap - 1) * width * height
  root3 = (coef_b3 + np.sqrt(coef_b3 ** 2 - 4 * coef_a3 * coef_c3)) / 2

  return min(root1, root2, root3)
# @numba.jit(nopython=True, nogil=True)
def gaussian2D(shape, sigma=1):
  """Return an unnormalized 2D Gaussian kernel of the given (h, w) shape.

  The peak value is 1 at the kernel center; entries below machine
  precision (relative to the peak) are zeroed out.
  """
  half_h, half_w = ((edge - 1.) / 2. for edge in shape)
  ys, xs = np.ogrid[-half_h:half_h + 1, -half_w:half_w + 1]
  kernel = np.exp(-(xs * xs + ys * ys) / (2 * sigma * sigma))
  # Suppress vanishing tails so later max-comparisons stay clean.
  kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0
  return kernel
# @numba.jit(nopython=True, nogil=True)
def draw_umich_gaussian(heatmap, center, radius, k=1):
  """Splat a Gaussian of the given radius onto ``heatmap`` at ``center``.

  The Gaussian (peak value ``k``) is centered at the truncated integer
  center; the stamp is clipped at the heatmap borders and merged with an
  element-wise maximum, so overlapping objects keep the stronger
  response. Returns the (in-place modified) heatmap.
  """
  diameter = 2 * radius + 1
  gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
  x, y = int(center[0]), int(center[1])
  height, width = heatmap.shape[0:2]
  # Extent of the stamp that actually falls inside the heatmap.
  left, right = min(x, radius), min(width - x, radius + 1)
  top, bottom = min(y, radius), min(height - y, radius + 1)
  # Matching windows into the heatmap and the Gaussian stamp.
  masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
  masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
  if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
    # In-place maximum writes straight through the heatmap view.
    np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
  return heatmap
def draw_dense_reg(regmap, heatmap, center, value, radius, is_offset=False):
  """Write a dense regression target around ``center`` into ``regmap``.

  Wherever the Gaussian stamp dominates the current ``heatmap`` value,
  the corresponding channels of ``regmap`` are overwritten with
  ``value`` -- or, when ``is_offset`` is set and value has 2 channels,
  with per-pixel offsets back to the center. ``heatmap`` is read but not
  modified. Returns the (in-place modified) regmap.
  """
  diameter = 2 * radius + 1
  gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
  value = np.array(value, dtype=np.float32).reshape(-1, 1, 1)
  dim = value.shape[0]
  # NOTE(review): `reg` is allocated (diameter*2+1)^2 while the gaussian is
  # diameter^2; only the top-left diameter x diameter region is ever sliced
  # below, so the extra allocation looks wasteful but harmless -- confirm.
  reg = np.ones((dim, diameter*2+1, diameter*2+1), dtype=np.float32) * value
  if is_offset and dim == 2:
    # Per-pixel offset from each stamp pixel back to the stamp center.
    delta = np.arange(diameter*2+1) - radius
    reg[0] = reg[0] - delta.reshape(1, -1)
    reg[1] = reg[1] - delta.reshape(-1, 1)
  x, y = int(center[0]), int(center[1])
  height, width = heatmap.shape[0:2]
  # Extent of the stamp that actually falls inside the maps.
  left, right = min(x, radius), min(width - x, radius + 1)
  top, bottom = min(y, radius), min(height - y, radius + 1)
  masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
  masked_regmap = regmap[:, y - top:y + bottom, x - left:x + right]
  masked_gaussian = gaussian[radius - top:radius + bottom,
                             radius - left:radius + right]
  masked_reg = reg[:, radius - top:radius + bottom,
                   radius - left:radius + right]
  if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
    # Overwrite regmap only where the new Gaussian dominates the heatmap.
    idx = (masked_gaussian >= masked_heatmap).reshape(
      1, masked_gaussian.shape[0], masked_gaussian.shape[1])
    masked_regmap = (1-idx) * masked_regmap + idx * masked_reg
  regmap[:, y - top:y + bottom, x - left:x + right] = masked_regmap
  return regmap
def draw_msra_gaussian(heatmap, center, sigma):
  """Splat an MSRA-style Gaussian (peak 1) onto ``heatmap`` at ``center``.

  The Gaussian is rendered on a (6*sigma + 1)^2 stamp, clipped at the
  heatmap borders, and merged with an element-wise maximum. Returns the
  (in-place modified) heatmap.
  """
  tmp_size = sigma * 3
  mu_x = int(center[0] + 0.5)
  mu_y = int(center[1] + 0.5)
  rows, cols = heatmap.shape[0], heatmap.shape[1]
  # Upper-left / bottom-right corners of the stamp in heatmap coordinates.
  ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
  br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
  if ul[0] >= cols or ul[1] >= rows or br[0] < 0 or br[1] < 0:
    return heatmap  # stamp falls entirely outside the map
  size = 2 * tmp_size + 1
  grid_x = np.arange(0, size, 1, np.float32)
  grid_y = grid_x[:, np.newaxis]
  center_idx = size // 2
  stamp = np.exp(
      -((grid_x - center_idx) ** 2 + (grid_y - center_idx) ** 2)
      / (2 * sigma ** 2))
  # Overlap of the stamp with the heatmap, in both coordinate frames.
  g_x = max(0, -ul[0]), min(br[0], cols) - ul[0]
  g_y = max(0, -ul[1]), min(br[1], rows) - ul[1]
  img_x = max(0, ul[0]), min(br[0], cols)
  img_y = max(0, ul[1]), min(br[1], rows)
  heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]] = np.maximum(
      heatmap[img_y[0]:img_y[1], img_x[0]:img_x[1]],
      stamp[g_y[0]:g_y[1], g_x[0]:g_x[1]])
  return heatmap
def grayscale(image):
  """Convert a BGR image to single-channel grayscale via OpenCV."""
  return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
def lighting_(data_rng, image, alphastd, eigval, eigvec):
  """Add PCA-based lighting noise to ``image`` in place (AlexNet-style).

  Samples a 3-vector of Gaussian coefficients and adds the resulting
  combination of the color eigenvectors to every pixel.
  """
  pca_coeffs = data_rng.normal(scale=alphastd, size=(3, ))
  image += eigvec.dot(eigval * pca_coeffs)
def blend_(alpha, image1, image2):
  """In-place convex blend: image1 <- alpha * image1 + (1 - alpha) * image2.

  NOTE: both arguments are modified via augmented assignment -- an
  ndarray ``image2`` (e.g. the grayscale view passed by saturation_) is
  scaled in place, while a numpy scalar (the gs_mean passed by
  contrast_) is merely rebound locally.
  """
  image1 *= alpha
  image2 *= (1 - alpha)
  image1 += image2
def saturation_(data_rng, image, gs, gs_mean, var):
  # Randomly scale saturation by blending the image with its grayscale
  # version `gs` (alpha > 1 boosts, alpha < 1 reduces saturation).
  # `gs_mean` is unused; the signature matches the other color-aug
  # helpers so they are interchangeable. Mutates `image` and `gs`
  # (blend_ scales its second argument in place).
  alpha = 1. + data_rng.uniform(low=-var, high=var)
  blend_(alpha, image, gs[:, :, None])
def brightness_(data_rng, image, gs, gs_mean, var):
  """Randomly scale image brightness in place.

  ``gs`` and ``gs_mean`` are unused; the signature matches the other
  color-augmentation helpers so they are interchangeable.
  """
  gain = 1. + data_rng.uniform(low=-var, high=var)
  image *= gain
def contrast_(data_rng, image, gs, gs_mean, var):
  # Randomly scale contrast by blending the image with its mean gray
  # level (alpha > 1 increases contrast). `gs` is unused; the signature
  # matches the other color-aug helpers. Mutates `image` in place.
  alpha = 1. + data_rng.uniform(low=-var, high=var)
  blend_(alpha, image, gs_mean)
def color_aug(data_rng, image, eig_val, eig_vec):
  """Apply brightness/contrast/saturation jitter in random order, then
  PCA lighting noise, all in place on ``image``."""
  functions = [brightness_, contrast_, saturation_]
  random.shuffle(functions)
  # Grayscale image and its mean are computed once and shared by all
  # three jitter functions (each uses only what it needs).
  gs = grayscale(image)
  gs_mean = gs.mean()
  for f in functions:
    f(data_rng, image, gs, gs_mean, 0.4)
  lighting_(data_rng, image, 0.1, eig_val, eig_vec)
| xingyizhou/CenterTrack | 2,472 | Simultaneous object detection and tracking using center points. | Python | xingyizhou | Xingyi Zhou | Meta |
src/lib/utils/post_process.py | Python | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import cv2
from .image import transform_preds_with_trans, get_affine_transform
from .ddd_utils import ddd2locrot, comput_corners_3d
from .ddd_utils import project_to_image, rot_y2alpha
import numba
def get_alpha(rot):
  """Decode the 8-dim two-bin rotation output into observation angles.

  Per-row layout: [bin1_cls0, bin1_cls1, bin1_sin, bin1_cos,
                   bin2_cls0, bin2_cls1, bin2_sin, bin2_cos].
  The bin whose positive-class score is larger wins; its (sin, cos) pair
  is converted to an angle and offset by -pi/2 (bin 1) or +pi/2 (bin 2).

  Returns a length-B array of alpha angles.
  """
  use_bin1 = rot[:, 1] > rot[:, 5]
  alpha_bin1 = np.arctan2(rot[:, 2], rot[:, 3]) + (-0.5 * np.pi)
  alpha_bin2 = np.arctan2(rot[:, 6], rot[:, 7]) + (0.5 * np.pi)
  return np.where(use_bin1, alpha_bin1, alpha_bin2)
def generic_post_process(
  opt, dets, c, s, h, w, num_classes, calibs=None, height=-1, width=-1):
  """Transform raw, batched network detections back to input-image space.

  Args:
    opt: options object; only ``opt.out_thresh`` is read here.
    dets: dict of batched network outputs ('scores', 'clses', 'cts', and
      optionally 'bboxes', 'tracking', 'hps', 'dep', 'dim', 'rot',
      'amodel_offset', 'nuscenes_att', 'velocity'). Assumes each
      dets['scores'][i] is sorted descending (the loop breaks at the
      first sub-threshold score) -- confirm against the decoder.
    c, s: per-image crop center and scale used when building the input.
    h, w: spatial size of the network output.
    num_classes, height, width: unused here (kept for callers).
    calibs: per-image calibration matrices, needed for 3D decoding.

  Returns:
    A list (one entry per image) of lists of detection dicts.
    NOTE(review): the early-exit path returns a 2-tuple ``[{}], [{}]``
    while the normal path returns a single list -- looks inconsistent;
    confirm what callers unpack before changing.
  """
  if not ('scores' in dets):
    return [{}], [{}]
  ret = []
  for i in range(len(dets['scores'])):
    preds = []
    # Inverse affine transform: output-map coords -> input-image coords.
    trans = get_affine_transform(
      c[i], s[i], 0, (w, h), inv=1).astype(np.float32)
    for j in range(len(dets['scores'][i])):
      if dets['scores'][i][j] < opt.out_thresh:
        break
      item = {}
      item['score'] = dets['scores'][i][j]
      # Classes are 1-based in the output.
      item['class'] = int(dets['clses'][i][j]) + 1
      item['ct'] = transform_preds_with_trans(
        (dets['cts'][i][j]).reshape(1, 2), trans).reshape(2)
      if 'tracking' in dets:
        # Transform the displaced center, then re-express as an offset.
        tracking = transform_preds_with_trans(
          (dets['tracking'][i][j] + dets['cts'][i][j]).reshape(1, 2),
          trans).reshape(2)
        item['tracking'] = tracking - item['ct']
      if 'bboxes' in dets:
        bbox = transform_preds_with_trans(
          dets['bboxes'][i][j].reshape(2, 2), trans).reshape(4)
        item['bbox'] = bbox
      if 'hps' in dets:
        # Keypoints: transform each (x, y) pair, keep flattened layout.
        pts = transform_preds_with_trans(
          dets['hps'][i][j].reshape(-1, 2), trans).reshape(-1)
        item['hps'] = pts
      if 'dep' in dets and len(dets['dep'][i]) > j:
        item['dep'] = dets['dep'][i][j]
      if 'dim' in dets and len(dets['dim'][i]) > j:
        item['dim'] = dets['dim'][i][j]
      if 'rot' in dets and len(dets['rot'][i]) > j:
        item['alpha'] = get_alpha(dets['rot'][i][j:j+1])[0]
      # Full 3D decoding requires rotation, depth and dimensions.
      if 'rot' in dets and 'dep' in dets and 'dim' in dets \
        and len(dets['dep'][i]) > j:
        if 'amodel_offset' in dets and len(dets['amodel_offset'][i]) > j:
          # Amodal center: bbox center plus the predicted offset.
          ct_output = dets['bboxes'][i][j].reshape(2, 2).mean(axis=0)
          amodel_ct_output = ct_output + dets['amodel_offset'][i][j]
          ct = transform_preds_with_trans(
            amodel_ct_output.reshape(1, 2), trans).reshape(2).tolist()
        else:
          bbox = item['bbox']
          ct = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]
        item['ct'] = ct
        item['loc'], item['rot_y'] = ddd2locrot(
          ct, item['alpha'], item['dim'], item['dep'], calibs[i])
      preds.append(item)
    # Attributes that apply to every kept detection of this image.
    if 'nuscenes_att' in dets:
      for j in range(len(preds)):
        preds[j]['nuscenes_att'] = dets['nuscenes_att'][i][j]
    if 'velocity' in dets:
      for j in range(len(preds)):
        preds[j]['velocity'] = dets['velocity'][i][j]
    ret.append(preds)
  return ret
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.