code stringlengths 17 6.64M |
|---|
def make_cuda_ext(name, module, sources):
    """Create a CUDAExtension for ``module.name`` from module-relative sources.

    Raises:
        EnvironmentError: if CUDA is unavailable and FORCE_CUDA is not '1'.
    """
    cuda_ok = torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1'
    if not cuda_ok:
        raise EnvironmentError('CUDA is required to compile MMDetection!')
    # Source paths are given relative to the (dotted) module directory.
    module_dir_parts = module.split('.')
    abs_sources = [os.path.join(*module_dir_parts, src) for src in sources]
    nvcc_flags = [
        '-D__CUDA_NO_HALF_OPERATORS__',
        '-D__CUDA_NO_HALF_CONVERSIONS__',
        '-D__CUDA_NO_HALF2_OPERATORS__',
    ]
    return CUDAExtension(
        name='{}.{}'.format(module, name),
        sources=abs_sources,
        define_macros=[('WITH_CUDA', None)],
        extra_compile_args={'cxx': [], 'nvcc': nvcc_flags})
|
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file.

    Args:
        fname (str): Path to the requirements file.
        with_version (bool, default=True): If True, include version
            specifiers (and environment markers) in the returned items.

    Returns:
        List[str]: list of requirements items

    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import sys
    from os.path import exists
    import re
    require_fpath = fname

    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Recursively parse a referenced requirements file.
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            info = {'line': line}
            if line.startswith('-e '):
                # Editable install: the package name follows '#egg='.
                info['package'] = line.split('#egg=')[1]
            else:
                # Split "package<op>version" on the first version operator.
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Environment marker, e.g. "1.0; python_version<'3'".
                        # Split only on the first ';' so markers that
                        # themselves contain ';' do not raise ValueError.
                        version, platform_deps = map(str.strip,
                                                     rest.split(';', 1))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest
                    info['version'] = (op, version)
            yield info

    def parse_require_file(fpath):
        """Yield parsed info dicts for every requirement line in fpath."""
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                # Skip blank lines and comments.
                if line and not line.startswith('#'):
                    for info in parse_line(line):
                        yield info

    def gen_packages_items():
        """Rebuild requirement strings from the parsed info dicts."""
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                # Environment markers are not supported on Python 3.4.
                if not sys.version.startswith('3.4'):
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item

    packages = list(gen_packages_items())
    return packages
|
def test_max_iou_assigner():
    """Each box is matched to its best-overlapping gt, or to background (0)."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    gt_labels = torch.LongTensor([2, 3])
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 4
    assert len(assign_result.labels) == 4
    expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)
|
def test_max_iou_assigner_with_ignore():
    """Boxes overlapping an ignore region are assigned -1 (ignored)."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    gt_bboxes_ignore = torch.Tensor([[30, 30, 40, 40]])
    assign_result = assigner.assign(
        bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore)
    expected_gt_inds = torch.LongTensor([1, 0, 2, -1])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)
|
def test_max_iou_assigner_with_empty_gt():
    """Test corner case where an image might have no true detections."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([])
    assign_result = assigner.assign(bboxes, gt_bboxes)
    # With no gt, every box must fall to background (index 0).
    expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)
|
def test_max_iou_assigner_with_empty_boxes():
    """Test corner case where a network might predict no boxes."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    gt_labels = torch.LongTensor([2, 3])
    # With gt labels: an empty (0,) labels tensor must come back.
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 0
    assert tuple(assign_result.labels.shape) == (0,)
    # Without gt labels: the result carries no labels at all.
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=None)
    assert len(assign_result.gt_inds) == 0
    assert assign_result.labels is None
|
def test_max_iou_assigner_with_empty_boxes_and_ignore():
    """Test corner case where a network might predict no boxes and
    ignore_iof_thr is on."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5, ignore_iof_thr=0.5)
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    gt_bboxes_ignore = torch.Tensor([[30, 30, 40, 40]])
    gt_labels = torch.LongTensor([2, 3])
    # With gt labels.
    assign_result = assigner.assign(
        bboxes, gt_bboxes, gt_labels=gt_labels,
        gt_bboxes_ignore=gt_bboxes_ignore)
    assert len(assign_result.gt_inds) == 0
    assert tuple(assign_result.labels.shape) == (0,)
    # Without gt labels.
    assign_result = assigner.assign(
        bboxes, gt_bboxes, gt_labels=None,
        gt_bboxes_ignore=gt_bboxes_ignore)
    assert len(assign_result.gt_inds) == 0
    assert assign_result.labels is None
|
def test_max_iou_assigner_with_empty_boxes_and_gt():
    """Test corner case where a network might predict no boxes and no gt."""
    assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    empty = torch.empty((0, 4))
    assign_result = assigner.assign(empty, torch.empty((0, 4)))
    assert len(assign_result.gt_inds) == 0
|
def test_point_assigner():
    """Each (x, y, stride) point is mapped to the gt box that contains it."""
    assigner = PointAssigner()
    points = torch.FloatTensor([[0, 0, 1], [10, 10, 1], [5, 5, 1], [32, 32, 1]])
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    assign_result = assigner.assign(points, gt_bboxes)
    expected_gt_inds = torch.LongTensor([1, 2, 1, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)
|
def test_point_assigner_with_empty_gt():
    """Test corner case where an image might have no true detections."""
    assigner = PointAssigner()
    points = torch.FloatTensor([[0, 0, 1], [10, 10, 1], [5, 5, 1], [32, 32, 1]])
    gt_bboxes = torch.FloatTensor([])
    assign_result = assigner.assign(points, gt_bboxes)
    # With no gt, every point falls to background.
    expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)
|
def test_point_assigner_with_empty_boxes_and_gt():
    """Test corner case where an image might predict no points and no gt."""
    assigner = PointAssigner()
    assign_result = assigner.assign(torch.FloatTensor([]), torch.FloatTensor([]))
    assert len(assign_result.gt_inds) == 0
|
def test_approx_iou_assigner():
    """With one approx per octave the result matches plain MaxIoUAssigner."""
    assigner = ApproxMaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    approxs_per_octave = 1
    # Reuse the candidate boxes as both the approxs and the squares.
    assign_result = assigner.assign(bboxes, bboxes, approxs_per_octave, gt_bboxes)
    expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)
|
def test_approx_iou_assigner_with_empty_gt():
    """Test corner case where an image might have no true detections."""
    assigner = ApproxMaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([])
    approxs_per_octave = 1
    assign_result = assigner.assign(bboxes, bboxes, approxs_per_octave, gt_bboxes)
    # No gt: everything is background.
    expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)
|
def test_approx_iou_assigner_with_empty_boxes():
    """Test corner case where a network might predict no boxes."""
    assigner = ApproxMaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    approxs_per_octave = 1
    assign_result = assigner.assign(bboxes, bboxes, approxs_per_octave, gt_bboxes)
    assert len(assign_result.gt_inds) == 0
|
def test_approx_iou_assigner_with_empty_boxes_and_gt():
    """Test corner case where a network might predict no boxes and no gt."""
    assigner = ApproxMaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
    empty = torch.empty((0, 4))
    approxs_per_octave = 1
    assign_result = assigner.assign(empty, empty, approxs_per_octave,
                                    torch.empty((0, 4)))
    assert len(assign_result.gt_inds) == 0
|
def test_random_assign_result():
    """Test random instantiation of assign result to catch corner cases."""
    from mmdet.core.bbox.assigners.assign_result import AssignResult
    AssignResult.random()
    # Sweep gt/pred counts over empty, equal, and unbalanced combinations.
    for num_gts, num_preds in [(0, 0), (0, 3), (3, 3), (0, 3), (7, 7),
                               (7, 64), (24, 3)]:
        AssignResult.random(num_gts=num_gts, num_preds=num_preds)
|
class AsyncTestCase(asynctest.TestCase):
    """Base test case that runs coroutine test methods with a hard timeout."""
    # Do not reuse a shared default event loop; each test gets a fresh one.
    use_default_loop = False
    # Fail tests that implicitly call asyncio.get_event_loop().
    forbid_get_event_loop = True
    # Per-test timeout in seconds, overridable via ASYNCIO_TEST_TIMEOUT.
    TEST_TIMEOUT = int(os.getenv('ASYNCIO_TEST_TIMEOUT', '30'))
    def _run_test_method(self, method):
        # Plain (synchronous) test methods run to completion immediately;
        # coroutine test methods are driven on the loop under TEST_TIMEOUT.
        result = method()
        if asyncio.iscoroutine(result):
            self.loop.run_until_complete(asyncio.wait_for(result, timeout=self.TEST_TIMEOUT))
|
class MaskRCNNDetector():
    """Async wrapper around a detector with a pool of CUDA streams.

    The stream queue bounds how many inference requests run concurrently.
    """

    def __init__(self, model_config, checkpoint=None, streamqueue_size=3, device='cuda:0'):
        # Maximum number of concurrent inference requests.
        self.streamqueue_size = streamqueue_size
        self.device = device
        # Bug fix: forward the caller-supplied checkpoint instead of the
        # hard-coded ``checkpoint=None``, which silently discarded it.
        self.model = init_detector(model_config, checkpoint=checkpoint, device=self.device)
        self.streamqueue = None

    async def init(self):
        """Create the queue of CUDA streams used to serialize inference."""
        self.streamqueue = asyncio.Queue()
        for _ in range(self.streamqueue_size):
            stream = torch.cuda.Stream(device=self.device)
            self.streamqueue.put_nowait(stream)

    if sys.version_info >= (3, 7):

        async def apredict(self, img):
            """Asynchronously run the detector on ``img`` (path or ndarray)."""
            if isinstance(img, str):
                img = mmcv.imread(img)
            async with concurrent(self.streamqueue):
                result = await async_inference_detector(self.model, img)
            return result
|
class AsyncInferenceTestCase(AsyncTestCase):
    """End-to-end async inference smoke test (requires CUDA)."""

    if sys.version_info >= (3, 7):

        async def test_simple_inference(self):
            if not torch.cuda.is_available():
                import pytest
                pytest.skip('test requires GPU and torch+cuda')
            # Bug fix: use __file__ (this file's path) instead of __name__
            # (the dotted module name), which made root_dir resolve to ''
            # and the config/demo paths point at the working directory.
            root_dir = os.path.dirname(os.path.dirname(__file__))
            model_config = os.path.join(root_dir, 'configs/mask_rcnn_r50_fpn_1x.py')
            detector = MaskRCNNDetector(model_config)
            await detector.init()
            img_path = os.path.join(root_dir, 'demo/demo.jpg')
            bboxes, _ = await detector.apredict(img_path)
            self.assertTrue(bboxes)
|
def _get_config_directory():
    """Locate the repository's ``configs`` directory.

    Raises:
        Exception: if no ``configs`` directory can be found.
    """
    try:
        repo_dpath = dirname(dirname(__file__))
    except NameError:
        # __file__ is undefined (e.g. interactive session); derive the
        # repo root from the installed mmdet package instead.
        import mmdet
        repo_dpath = dirname(dirname(mmdet.__file__))
    config_dpath = join(repo_dpath, 'configs')
    if not exists(config_dpath):
        raise Exception('Cannot find config path')
    return config_dpath
|
def test_config_build_detector():
    """Test that all detection models defined in the configs can be
    initialized."""
    from xdoctest.utils import import_module_from_path
    from mmdet.models import build_detector
    config_dpath = _get_config_directory()
    print('Found config_dpath = {!r}'.format(config_dpath))
    config_names = [
        'dcn/mask_rcnn_dconv_c3-c5_r50_fpn_1x.py',
        'htc/htc_without_semantic_r50_fpn_1x.py',
        'cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py',
        'grid_rcnn/grid_rcnn_gn_head_r50_fpn_2x.py',
        'double_heads/dh_faster_rcnn_r50_fpn_1x.py',
        'empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x.py',
        'guided_anchoring/ga_rpn_r50_caffe_fpn_1x.py',
        'foveabox/fovea_r50_fpn_4gpu_1x.py',
        'foveabox/fovea_align_gn_ms_r50_fpn_4gpu_2x.py',
        'hrnet/fcos_hrnetv2p_w32_gn_1x_4gpu.py',
        'gn+ws/mask_rcnn_r50_fpn_gn_ws_2x.py',
        'pascal_voc/ssd300_voc.py',
        'pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py',
        'pascal_voc/ssd512_voc.py',
        'gcnet/mask_rcnn_r50_fpn_sbn_1x.py',
        'gn/mask_rcnn_r50_fpn_gn_contrib_2x.py',
        'reppoints/reppoints_moment_r50_fpn_2x.py',
        'reppoints/reppoints_partial_minmax_r50_fpn_1x.py',
        'reppoints/bbox_r50_grid_center_fpn_1x.py',
        'reppoints/reppoints_minmax_r50_fpn_1x.py',
        'reppoints/bbox_r50_grid_fpn_1x.py',
        'fcos/fcos_r50_caffe_fpn_gn_1x_4gpu.py',
        'albu_example/mask_rcnn_r50_fpn_1x.py',
        'libra_rcnn/libra_faster_rcnn_r50_fpn_1x.py',
        'fp16/mask_rcnn_r50_fpn_fp16_1x.py',
        'fp16/faster_rcnn_r50_fpn_fp16_1x.py',
    ]
    print('Using {} config files'.format(len(config_names)))
    for config_fname in config_names:
        config_fpath = join(config_dpath, config_fname)
        config_mod = import_module_from_path(config_fpath)
        # Touch the required attributes so a missing one fails loudly here.
        config_mod.model
        config_mod.train_cfg
        config_mod.test_cfg
        print('Building detector, config_fpath = {!r}'.format(config_fpath))
        # Avoid downloading pretrained backbone weights during the test.
        if 'pretrained' in config_mod.model:
            config_mod.model['pretrained'] = None
        detector = build_detector(
            config_mod.model,
            train_cfg=config_mod.train_cfg,
            test_cfg=config_mod.test_cfg)
        assert detector is not None
|
def test_config_data_pipeline():
    """Test whether the data pipeline is valid and can process corner cases.

    CommandLine:
        xdoctest -m tests/test_config.py test_config_build_data_pipeline
    """
    from xdoctest.utils import import_module_from_path
    from mmdet.datasets.pipelines import Compose
    import numpy as np

    def _build_results(img, with_gt):
        """Assemble a minimal `results` dict; empty annotations if not with_gt."""
        if with_gt:
            gt_bboxes = np.array([[35.2, 11.7, 39.7, 15.7]], dtype=np.float32)
            gt_labels = np.array([1], dtype=np.int64)
            gt_masks = [(img[..., 0] == 233).astype(np.uint8)]
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)
            gt_masks = []
        results = dict(
            filename='test_img.png',
            img=img,
            img_shape=img.shape,
            ori_shape=img.shape,
            gt_bboxes=gt_bboxes,
            gt_labels=gt_labels,
            gt_masks=gt_masks)
        results['bbox_fields'] = ['gt_bboxes']
        results['mask_fields'] = ['gt_masks']
        return results

    config_dpath = _get_config_directory()
    print('Found config_dpath = {!r}'.format(config_dpath))
    config_names = [
        'wider_face/ssd300_wider_face.py',
        'pascal_voc/ssd300_voc.py',
        'pascal_voc/ssd512_voc.py',
        'fp16/mask_rcnn_r50_fpn_fp16_1x.py',
    ]
    print('Using {} config files'.format(len(config_names)))
    for config_fname in config_names:
        config_fpath = join(config_dpath, config_fname)
        config_mod = import_module_from_path(config_fpath)
        # Drop the LoadImageFromFile/LoadAnnotations steps; the image and
        # annotations are injected directly below.
        loading_pipeline = config_mod.train_pipeline.pop(0)
        config_mod.train_pipeline.pop(0)
        config_mod.test_pipeline.pop(0)
        train_pipeline = Compose(config_mod.train_pipeline)
        test_pipeline = Compose(config_mod.test_pipeline)
        print('Building data pipeline, config_fpath = {!r}'.format(config_fpath))
        img = np.random.randint(0, 255, size=(888, 666, 3), dtype=np.uint8)
        if loading_pipeline.get('to_float32', False):
            img = img.astype(np.float32)
        # Run each pipeline once with real GT and once with empty GT.
        for pipeline, case_name, with_gt in [
                (train_pipeline, 'training', True),
                (test_pipeline, 'testing', True),
                (train_pipeline, 'empty GT with training', False),
                (test_pipeline, 'empty GT with testing', False)]:
            print('Test {} data pipeline: \n{!r}'.format(case_name, pipeline))
            output_results = pipeline(_build_results(img, with_gt))
            assert output_results is not None
|
def test_nms_device_and_dtypes_cpu():
    """
    CommandLine:
        xdoctest -m tests/test_nms.py test_nms_device_and_dtypes_cpu
    """
    iou_thr = 0.7
    base_dets = np.array([[49.1, 32.4, 51.0, 35.9, 0.9],
                          [49.3, 32.9, 51.0, 35.3, 0.9],
                          [35.3, 11.5, 39.9, 14.5, 0.4],
                          [35.2, 11.7, 39.7, 15.7, 0.3]])
    # Exercise every supported CPU container/dtype combination; each must
    # preserve the input dtype and keep exactly 3 of the 4 boxes.
    for dets in (base_dets.astype(np.float32),
                 torch.FloatTensor(base_dets),
                 base_dets.astype(np.float64),
                 torch.DoubleTensor(base_dets)):
        suppressed, inds = nms(dets, iou_thr)
        assert dets.dtype == suppressed.dtype
        assert len(inds) == len(suppressed) == 3
|
def test_nms_device_and_dtypes_gpu():
    """
    CommandLine:
        xdoctest -m tests/test_nms.py test_nms_device_and_dtypes_gpu
    """
    if not torch.cuda.is_available():
        import pytest
        pytest.skip('test requires GPU and torch+cuda')
    iou_thr = 0.7
    base_dets = np.array([[49.1, 32.4, 51.0, 35.9, 0.9],
                          [49.3, 32.9, 51.0, 35.3, 0.9],
                          [35.3, 11.5, 39.9, 14.5, 0.4],
                          [35.2, 11.7, 39.7, 15.7, 0.3]])
    for device_id in range(torch.cuda.device_count()):
        print('Run NMS on device_id = {!r}'.format(device_id))
        # numpy input routed to a GPU via an explicit device id.
        np_dets = base_dets.astype(np.float32)
        suppressed, inds = nms(np_dets, iou_thr, device_id)
        assert np_dets.dtype == suppressed.dtype
        assert len(inds) == len(suppressed) == 3
        # Tensor input already resident on the target device.
        th_dets = torch.FloatTensor(base_dets).to(device_id)
        suppressed, inds = nms(th_dets, iou_thr)
        assert th_dets.dtype == suppressed.dtype
        assert len(inds) == len(suppressed) == 3
|
def test_random_sampler():
    """Sampled pos/neg boxes must stay aligned with their index tensors."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    gt_labels = torch.LongTensor([1, 2])
    gt_bboxes_ignore = torch.Tensor([[30, 30, 40, 40]])
    assign_result = assigner.assign(
        bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)
    sampler = RandomSampler(
        num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
    sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
|
def test_random_sampler_empty_gt():
    """Sampling with zero gt boxes must still yield consistent results."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.empty(0, 4)
    gt_labels = torch.empty(0).long()
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    sampler = RandomSampler(
        num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
    sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
|
def test_random_sampler_empty_pred():
    """Sampling with zero predicted boxes must still yield consistent results."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False)
    bboxes = torch.empty(0, 4)
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    gt_labels = torch.LongTensor([1, 2])
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    sampler = RandomSampler(
        num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
    sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
|
def _context_for_ohem():
    """Build a detector instance used as the OHEM sampler's loss context."""
    try:
        from test_forward import _get_detector_cfg
    except ImportError:
        # test_forward lives next to this file; make it importable when
        # the tests are launched from another working directory.
        import sys
        from os.path import dirname
        sys.path.insert(0, dirname(__file__))
        from test_forward import _get_detector_cfg
    model, train_cfg, test_cfg = _get_detector_cfg('faster_rcnn_ohem_r50_fpn_1x.py')
    # No pretrained weights needed; use the torchvision RoI op so the
    # compiled CUDA extension is not required.
    model['pretrained'] = None
    model['bbox_roi_extractor']['roi_layer']['use_torchvision'] = True
    from mmdet.models import build_detector
    return build_detector(model, train_cfg=train_cfg, test_cfg=test_cfg)
|
def test_ohem_sampler():
    """OHEM sampling keeps pos/neg boxes aligned with their index tensors."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([[0, 0, 10, 9], [0, 10, 10, 19]])
    gt_labels = torch.LongTensor([1, 2])
    gt_bboxes_ignore = torch.Tensor([[30, 30, 40, 40]])
    assign_result = assigner.assign(
        bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)
    context = _context_for_ohem()
    sampler = OHEMSampler(
        num=10, pos_fraction=0.5, context=context, neg_pos_ub=-1,
        add_gt_as_proposals=True)
    # FPN-style feature pyramid with 64/32/16/8/4 spatial sizes.
    feats = [torch.rand(1, 256, int(2 ** i), int(2 ** i))
             for i in [6, 5, 4, 3, 2]]
    sample_result = sampler.sample(
        assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
|
def test_ohem_sampler_empty_gt():
    """OHEM sampling with zero gt boxes must still be consistent."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False)
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.empty(0, 4)
    gt_labels = torch.LongTensor([])
    gt_bboxes_ignore = torch.Tensor([])
    assign_result = assigner.assign(
        bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)
    context = _context_for_ohem()
    sampler = OHEMSampler(
        num=10, pos_fraction=0.5, context=context, neg_pos_ub=-1,
        add_gt_as_proposals=True)
    feats = [torch.rand(1, 256, int(2 ** i), int(2 ** i))
             for i in [6, 5, 4, 3, 2]]
    sample_result = sampler.sample(
        assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
|
def test_ohem_sampler_empty_pred():
    """OHEM sampling with zero predicted boxes must still be consistent."""
    assigner = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False)
    bboxes = torch.empty(0, 4)
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_labels = torch.LongTensor([1, 2, 2, 3])
    gt_bboxes_ignore = torch.Tensor([])
    assign_result = assigner.assign(
        bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore,
        gt_labels=gt_labels)
    context = _context_for_ohem()
    sampler = OHEMSampler(
        num=10, pos_fraction=0.5, context=context, neg_pos_ub=-1,
        add_gt_as_proposals=True)
    feats = [torch.rand(1, 256, int(2 ** i), int(2 ** i))
             for i in [6, 5, 4, 3, 2]]
    sample_result = sampler.sample(
        assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
    assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
    assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
|
def test_random_sample_result():
    """Smoke-test SamplingResult.random over assorted gt/pred counts and rngs."""
    from mmdet.core.bbox.samplers.sampling_result import SamplingResult
    # Sweep gt/pred counts over empty, equal, and unbalanced combinations.
    for num_gts, num_preds in [(0, 0), (0, 3), (3, 3), (0, 3), (7, 7),
                               (7, 64), (24, 3)]:
        SamplingResult.random(num_gts=num_gts, num_preds=num_preds)
    # A few different seeds to vary the internal random choices.
    for seed in range(3):
        SamplingResult.random(rng=seed)
|
def test_soft_nms_device_and_dtypes_cpu():
    """
    CommandLine:
        xdoctest -m tests/test_soft_nms.py test_soft_nms_device_and_dtypes_cpu
    """
    iou_thr = 0.7
    base_dets = np.array([[49.1, 32.4, 51.0, 35.9, 0.9],
                          [49.3, 32.9, 51.0, 35.3, 0.9],
                          [35.3, 11.5, 39.9, 14.5, 0.4],
                          [35.2, 11.7, 39.7, 15.7, 0.3]])
    # Soft-NMS rescores rather than drops, so all 4 boxes survive for
    # every supported input container/dtype, and the dtype is preserved.
    for dets in (base_dets.astype(np.float32),
                 torch.FloatTensor(base_dets),
                 base_dets.astype(np.float64),
                 torch.DoubleTensor(base_dets)):
        new_dets, inds = soft_nms(dets, iou_thr)
        assert dets.dtype == new_dets.dtype
        assert len(inds) == len(new_dets) == 4
|
def test_params_to_string():
    """params_to_string pretty-prints parameter counts with M/k suffixes."""
    cases = [
        (1000000000.0, '1000.0 M'),
        (200000.0, '200.0 k'),
        (3e-09, '3e-09'),  # below the smallest suffix: plain repr
    ]
    for value, expected in cases:
        npt.assert_equal(params_to_string(value), expected)
|
def cal_train_time(log_dicts, args):
    """Print per-epoch timing statistics for each parsed json training log.

    Args:
        log_dicts (list[dict]): one dict per log; maps epoch -> metric lists.
        args: parsed CLI namespace with ``json_logs`` and ``include_outliers``.
    """
    for i, log_dict in enumerate(log_dicts):
        print('{}Analyze train time of {}{}'.format('-' * 5, args.json_logs[i], '-' * 5))
        all_times = []
        for epoch in log_dict.keys():
            epoch_times = log_dict[epoch]['time']
            if not args.include_outliers:
                # The first iteration of each epoch carries warm-up cost;
                # drop it unless outliers were explicitly requested.
                epoch_times = epoch_times[1:]
            all_times.append(epoch_times)
        all_times = np.array(all_times)
        epoch_ave_time = all_times.mean(-1)
        slowest_epoch = epoch_ave_time.argmax()
        fastest_epoch = epoch_ave_time.argmin()
        std_over_epoch = epoch_ave_time.std()
        print('slowest epoch {}, average time is {:.4f}'.format(slowest_epoch + 1, epoch_ave_time[slowest_epoch]))
        print('fastest epoch {}, average time is {:.4f}'.format(fastest_epoch + 1, epoch_ave_time[fastest_epoch]))
        print('time std over epochs is {:.4f}'.format(std_over_epoch))
        print('average iter time: {:.4f} s/iter'.format(np.mean(all_times)))
        print()
|
def plot_curve(log_dicts, args):
    """Plot the requested metrics from parsed json logs with matplotlib."""
    if (args.backend is not None):
        plt.switch_backend(args.backend)
    sns.set_style(args.style)
    # If no legends are given, derive one per (log file, metric) pair.
    legend = args.legend
    if (legend is None):
        legend = []
        for json_log in args.json_logs:
            for metric in args.keys:
                legend.append('{}_{}'.format(json_log, metric))
    assert (len(legend) == (len(args.json_logs) * len(args.keys)))
    metrics = args.keys
    num_metrics = len(metrics)
    for (i, log_dict) in enumerate(log_dicts):
        epochs = list(log_dict.keys())
        for (j, metric) in enumerate(metrics):
            print('plot curve of {}, metric is {}'.format(args.json_logs[i], metric))
            if (metric not in log_dict[epochs[0]]):
                raise KeyError('{} does not contain metric {}'.format(args.json_logs[i], metric))
            if ('mAP' in metric):
                # mAP-style metrics are logged once per epoch: plot vs epoch.
                xs = np.arange(1, (max(epochs) + 1))
                ys = []
                for epoch in epochs:
                    ys += log_dict[epoch][metric]
                ax = plt.gca()
                ax.set_xticks(xs)
                plt.xlabel('epoch')
                plt.plot(xs, ys, label=legend[((i * num_metrics) + j)], marker='o')
            else:
                # Other metrics are logged per iteration: plot against a
                # global iteration index accumulated across epochs.
                xs = []
                ys = []
                num_iters_per_epoch = log_dict[epochs[0]]['iter'][(- 1)]
                for epoch in epochs:
                    iters = log_dict[epoch]['iter']
                    if (log_dict[epoch]['mode'][(- 1)] == 'val'):
                        # Drop the trailing validation entry so iteration
                        # counts stay aligned with the metric values.
                        iters = iters[:(- 1)]
                    xs.append((np.array(iters) + ((epoch - 1) * num_iters_per_epoch)))
                    ys.append(np.array(log_dict[epoch][metric][:len(iters)]))
                xs = np.concatenate(xs)
                ys = np.concatenate(ys)
                plt.xlabel('iter')
                plt.plot(xs, ys, label=legend[((i * num_metrics) + j)], linewidth=0.5)
            plt.legend()
            if (args.title is not None):
                plt.title(args.title)
    if (args.out is None):
        plt.show()
    else:
        print('save curve to: {}'.format(args.out))
        plt.savefig(args.out)
    plt.cla()
|
def add_plot_parser(subparsers):
    """Register the ``plot_curve`` sub-command and its options."""
    parser_plt = subparsers.add_parser(
        'plot_curve', help='parser for plotting curves')
    parser_plt.add_argument(
        'json_logs', type=str, nargs='+',
        help='path of train log in json format')
    parser_plt.add_argument(
        '--keys', type=str, nargs='+', default=['bbox_mAP'],
        help='the metric that you want to plot')
    parser_plt.add_argument('--title', type=str, help='title of figure')
    parser_plt.add_argument(
        '--legend', type=str, nargs='+', default=None,
        help='legend of each plot')
    parser_plt.add_argument(
        '--backend', type=str, default=None, help='backend of plt')
    parser_plt.add_argument(
        '--style', type=str, default='dark', help='style of plt')
    parser_plt.add_argument('--out', type=str, default=None)
|
def add_time_parser(subparsers):
    """Register the ``cal_train_time`` sub-command and its options."""
    parser_time = subparsers.add_parser(
        'cal_train_time',
        help='parser for computing the average time per training iteration')
    parser_time.add_argument(
        'json_logs', type=str, nargs='+',
        help='path of train log in json format')
    parser_time.add_argument(
        '--include-outliers', action='store_true',
        help='include the first value of every epoch when computing the average time')
|
def parse_args():
    """Build the log-analysis CLI with plot_curve/cal_train_time tasks."""
    parser = argparse.ArgumentParser(description='Analyze Json Log')
    subparsers = parser.add_subparsers(dest='task', help='task parser')
    add_plot_parser(subparsers)
    add_time_parser(subparsers)
    return parser.parse_args()
|
def load_json_logs(json_logs):
    """Load json training logs into per-epoch metric dicts.

    Returns one dict per input file, mapping epoch -> {metric: [values]}.
    Lines without an 'epoch' key (e.g. the env-info header) are skipped.
    """
    log_dicts = [dict() for _ in json_logs]
    for json_log, log_dict in zip(json_logs, log_dicts):
        with open(json_log, 'r') as log_file:
            for line in log_file:
                log = json.loads(line.strip())
                if 'epoch' not in log:
                    continue
                epoch = log.pop('epoch')
                if epoch not in log_dict:
                    log_dict[epoch] = defaultdict(list)
                epoch_dict = log_dict[epoch]
                for k, v in log.items():
                    epoch_dict[k].append(v)
    return log_dicts
|
def main():
    """Entry point: load logs and dispatch the chosen analysis task."""
    args = parse_args()
    json_logs = args.json_logs
    for json_log in json_logs:
        assert json_log.endswith('.json')
    log_dicts = load_json_logs(json_logs)
    # Explicit dispatch instead of eval(args.task): same behavior for the
    # two valid sub-commands, but no arbitrary-expression evaluation.
    tasks = {'plot_curve': plot_curve, 'cal_train_time': cal_train_time}
    tasks[args.task](log_dicts, args)
|
def parse_args():
    """Parse CLI options for browsing a dataset's training pipeline output."""
    parser = argparse.ArgumentParser(description='Browse a dataset')
    parser.add_argument('config', help='train config file path')
    parser.add_argument(
        '--skip-type', type=str, nargs='+',
        default=['DefaultFormatBundle', 'Normalize', 'Collect'],
        help='skip some useless pipeline')
    parser.add_argument(
        '--output-dir', default=None, type=str,
        help='If there is no display interface, you can save it')
    parser.add_argument('--not-show', default=False, action='store_true')
    parser.add_argument(
        '--show-interval', type=int, default=999,
        help='the interval of show (ms)')
    return parser.parse_args()
|
def retrieve_data_cfg(config_path, skip_type):
    """Load a config and drop the given pipeline step types from data.train."""
    cfg = Config.fromfile(config_path)
    train_data_cfg = cfg.data.train
    kept_steps = [
        step for step in train_data_cfg.pipeline
        if step['type'] not in skip_type
    ]
    train_data_cfg['pipeline'] = kept_steps
    return cfg
|
def main():
    """Iterate the train dataset and visualize each sample's gt boxes."""
    args = parse_args()
    cfg = retrieve_data_cfg(args.config, args.skip_type)
    dataset = build_dataset(cfg.data.train)
    progress_bar = mmcv.ProgressBar(len(dataset))
    for item in dataset:
        if args.output_dir is not None:
            filename = os.path.join(args.output_dir, Path(item['filename']).name)
        else:
            filename = None
        # Labels are 1-based in the dataset; imshow_det_bboxes wants 0-based.
        mmcv.imshow_det_bboxes(
            item['img'],
            item['gt_bboxes'],
            item['gt_labels'] - 1,
            class_names=dataset.CLASSES,
            show=not args.not_show,
            out_file=filename,
            wait_time=args.show_interval)
        progress_bar.update()
|
def parse_xml(args):
    """Parse one PASCAL VOC xml annotation into an mmdetection annotation dict.

    Args:
        args (tuple): (xml_path, img_path) pair.
    """
    xml_path, img_path = args
    root = ET.parse(xml_path).getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)
    bboxes, labels = [], []
    bboxes_ignore, labels_ignore = [], []
    for obj in root.findall('object'):
        label = label_ids[obj.find('name').text]
        difficult = int(obj.find('difficult').text)
        bnd_box = obj.find('bndbox')
        bbox = [
            int(bnd_box.find('xmin').text),
            int(bnd_box.find('ymin').text),
            int(bnd_box.find('xmax').text),
            int(bnd_box.find('ymax').text),
        ]
        # "difficult" objects are kept separately and ignored in training.
        if difficult:
            bboxes_ignore.append(bbox)
            labels_ignore.append(label)
        else:
            bboxes.append(bbox)
            labels.append(label)
    if bboxes:
        bboxes = np.array(bboxes, ndmin=2) - 1  # VOC coordinates are 1-based
        labels = np.array(labels)
    else:
        bboxes = np.zeros((0, 4))
        labels = np.zeros((0,))
    if bboxes_ignore:
        bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1
        labels_ignore = np.array(labels_ignore)
    else:
        bboxes_ignore = np.zeros((0, 4))
        labels_ignore = np.zeros((0,))
    annotation = {
        'filename': img_path,
        'width': w,
        'height': h,
        'ann': {
            'bboxes': bboxes.astype(np.float32),
            'labels': labels.astype(np.int64),
            'bboxes_ignore': bboxes_ignore.astype(np.float32),
            'labels_ignore': labels_ignore.astype(np.int64),
        },
    }
    return annotation
|
def cvt_annotations(devkit_path, years, split, out_file):
    """Convert VOC xml annotations for one split into a single dumped file.

    Returns the annotation list, or None when a split file list is missing.
    """
    if not isinstance(years, list):
        years = [years]
    annotations = []
    for year in years:
        filelist = osp.join(devkit_path,
                            'VOC{}/ImageSets/Main/{}.txt'.format(year, split))
        if not osp.isfile(filelist):
            print('filelist does not exist: {}, skip voc{} {}'.format(
                filelist, year, split))
            return
        img_names = mmcv.list_from_file(filelist)
        tasks = [
            (osp.join(devkit_path,
                      'VOC{}/Annotations/{}.xml'.format(year, img_name)),
             'VOC{}/JPEGImages/{}.jpg'.format(year, img_name))
            for img_name in img_names
        ]
        annotations.extend(mmcv.track_progress(parse_xml, tasks))
    mmcv.dump(annotations, out_file)
    return annotations
|
def parse_args():
    """Build and parse CLI arguments for the VOC annotation converter."""
    parser = argparse.ArgumentParser(
        description='Convert PASCAL VOC annotations to mmdetection format')
    parser.add_argument('devkit_path', help='pascal voc devkit path')
    parser.add_argument('-o', '--out-dir', help='output path')
    return parser.parse_args()
|
def main():
    """Convert every available VOC split under the devkit path to pkl files."""
    args = parse_args()
    devkit_path = args.devkit_path
    out_dir = args.out_dir if args.out_dir else devkit_path
    mmcv.mkdir_or_exist(out_dir)
    years = [y for y in ('2007', '2012')
             if osp.isdir(osp.join(devkit_path, 'VOC' + y))]
    if len(years) == 2:
        # both present: also produce the combined 07+12 annotation set
        years.append(['2007', '2012'])
    if not years:
        raise IOError('The devkit path {} contains neither "VOC2007" nor '
                      '"VOC2012" subfolder'.format(devkit_path))
    prefixes = {'2007': 'voc07', '2012': 'voc12'}
    for year in years:
        prefix = 'voc0712' if isinstance(year, list) else prefixes[year]
        for split in ['train', 'val', 'trainval']:
            dataset_name = prefix + '_' + split
            print('processing {} ...'.format(dataset_name))
            cvt_annotations(devkit_path, year, split,
                            osp.join(out_dir, dataset_name + '.pkl'))
        # the combined set has no common test split
        if not isinstance(year, list):
            dataset_name = prefix + '_test'
            print('processing {} ...'.format(dataset_name))
            cvt_annotations(devkit_path, year, 'test',
                            osp.join(out_dir, dataset_name + '.pkl'))
    print('Done!')
|
def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names):
    """Copy an affine BN layer from a caffe blob dict into a torch state dict.

    Detectron stores only the scale ('_s') and bias ('_b') blobs; running
    statistics are assumed folded in, so mean/var are set to zeros/ones.
    """
    weight = torch.from_numpy(blobs[caffe_name + '_s'])
    bias = torch.from_numpy(blobs[caffe_name + '_b'])
    state_dict[torch_name + '.weight'] = weight
    state_dict[torch_name + '.bias'] = bias
    num_features = weight.size()
    state_dict[torch_name + '.running_mean'] = torch.zeros(num_features)
    state_dict[torch_name + '.running_var'] = torch.ones(num_features)
    converted_names.update({caffe_name + '_b', caffe_name + '_s'})
|
def convert_conv_fc(blobs, state_dict, caffe_name, torch_name, converted_names):
    """Copy a conv/fc weight (and optional bias) into a torch state dict."""
    state_dict[torch_name + '.weight'] = torch.from_numpy(
        blobs[caffe_name + '_w'])
    converted_names.add(caffe_name + '_w')
    bias_key = caffe_name + '_b'
    # not every layer carries a bias blob
    if bias_key in blobs:
        state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[bias_key])
        converted_names.add(bias_key)
|
def convert(src, dst, depth):
    """Convert keys in detectron pretrained ResNet models to pytorch style.

    Args:
        src (str): path to the caffe2/detectron pickle file.
        dst (str): path where the converted pytorch checkpoint is saved.
        depth (int): ResNet depth; must be a key of the module-level
            ``arch_settings`` mapping (50 or 101 per the error message).
    """
    if (depth not in arch_settings):
        raise ValueError('Only support ResNet-50 and ResNet-101 currently')
    block_nums = arch_settings[depth]
    caffe_model = mmcv.load(src, encoding='latin1')
    # some detectron pickles nest the weights under a 'blobs' key
    blobs = (caffe_model['blobs'] if ('blobs' in caffe_model) else caffe_model)
    state_dict = OrderedDict()
    converted_names = set()
    # stem: conv1 + bn1
    convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names)
    convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names)
    # residual stages: caffe names start at 'res2', torch names at 'layer1'
    for i in range(1, (len(block_nums) + 1)):
        for j in range(block_nums[(i - 1)]):
            if (j == 0):
                # first block of each stage has a projection shortcut (branch1)
                convert_conv_fc(blobs, state_dict, 'res{}_{}_branch1'.format((i + 1), j), 'layer{}.{}.downsample.0'.format(i, j), converted_names)
                convert_bn(blobs, state_dict, 'res{}_{}_branch1_bn'.format((i + 1), j), 'layer{}.{}.downsample.1'.format(i, j), converted_names)
            # bottleneck convs a/b/c map to conv1/conv2/conv3 (and bn1..bn3)
            for (k, letter) in enumerate(['a', 'b', 'c']):
                convert_conv_fc(blobs, state_dict, 'res{}_{}_branch2{}'.format((i + 1), j, letter), 'layer{}.{}.conv{}'.format(i, j, (k + 1)), converted_names)
                convert_bn(blobs, state_dict, 'res{}_{}_branch2{}_bn'.format((i + 1), j, letter), 'layer{}.{}.bn{}'.format(i, j, (k + 1)), converted_names)
    # report caffe blobs that were not mapped to any torch parameter
    for key in blobs:
        if (key not in converted_names):
            print('Not Convert: {}'.format(key))
    checkpoint = dict()
    checkpoint['state_dict'] = state_dict
    torch.save(checkpoint, dst)
|
def main():
    """CLI entry point: convert a detectron ResNet checkpoint to pytorch."""
    parser = argparse.ArgumentParser(description='Convert model keys')
    for name, kwargs in (('src', dict(help='src detectron model path')),
                         ('dst', dict(help='save path')),
                         ('depth', dict(type=int, help='ResNet model depth'))):
        parser.add_argument(name, **kwargs)
    cli = parser.parse_args()
    convert(cli.src, cli.dst, cli.depth)
|
def fuse_conv_bn(conv, bn):
    """Fold a BatchNorm layer into the preceding conv layer, in place.

    At inference time batch norm applies a fixed per-channel affine transform
    based on its running statistics, so it can be merged into the conv's
    weight and bias to save computation and simplify the network structure.
    """
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    if conv.bias is not None:
        bias = conv.bias
    else:
        bias = torch.zeros_like(bn.running_mean)
    conv.weight = nn.Parameter(
        conv.weight * scale.reshape([conv.out_channels, 1, 1, 1]))
    conv.bias = nn.Parameter((bias - bn.running_mean) * scale + bn.bias)
    return conv
|
def fuse_module(m):
    """Recursively fuse every Conv2d + following BatchNorm pair inside ``m``.

    Each BN that directly follows a conv is folded into that conv and replaced
    by an ``nn.Identity`` so the module hierarchy keeps its shape.
    """
    pending_conv = None
    pending_name = None
    for name, child in m.named_children():
        if isinstance(child, (nn.BatchNorm2d, nn.SyncBatchNorm)):
            # a BN without a preceding conv is left untouched
            if pending_conv is not None:
                m._modules[pending_name] = fuse_conv_bn(pending_conv, child)
                m._modules[name] = nn.Identity()
                pending_conv = None
        elif isinstance(child, nn.Conv2d):
            pending_conv = child
            pending_name = name
        else:
            fuse_module(child)  # recurse into containers
    return m
|
def parse_args():
    """Parse CLI arguments for the conv-bn fusion script."""
    parser = argparse.ArgumentParser(
        description='fuse Conv and BN layers in a model')
    for arg, msg in (('config', 'config file path'),
                     ('checkpoint', 'checkpoint file path'),
                     ('out', 'output path of the converted model')):
        parser.add_argument(arg, help=msg)
    return parser.parse_args()
|
def main():
    """Load a detector, fuse its conv+bn layers and save the result."""
    cli = parse_args()
    model = init_detector(cli.config, cli.checkpoint)
    save_checkpoint(fuse_module(model), cli.out)
|
def parse_args():
    """Parse CLI arguments for the FLOPs counter."""
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[1280, 800],
        help='input image size')
    return parser.parse_args()
|
def main():
    """Compute and print FLOPs/params of a detector for a given input shape."""
    args = parse_args()
    if len(args.shape) == 1:
        input_shape = (3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (3,) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')
    cfg = Config.fromfile(args.config)
    model = build_detector(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg).cuda()
    model.eval()
    # the complexity counter calls the model with a single tensor, so route
    # forward to the dummy entry point
    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        # fix: message previously read "currently not currently supported"
        raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.format(
                model.__class__.__name__))
    flops, params = get_model_complexity_info(model, input_shape)
    split_line = '=' * 30
    print('{0}\nInput shape: {1}\nFlops: {2}\nParams: {3}\n{0}'.format(
        split_line, input_shape, flops, params))
    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify that the '
          'flops computation is correct.')
|
def parse_args():
    """Parse CLI arguments for the checkpoint publishing tool."""
    parser = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    parser.add_argument('in_file', help='input checkpoint filename')
    parser.add_argument('out_file', help='output checkpoint filename')
    return parser.parse_args()
|
def process_checkpoint(in_file, out_file):
    """Strip optimizer state from a checkpoint and stamp a hash in its name.

    The optimizer state is dropped to shrink the file for publishing; the
    result is saved and then renamed to
    ``<out_file stem>-<first 8 sha256 hex chars>.pth``.
    """
    import hashlib
    import os
    checkpoint = torch.load(in_file, map_location='cpu')
    # optimizer states are useless for inference and dominate the file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    torch.save(checkpoint, out_file)
    # hash in-process instead of shelling out to 'sha256sum' (portable and
    # avoids parsing command output)
    with open(out_file, 'rb') as f:
        sha = hashlib.sha256(f.read()).hexdigest()
    # bug fix: str.rstrip('.pth') strips any trailing '.', 'p', 't', 'h'
    # characters (e.g. 'depth.pth' -> 'de'); strip the suffix explicitly
    if out_file.endswith('.pth'):
        stem = out_file[:-len('.pth')]
    else:
        stem = out_file
    final_file = '{}-{}.pth'.format(stem, sha[:8])
    # os.replace is synchronous and atomic, unlike the previous
    # fire-and-forget subprocess.Popen(['mv', ...])
    os.replace(out_file, final_file)
|
def main():
    """CLI wrapper around process_checkpoint."""
    cli = parse_args()
    process_checkpoint(cli.in_file, cli.out_file)
|
def export_onnx_model(model, inputs, passes):
    """Trace and export a model to onnx format. Modified from
    https://github.com/facebookresearch/detectron2/

    Args:
        model (nn.Module):
        inputs (tuple[args]): the model will be called by `model(*inputs)`
        passes (None or list[str]): the optimization passed for ONNX model

    Returns:
        an onnx model
    """
    assert isinstance(model, torch.nn.Module)

    # export must happen in eval mode; verify every submodule is switched
    def _check_eval(module):
        assert (not module.training)

    model.apply(_check_eval)
    with torch.no_grad():
        with io.BytesIO() as f:
            # ATEN fallback keeps unsupported ops as aten:: nodes instead of
            # failing the export outright
            torch.onnx.export(model, inputs, f, operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK)
            onnx_model = onnx.load_from_string(f.getvalue())
    if (passes is not None):
        # validate requested optimization passes before running them
        all_passes = optimizer.get_available_passes()
        assert all(((p in all_passes) for p in passes)), 'Only {} are supported'.format(all_passes)
        onnx_model = optimizer.optimize(onnx_model, passes)
    return onnx_model
|
def parse_args():
    """Parse CLI arguments for the pytorch-to-ONNX converter."""
    parser = argparse.ArgumentParser(
        description='MMDet pytorch model conversion to ONNX')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--out', type=str, required=True, help='output ONNX filename')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[1280, 800],
        help='input image size')
    parser.add_argument(
        '--passes', type=str, nargs='+', help='ONNX optimization passes')
    return parser.parse_args()
|
def main():
    """Convert a detector checkpoint to ONNX using a fixed dummy input."""
    args = parse_args()
    if not args.out.endswith('.onnx'):
        raise ValueError('The output file must be a onnx file.')
    if len(args.shape) == 1:
        input_shape = (3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (3,) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')
    cfg = mmcv.Config.fromfile(args.config)
    cfg.model.pretrained = None
    # build the model and load the checkpoint on CPU
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    model.cpu().eval()
    # RoI ops must use the torchvision implementation for ONNX export
    for m in model.modules():
        if isinstance(m, (RoIPool, RoIAlign)):
            m.use_torchvision = True
    # the exporter traces a single-tensor call, so route forward to the dummy
    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        # fix: message previously read "currently not currently supported"
        raise NotImplementedError(
            'ONNX conversion is currently not supported with {}'.format(
                model.__class__.__name__))
    input_data = torch.empty((1, *input_shape),
                             dtype=next(model.parameters()).dtype,
                             device=next(model.parameters()).device)
    onnx_model = export_onnx_model(model, (input_data,), args.passes)
    onnx.helper.printable_graph(onnx_model.graph)
    print('saving model in {}'.format(args.out))
    onnx.save(onnx_model, args.out)
|
class MultipleKVAction(argparse.Action):
    """
    argparse action to split an argument into KEY=VALUE form
    on the first = and append to a dictionary. List options should
    be passed as comma separated values, i.e KEY=V1,V2,V3
    """

    def _parse_int_float_bool(self, val):
        """Coerce a string to int, float or bool; fall back to the raw string."""
        for cast in (int, float):
            try:
                return cast(val)
            except ValueError:
                pass
        lowered = val.lower()
        if lowered in ('true', 'false'):
            return lowered == 'true'
        return val

    def __call__(self, parser, namespace, values, option_string=None):
        options = {}
        for pair in values:
            key, raw = pair.split('=', maxsplit=1)
            parsed = [self._parse_int_float_bool(item) for item in raw.split(',')]
            # a single value is stored as a scalar, not a 1-element list
            options[key] = parsed[0] if len(parsed) == 1 else parsed
        setattr(namespace, self.dest, options)
|
def parse_args():
    """Parse CLI arguments for testing and evaluating a detector."""
    parser = argparse.ArgumentParser(description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file in pickle format')
    # fix: help texts previously read "increasethe" and "It isuseful"
    parser.add_argument(
        '--fuse_conv_bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase '
        'the inference speed')
    parser.add_argument(
        '--format_only',
        action='store_true',
        help='Format the output results without perform evaluation. It is '
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
        ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--gpu_collect',
        action='store_true',
        help='whether to use gpu to collect results.')
    parser.add_argument(
        '--tmpdir',
        help='tmp directory used for collecting results from multiple '
        'workers, available when gpu_collect is not specified')
    parser.add_argument(
        '--options', nargs='+', action=MultipleKVAction, help='custom options')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # make the launcher-provided rank visible to torch.distributed helpers
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
|
def main():
    """Entry point: test a detector, then save/format/evaluate its results."""
    args = parse_args()
    assert (args.out or args.eval or args.format_only or args.show), 'Please specify at least one operation (save/eval/format/show the results) with the argument "--out", "--eval", "--format_only" or "--show"'
    if (args.eval and args.format_only):
        raise ValueError('--eval and --format_only cannot be both specified')
    if ((args.out is not None) and (not args.out.endswith(('.pkl', '.pickle')))):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark when the config asks for it
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # init the distributed environment before anything rank-dependent
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # build the dataloader: tests run one image per GPU, in order
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset, imgs_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
    # build the model and load the checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if (fp16_cfg is not None):
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        # fold BN layers into their preceding convs for faster inference
        model = fuse_module(model)
    # prefer class names recorded in the checkpoint; fall back to the dataset
    if ('CLASSES' in checkpoint['meta']):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    if (not distributed):
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)
    (rank, _) = get_dist_info()
    # only rank 0 writes/formats/evaluates results
    if (rank == 0):
        if args.out:
            print('\nwriting results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)
        kwargs = ({} if (args.options is None) else args.options)
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, **kwargs)
|
def coco_eval_with_return(result_files, result_types, coco, max_dets=(100, 300, 1000)):
    """Run COCO evaluation and return the metrics instead of only printing.

    Args:
        result_files (dict[str, str]): result json path per result type.
        result_types (list[str]): subset of 'proposal', 'bbox', 'segm',
            'keypoints'.
        coco (COCO | str): COCO api object or path to an annotation file.
        max_dets (tuple[int]): proposal numbers used for 'proposal' eval.

    Returns:
        dict: per result type, a metric-name -> value dict for 'bbox'/'segm',
        otherwise the raw ``cocoEval.stats`` array.
    """
    for res_type in result_types:
        assert (res_type in ['proposal', 'bbox', 'segm', 'keypoints'])
    if mmcv.is_str(coco):
        coco = COCO(coco)
    assert isinstance(coco, COCO)
    eval_results = {}
    for res_type in result_types:
        result_file = result_files[res_type]
        assert result_file.endswith('.json')
        coco_dets = coco.loadRes(result_file)
        img_ids = coco.getImgIds()
        # proposals are scored class-agnostically with the bbox IoU metric
        iou_type = ('bbox' if (res_type == 'proposal') else res_type)
        cocoEval = COCOeval(coco, coco_dets, iou_type)
        cocoEval.params.imgIds = img_ids
        if (res_type == 'proposal'):
            cocoEval.params.useCats = 0
            cocoEval.params.maxDets = list(max_dets)
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        if ((res_type == 'segm') or (res_type == 'bbox')):
            # name the 12 summary stats in cocoEval.stats order
            metric_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100', 'ARs', 'ARm', 'ARl']
            eval_results[res_type] = {metric_names[i]: cocoEval.stats[i] for i in range(len(metric_names))}
        else:
            eval_results[res_type] = cocoEval.stats
    return eval_results
|
def voc_eval_with_return(result_file, dataset, iou_thr=0.5, logger='print', only_ap=True):
    """Run VOC-style mAP evaluation on dumped detection results.

    Returns:
        tuple: ``(mean_ap, eval_results)``; when ``only_ap`` is True each
        per-class result dict is reduced to its 'ap' entry only.
    """
    det_results = mmcv.load(result_file)
    annotations = [dataset.get_ann_info(i) for i in range(len(dataset))]
    if hasattr(dataset, 'year') and dataset.year == 2007:
        dataset_name = 'voc07'  # selects the 11-point VOC07 metric
    else:
        dataset_name = dataset.CLASSES
    mean_ap, eval_results = eval_map(
        det_results,
        annotations,
        scale_ranges=None,
        iou_thr=iou_thr,
        dataset=dataset_name,
        logger=logger)
    if only_ap:
        eval_results = [{'ap': res['ap']} for res in eval_results]
    return mean_ap, eval_results
|
def single_gpu_test(model, data_loader, show=False):
    """Run inference on a single GPU and collect results in dataset order."""
    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for data in data_loader:
        with torch.no_grad():
            result = model(return_loss=False, rescale=not show, **data)
        results.append(result)
        if show:
            model.module.show_result(data, result, dataset.img_norm_cfg)
        # advance the bar once per image; a batch may hold several images
        for _ in range(data['img'][0].size(0)):
            prog_bar.update()
    return results
|
def multi_gpu_test(model, data_loader, tmpdir=None):
    """Distributed test helper: each rank evaluates its shard of the dataset.

    Per-rank results are merged via ``collect_results`` (through a shared
    ``tmpdir`` on disk). Only rank 0 shows a progress bar and receives the
    merged result list; other ranks get ``None``.
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    (rank, world_size) = get_dist_info()
    if (rank == 0):
        prog_bar = mmcv.ProgressBar(len(dataset))
    for (i, data) in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
        results.append(result)
        if (rank == 0):
            # every rank processed one batch, so advance batch * world_size
            batch_size = data['img'][0].size(0)
            for _ in range((batch_size * world_size)):
                prog_bar.update()
    # gather all partial result lists on rank 0
    results = collect_results(results, len(dataset), tmpdir)
    return results
|
def collect_results(result_part, size, tmpdir=None):
    """Gather per-rank result lists on rank 0 via a shared temp directory.

    Args:
        result_part (list): this rank's results.
        size (int): total number of samples; used to trim sampler padding.
        tmpdir (str | None): shared directory; when None, rank 0 creates one
            and broadcasts its name to the other ranks.

    Returns:
        list | None: merged results in dataset order on rank 0, else None.
    """
    (rank, world_size) = get_dist_info()
    if (tmpdir is None):
        MAX_LEN = 512
        # 32 is the ASCII space character, stripped by rstrip() after decode
        dir_tensor = torch.full((MAX_LEN,), 32, dtype=torch.uint8, device='cuda')
        if (rank == 0):
            tmpdir = tempfile.mkdtemp()
            tmpdir = torch.tensor(bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        # broadcast the directory name from rank 0 as a byte tensor
        dist.broadcast(dir_tensor, 0)
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    # each rank dumps its partial results into the shared directory
    mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
    dist.barrier()
    if (rank != 0):
        return None
    else:
        # rank 0 loads all parts and interleaves them back into dataset order
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
            part_list.append(mmcv.load(part_file))
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # the sampler pads the last batches, so trim to the real dataset size
        ordered_results = ordered_results[:size]
        shutil.rmtree(tmpdir)
        return ordered_results
|
def parse_args():
    """Parse CLI arguments for the corruption-robustness benchmark."""
    parser = argparse.ArgumentParser(description='MMDet test detector')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file')
    # individual corruption names plus group aliases expanded in main()
    parser.add_argument('--corruptions', type=str, nargs='+', default='benchmark', choices=['all', 'benchmark', 'noise', 'blur', 'weather', 'digital', 'holdout', 'None', 'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog', 'brightness', 'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter', 'saturate'], help='corruptions')
    # severity 0 means "no corruption"
    parser.add_argument('--severities', type=int, nargs='+', default=[0, 1, 2, 3, 4, 5], help='corruption severity levels')
    parser.add_argument('--eval', type=str, nargs='+', choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'], help='eval types')
    parser.add_argument('--iou-thr', type=float, default=0.5, help='IoU threshold for pascal voc evaluation')
    parser.add_argument('--summaries', type=bool, default=False, help='Print summaries for every corruption and severity')
    parser.add_argument('--workers', type=int, default=32, help='workers per gpu')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument('--final-prints', type=str, nargs='+', choices=['P', 'mPC', 'rPC'], default='mPC', help='corruption benchmark metric to print at the end')
    parser.add_argument('--final-prints-aggregate', type=str, choices=['all', 'benchmark'], default='benchmark', help='aggregate all results or only those for benchmark corruptions')
    args = parser.parse_args()
    # make the launcher-provided rank visible to torch.distributed helpers
    if ('LOCAL_RANK' not in os.environ):
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
|
def main():
    """Benchmark a detector's robustness on corrupted copies of the test set.

    For every requested corruption and severity, the test pipeline gets a
    'Corrupt' transform inserted, the model is evaluated, and results are
    aggregated per corruption/severity and finally summarized.
    """
    args = parse_args()
    assert args.out or args.show, (
        'Please specify at least one operation (save or show the results) '
        'with the argument "--out" or "--show"')
    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    if args.workers == 0:
        args.workers = cfg.data.workers_per_gpu
    # init the distributed environment before anything rank-dependent
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    if args.seed is not None:
        set_random_seed(args.seed)
    # expand corruption group aliases into concrete corruption names
    if 'all' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter',
            'saturate'
        ]
    elif 'benchmark' in args.corruptions:
        corruptions = [
            'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
            'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
            'brightness', 'contrast', 'elastic_transform', 'pixelate',
            'jpeg_compression'
        ]
    elif 'noise' in args.corruptions:
        corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise']
    elif 'blur' in args.corruptions:
        corruptions = ['defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur']
    elif 'weather' in args.corruptions:
        corruptions = ['snow', 'frost', 'fog', 'brightness']
    elif 'digital' in args.corruptions:
        corruptions = [
            'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression'
        ]
    elif 'holdout' in args.corruptions:
        corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate']
    elif 'None' in args.corruptions:
        corruptions = ['None']
        args.severities = [0]
    else:
        corruptions = args.corruptions
    rank, _ = get_dist_info()
    aggregated_results = {}
    for corr_i, corruption in enumerate(corruptions):
        aggregated_results[corruption] = {}
        for corruption_severity in args.severities:
            # severity 0 is the uncorrupted baseline: evaluate once, reuse
            if corr_i > 0 and corruption_severity == 0:
                aggregated_results[corruption][0] = \
                    aggregated_results[corruptions[0]][0]
                continue
            test_data_cfg = copy.deepcopy(cfg.data.test)
            if corruption_severity > 0:
                corruption_trans = dict(
                    type='Corrupt',
                    corruption=corruption,
                    severity=corruption_severity)
                # insert the corruption right after the image loading step
                test_data_cfg['pipeline'].insert(1, corruption_trans)
            print('\nTesting {} at severity {}'.format(
                corruption, corruption_severity))
            dataset = build_dataset(test_data_cfg)
            data_loader = build_dataloader(
                dataset,
                imgs_per_gpu=1,
                workers_per_gpu=args.workers,
                dist=distributed,
                shuffle=False)
            model = build_detector(
                cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
            fp16_cfg = cfg.get('fp16', None)
            if fp16_cfg is not None:
                wrap_fp16_model(model)
            checkpoint = load_checkpoint(
                model, args.checkpoint, map_location='cpu')
            # prefer class names recorded in the checkpoint over the dataset's
            if 'CLASSES' in checkpoint['meta']:
                model.CLASSES = checkpoint['meta']['CLASSES']
            else:
                model.CLASSES = dataset.CLASSES
            if not distributed:
                model = MMDataParallel(model, device_ids=[0])
                outputs = single_gpu_test(model, data_loader, args.show)
            else:
                model = MMDistributedDataParallel(
                    model.cuda(),
                    device_ids=[torch.cuda.current_device()],
                    broadcast_buffers=False)
                outputs = multi_gpu_test(model, data_loader, args.tmpdir)
            if args.out and rank == 0:
                eval_results_filename = (
                    osp.splitext(args.out)[0] + '_results' +
                    osp.splitext(args.out)[1])
                mmcv.dump(outputs, args.out)
                eval_types = args.eval
                if cfg.dataset_type == 'VOCDataset':
                    if eval_types:
                        for eval_type in eval_types:
                            if eval_type == 'bbox':
                                test_dataset = mmcv.runner.obj_from_dict(
                                    cfg.data.test, datasets)
                                logger = 'print' if args.summaries else None
                                mean_ap, eval_results = voc_eval_with_return(
                                    args.out, test_dataset, args.iou_thr,
                                    logger)
                                aggregated_results[corruption][
                                    corruption_severity] = eval_results
                            else:
                                print('\nOnly "bbox" evaluation is supported for pascal voc')
                elif eval_types:
                    print('Starting evaluate {}'.format(
                        ' and '.join(eval_types)))
                    if eval_types == ['proposal_fast']:
                        result_file = args.out
                    elif not isinstance(outputs[0], dict):
                        result_files = dataset.results2json(outputs, args.out)
                    else:
                        for name in outputs[0]:
                            print('\nEvaluating {}'.format(name))
                            outputs_ = [out[name] for out in outputs]
                            # bug fix: this used to be two statements, the
                            # second being the dangling expression
                            # "(+ '.{}'.format(name))", which raised TypeError
                            # and never appended the suffix
                            result_file = args.out + '.{}'.format(name)
                            result_files = dataset.results2json(
                                outputs_, result_file)
                    eval_results = coco_eval_with_return(
                        result_files, eval_types, dataset.coco)
                    aggregated_results[corruption][
                        corruption_severity] = eval_results
                else:
                    print('\nNo task was selected for evaluation;\nUse --eval to select a task')
                # save checkpointed results after each evaluated severity
                mmcv.dump(aggregated_results, eval_results_filename)
    if rank == 0:
        print('\nAggregated results:')
        prints = args.final_prints
        aggregate = args.final_prints_aggregate
        if cfg.dataset_type == 'VOCDataset':
            get_results(
                eval_results_filename,
                dataset='voc',
                prints=prints,
                aggregate=aggregate)
        else:
            get_results(
                eval_results_filename,
                dataset='coco',
                prints=prints,
                aggregate=aggregate)
|
def parse_args():
    """Parse CLI arguments for training a detector."""
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work_dir', help='the dir to save logs and models')
    parser.add_argument(
        '--resume_from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--validate',
        action='store_true',
        help='whether to evaluate the checkpoint during training')
    # --gpus and --gpu-ids are mutually exclusive ways to pick devices
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument(
        '--gpus',
        type=int,
        help='number of gpus to use (only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='ids of gpus to use (only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=None, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument(
        '--autoscale-lr',
        action='store_true',
        help='automatically scale lr with the number of gpus')
    args = parser.parse_args()
    # expose the launcher-provided rank to torch.distributed helpers
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    return args
|
def main():
    """Entry point: build datasets and model from the config, then train."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark when the config asks for it
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # CLI overrides take precedence over config values
    if (args.work_dir is not None):
        cfg.work_dir = args.work_dir
    if (args.resume_from is not None):
        cfg.resume_from = args.resume_from
    if (args.gpu_ids is not None):
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = (range(1) if (args.gpus is None) else range(args.gpus))
    if args.autoscale_lr:
        # linear scaling rule: lr proportional to number of GPUs / 8
        cfg.optimizer['lr'] = ((cfg.optimizer['lr'] * len(cfg.gpu_ids)) / 8)
    # init the distributed environment first; the logger depends on dist info
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # create work_dir and a timestamped log file
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # meta records environment and seed info for reproducibility
    meta = dict()
    env_info_dict = collect_env()
    env_info = '\n'.join(['{}: {}'.format(k, v) for (k, v) in env_info_dict.items()])
    dash_line = (('-' * 60) + '\n')
    logger.info((((('Environment info:\n' + dash_line) + env_info) + '\n') + dash_line))
    meta['env_info'] = env_info
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('Config:\n{}'.format(cfg.text))
    if (args.seed is not None):
        logger.info('Set random seed to {}, deterministic: {}'.format(args.seed, args.deterministic))
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    datasets = [build_dataset(cfg.data.train)]
    if (len(cfg.workflow) == 2):
        # a 2-stage workflow also runs validation using the train pipeline
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if (cfg.checkpoint_config is not None):
        # record mmdet version, config text and class names in checkpoints
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__, config=cfg.text, CLASSES=datasets[0].CLASSES)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_detector(model, datasets, cfg, distributed=distributed, validate=args.validate, timestamp=timestamp, meta=meta)
|
def convert(in_file, out_file):
    """Convert keys in checkpoints.

    There can be some breaking changes during the development of mmdetection,
    and this tool is used for upgrading checkpoints trained with old versions
    to the latest one.
    """
    checkpoint = torch.load(in_file)
    in_state_dict = checkpoint.pop('state_dict')
    out_state_dict = OrderedDict()
    for key, val in in_state_dict.items():
        match = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key)
        if match is None:
            out_state_dict[key] = val
        else:
            # old layout stored '...<param>'; new ConvModule layout stores
            # '...conv.<param>'
            param = match.group(2)
            out_state_dict[key.replace(param, 'conv.{}'.format(param))] = val
    checkpoint['state_dict'] = out_state_dict
    torch.save(checkpoint, out_file)
|
def main():
    """CLI entry: upgrade an old-version checkpoint to the current layout."""
    parser = argparse.ArgumentParser(description='Upgrade model version')
    parser.add_argument('in_file', help='input checkpoint file')
    parser.add_argument('out_file', help='output checkpoint file')
    cli = parser.parse_args()
    convert(cli.in_file, cli.out_file)
|
@HEADS.register_module
class SepcFreeAnchorRetinaHead(FreeAnchorRetinaHead):
    """FreeAnchor retina head that accepts separate cls/reg feature inputs.

    When given a ``[cls_feat, reg_feat]`` list (as produced by the SEPC neck
    in this file), each tower consumes its own feature map; a plain tensor is
    fed to both towers.
    """

    def forward_single(self, x):
        if isinstance(x, list):
            cls_feat, reg_feat = x[0], x[1]
        else:
            cls_feat = reg_feat = x
        for conv in self.cls_convs:
            cls_feat = conv(cls_feat)
        for conv in self.reg_convs:
            reg_feat = conv(reg_feat)
        return self.retina_cls(cls_feat), self.retina_reg(reg_feat)
|
@HEADS.register_module
class SepcRetinaHead(RetinaHead):
    """Retina head that accepts separate cls/reg feature inputs.

    When given a ``[cls_feat, reg_feat]`` list (as produced by the SEPC neck
    in this file), each tower consumes its own feature map; a plain tensor is
    fed to both towers.
    """

    def forward_single(self, x):
        if isinstance(x, list):
            cls_feat, reg_feat = x[0], x[1]
        else:
            cls_feat = reg_feat = x
        for conv in self.cls_convs:
            cls_feat = conv(cls_feat)
        for conv in self.reg_convs:
            reg_feat = conv(reg_feat)
        return self.retina_cls(cls_feat), self.retina_reg(reg_feat)
|
@NECKS.register_module
class SEPC(nn.Module):
    """Scale-Equalizing Pyramid Convolution neck.

    Stacks ``Pconv_num`` pyramid convolution modules over the input pyramid,
    then applies a shared localization conv and classification conv to every
    level and returns ``[cls_feat, loc_feat]`` pairs per level.

    Args:
        in_channels (list[int]): input channels per pyramid level.
        out_channels (int): output channels of the pyramid convs.
        num_outs (int): number of output scales; only 5 is supported.
        pconv_deform (bool): use deformable conv inside the PConv modules.
        lcconv_deform (bool): use deformable conv for the final l/c convs.
        iBN (bool): apply BN jointly across levels ("integrated BN").
        Pconv_num (int): number of stacked PConv modules.
    """

    def __init__(self, in_channels=[256] * 5, out_channels=256, num_outs=5,
                 pconv_deform=False, lcconv_deform=False, iBN=False,
                 Pconv_num=4):
        super(SEPC, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        assert num_outs == 5
        self.fp16_enabled = False
        self.iBN = iBN
        self.Pconvs = nn.ModuleList()
        for i in range(Pconv_num):
            self.Pconvs.append(
                PConvModule(in_channels[i], out_channels, iBN=self.iBN,
                            part_deform=pconv_deform))
        # shared extra head convs: 'l' = localization, 'c' = classification
        self.lconv = sepc_conv(256, 256, kernel_size=3, dilation=1,
                               part_deform=lcconv_deform)
        self.cconv = sepc_conv(256, 256, kernel_size=3, dilation=1,
                               part_deform=lcconv_deform)
        self.relu = nn.ReLU()
        if self.iBN:
            self.lbn = nn.BatchNorm2d(256)
            self.cbn = nn.BatchNorm2d(256)
        self.init_weights()

    def init_weights(self):
        """Initialize the l/c convs with N(0, 0.01) weights and zero bias."""
        # fix: the loop variable used to be named 'str', shadowing the builtin
        for head_type in ['l', 'c']:
            m = getattr(self, head_type + 'conv')
            init.normal_(m.weight.data, 0, 0.01)
            if m.bias is not None:
                m.bias.data.zero_()

    @auto_fp16()
    def forward(self, inputs):
        """Run the PConv stack, then the shared l/c convs on every level."""
        assert len(inputs) == len(self.in_channels)
        x = inputs
        for pconv in self.Pconvs:
            x = pconv(x)
        cls = [self.cconv(level, item) for level, item in enumerate(x)]
        loc = [self.lconv(level, item) for level, item in enumerate(x)]
        if self.iBN:
            cls = iBN(cls, self.cbn)
            loc = iBN(loc, self.lbn)
        outs = [[self.relu(s), self.relu(l)] for s, l in zip(cls, loc)]
        return tuple(outs)
|
class PConvModule(nn.Module):
    """One pyramid-convolution stage of SEPC.

    Each level is convolved with Pconv[1]; the finer (previous) level is
    brought in with a stride-2 conv (Pconv[2]) and the coarser (next) level
    with a conv followed by bilinear upsampling (Pconv[0]); the three
    contributions are summed per level.

    Args:
        in_channels (int): input channels per level.
        out_channels (int): output channels per level.
        kernel_size / dilation / groups: per-branch conv hyper-parameters,
            indexed 0 (coarser-level branch), 1 (same level), 2 (finer level).
        iBN (bool): apply the shared integrated BatchNorm across levels.
        part_deform (bool): use deformable convs on the upper levels.
    """

    def __init__(self, in_channels=256, out_channels=256,
                 kernel_size=(3, 3, 3), dilation=(1, 1, 1), groups=(1, 1, 1),
                 iBN=False, part_deform=False):
        # Note: defaults are tuples now (immutable) — indexing behavior is
        # identical to the original list defaults.
        super(PConvModule, self).__init__()
        self.iBN = iBN
        self.Pconv = nn.ModuleList()
        for idx in range(3):
            # "same" padding for the dilated kernel.
            pad = (kernel_size[idx] + (dilation[idx] - 1) * 2) // 2
            self.Pconv.append(sepc_conv(
                in_channels, out_channels, kernel_size=kernel_size[idx],
                dilation=dilation[idx], groups=groups[idx], padding=pad,
                stride=2 if idx == 2 else 1,  # branch 2 downsamples the finer level
                part_deform=part_deform))
        if self.iBN:
            self.bn = nn.BatchNorm2d(256)
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        """Normal-initialize every branch conv (weight std 0.01, zero bias)."""
        for m in self.Pconv:
            init.normal_(m.weight.data, 0, 0.01)
            if m.bias is not None:
                m.bias.data.zero_()

    def forward(self, x):
        """Fuse each level with its neighbours; returns a list of tensors."""
        next_x = []
        for level, feature in enumerate(x):
            temp_fea = self.Pconv[1](level, feature)
            if level > 0:
                # Contribution from the finer (higher-res) level, stride-2.
                temp_fea += self.Pconv[2](level, x[level - 1])
            if level < len(x) - 1:
                # Contribution from the coarser level, upsampled to match.
                # F.interpolate(mode='bilinear', align_corners=True) is the
                # documented equivalent of the deprecated F.upsample_bilinear.
                temp_fea += F.interpolate(
                    self.Pconv[0](level, x[level + 1]),
                    size=[temp_fea.size(2), temp_fea.size(3)],
                    mode='bilinear', align_corners=True)
            next_x.append(temp_fea)
        if self.iBN:
            next_x = iBN(next_x, self.bn)
        next_x = [self.relu(item) for item in next_x]
        return next_x
|
def iBN(fms, bn):
    """Integrated batch normalization across pyramid levels.

    Flattens every feature map, concatenates them along the last axis so a
    single BatchNorm2d normalizes all levels jointly, then restores each
    map to its original (n, c, h, w) shape.

    Args:
        fms: list of 4-D tensors sharing batch size and channel count.
        bn: an ``nn.BatchNorm2d`` instance applied to the joined tensor.

    Returns:
        list of tensors with the same shapes as the inputs.
    """
    spatial = [f.shape[2:] for f in fms]
    n, c = fms[0].shape[:2]
    # Join all levels into one (n, c, 1, sum(h*w)) tensor.
    joined = torch.cat([f.view(n, c, 1, -1) for f in fms], dim=-1)
    joined = bn(joined)
    # Split back into per-level chunks and restore spatial shapes.
    chunks = torch.split(joined, [h * w for h, w in spatial], dim=-1)
    return [chunk.view(n, c, h, w) for chunk, (h, w) in zip(chunks, spatial)]
|
class sepc_conv(DeformConv):
    """DeformConv that can fall back to a plain conv on low pyramid levels.

    ``forward`` takes the pyramid level index as its first argument: levels
    below ``start_level`` (or any level when ``part_deform`` is False) use a
    regular conv2d with this layer's weights; higher levels predict offsets
    with ``conv_offset`` and run a deformable convolution.
    """
    def __init__(self, *args, part_deform=False, **kwargs):
        super(sepc_conv, self).__init__(*args, **kwargs)
        self.part_deform = part_deform
        if self.part_deform:
            # Offset branch: 2 offsets (x, y) per kernel position per
            # deformable group.
            self.conv_offset = nn.Conv2d(self.in_channels, (((self.deformable_groups * 2) * self.kernel_size[0]) * self.kernel_size[1]), kernel_size=self.kernel_size, stride=_pair(self.stride), padding=_pair(self.padding), bias=True)
            self.init_offset()
        # NOTE(review): a bias parameter is (re)bound here unconditionally —
        # DeformConv itself applies no bias, so it is added manually in
        # forward() after the deformable conv.
        self.bias = nn.Parameter(torch.zeros(self.out_channels))
        # Levels below this index always take the plain-conv path.
        self.start_level = 1
    def init_offset(self):
        # Zero-init so the deformable conv initially behaves like a plain conv.
        self.conv_offset.weight.data.zero_()
        self.conv_offset.bias.data.zero_()
    def forward(self, i, x):
        """Apply the conv to feature map ``x`` of pyramid level ``i``."""
        if ((i < self.start_level) or (not self.part_deform)):
            return torch.nn.functional.conv2d(x, self.weight, bias=self.bias, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups)
        offset = self.conv_offset(x)
        # deform_conv has no bias term, so broadcast-add ours afterwards.
        return (deform_conv(x, offset, self.weight, self.stride, self.padding, self.dilation, self.groups, self.deformable_groups) + self.bias.unsqueeze(0).unsqueeze((- 1)).unsqueeze((- 1)))
|
class MultipleKVAction(argparse.Action):
    """argparse action that splits each ``KEY=VALUE`` argument on the first
    ``=`` and collects the pairs into a dict stored at ``namespace.dest``.

    Values are coerced to int, float or bool when possible; comma-separated
    values (``KEY=V1,V2,V3``) become a list of coerced values.
    """

    def _parse_int_float_bool(self, val):
        """Best-effort coercion of a string to int, float or bool;
        returns the original string when no coercion applies."""
        try:
            return int(val)
        except ValueError:
            pass
        try:
            return float(val)
        except ValueError:
            pass
        if val.lower() in ('true', 'false'):
            # Simplified from ``True if ... else False``.
            return val.lower() == 'true'
        return val

    def __call__(self, parser, namespace, values, option_string=None):
        options = {}
        for kv in values:
            # maxsplit=1 keeps any further '=' inside the value.
            key, val = kv.split('=', maxsplit=1)
            val = [self._parse_int_float_bool(v) for v in val.split(',')]
            if len(val) == 1:
                # Unwrap single values so KEY=V stores a scalar, not [V].
                val = val[0]
            options[key] = val
        setattr(namespace, self.dest, options)
|
def parse_args():
    """Parse the command line of the test/eval script.

    Also mirrors ``--local_rank`` into the ``LOCAL_RANK`` environment
    variable so torch.distributed launch utilities can pick it up.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser(description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file in pickle format')
    # Help-string typo fixes: 'increasethe' -> 'increase the',
    # 'perform' -> 'performing', 'isuseful' -> 'is useful'.
    parser.add_argument('--fuse_conv_bn', action='store_true', help='Whether to fuse conv and bn, this will slightly increase the inference speed')
    parser.add_argument('--format_only', action='store_true', help='Format the output results without performing evaluation. It is useful when you want to format the result to a specific format and submit it to the test server')
    parser.add_argument('--eval', type=str, nargs='+', help='evaluation metrics, which depends on the dataset, e.g., "bbox", "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument('--gpu_collect', action='store_true', help='whether to use gpu to collect results.')
    parser.add_argument('--tmpdir', help='tmp directory used for collecting results from multiple workers, available when gpu_collect is not specified')
    parser.add_argument('--options', nargs='+', action=MultipleKVAction, help='custom options')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
|
def main():
    """Entry point of the test script: build dataset and model from the
    config, run single- or multi-GPU inference, then dump / format /
    evaluate the results on rank 0."""
    args = parse_args()
    # At least one output action must be requested.
    assert (args.out or args.eval or args.format_only or args.show), 'Please specify at least one operation (save/eval/format/show the results) with the argument "--out", "--eval", "--format_only" or "--show"'
    if (args.eval and args.format_only):
        raise ValueError('--eval and --format_only cannot be both specified')
    if ((args.out is not None) and (not args.out.endswith(('.pkl', '.pickle')))):
        raise ValueError('The output file must be a pkl file.')
    cfg = mmcv.Config.fromfile(args.config)
    # cudnn benchmark speeds up fixed-size inference when the config asks for it.
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Inference: no pretrained weights to fetch, dataset in test mode.
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset, imgs_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if (fp16_cfg is not None):
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)
    # Prefer class names stored in the checkpoint; fall back to the dataset's.
    if ('CLASSES' in checkpoint['meta']):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    if (not distributed):
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show)
    else:
        model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir, args.gpu_collect)
    (rank, _) = get_dist_info()
    # Only rank 0 writes / formats / evaluates the collected results.
    if (rank == 0):
        if args.out:
            print('\nwriting results to {}'.format(args.out))
            mmcv.dump(outputs, args.out)
        kwargs = ({} if (args.options is None) else args.options)
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            dataset.evaluate(outputs, args.eval, **kwargs)
|
def parse_args():
    """Parse the command line of the training script.

    Also mirrors ``--local_rank`` into the ``LOCAL_RANK`` environment
    variable so torch.distributed launch utilities can pick it up.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    arg_parser = argparse.ArgumentParser(description='Train a detector')
    arg_parser.add_argument('config', help='train config file path')
    arg_parser.add_argument('--work_dir', help='the dir to save logs and models')
    arg_parser.add_argument('--resume_from', help='the checkpoint file to resume from')
    arg_parser.add_argument('--validate', action='store_true', help='whether to evaluate the checkpoint during training')
    # --gpus and --gpu-ids are mutually exclusive ways to pick devices.
    gpu_group = arg_parser.add_mutually_exclusive_group()
    gpu_group.add_argument('--gpus', type=int, help='number of gpus to use (only applicable to non-distributed training)')
    gpu_group.add_argument('--gpu-ids', type=int, nargs='+', help='ids of gpus to use (only applicable to non-distributed training)')
    arg_parser.add_argument('--seed', type=int, default=None, help='random seed')
    arg_parser.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.')
    arg_parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
    arg_parser.add_argument('--local_rank', type=int, default=0)
    arg_parser.add_argument('--autoscale-lr', action='store_true', help='automatically scale lr with the number of gpus')
    args = arg_parser.parse_args()
    os.environ.setdefault('LOCAL_RANK', str(args.local_rank))
    return args
|
def main():
    """Entry point of the training script: merge CLI args into the config,
    set up logging and seeding, then launch detector training."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # CLI options take precedence over the config file.
    if (args.work_dir is not None):
        cfg.work_dir = args.work_dir
    if (args.resume_from is not None):
        cfg.resume_from = args.resume_from
    if (args.gpu_ids is not None):
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = (range(1) if (args.gpus is None) else range(args.gpus))
    if args.autoscale_lr:
        # Linear scaling rule: lr proportional to GPU count, base of 8 GPUs.
        cfg.optimizer['lr'] = ((cfg.optimizer['lr'] * len(cfg.gpu_ids)) / 8)
    if (args.launcher == 'none'):
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # Timestamped log file inside the work dir.
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, '{}.log'.format(timestamp))
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # ``meta`` is passed to the trainer and persisted for reproducibility.
    meta = dict()
    env_info_dict = collect_env()
    env_info = '\n'.join(['{}: {}'.format(k, v) for (k, v) in env_info_dict.items()])
    dash_line = (('-' * 60) + '\n')
    logger.info((((('Environment info:\n' + dash_line) + env_info) + '\n') + dash_line))
    meta['env_info'] = env_info
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('Config:\n{}'.format(cfg.text))
    if (args.seed is not None):
        logger.info('Set random seed to {}, deterministic: {}'.format(args.seed, args.deterministic))
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    model = build_detector(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    datasets = [build_dataset(cfg.data.train)]
    # A two-stage workflow means train+val; the val dataset reuses the
    # training pipeline so it can be iterated the same way.
    if (len(cfg.workflow) == 2):
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if (cfg.checkpoint_config is not None):
        # Stamp version / config / class names into every saved checkpoint.
        cfg.checkpoint_config.meta = dict(mmdet_version=__version__, config=cfg.text, CLASSES=datasets[0].CLASSES)
    model.CLASSES = datasets[0].CLASSES
    train_detector(model, datasets, cfg, distributed=distributed, validate=args.validate, timestamp=timestamp, meta=meta)
|
def generate_previews():
    """Build the EnumProperty item list for the clothing thumbnail gallery.

    Loads every bundled gallery image into the 'thumbnail_previews' preview
    collection and returns (identifier, name, description, icon_id, index)
    tuples suitable for a Blender EnumProperty.
    """
    gcoll = avt_preview_collections['thumbnail_previews']
    image_location = gcoll.images_location
    gallery = ['dress01.jpg', 'dress02.jpg', 'dress03.jpg', 'dress04.jpg', 'dress05.jpg', 'dress06.jpg', 'glasses01.jpg', 'glasses02.jpg', 'hat01.jpg', 'hat02.jpg', 'hat03.jpg', 'hat04.jpg', 'jacket01.jpg', 'jacket02.jpg', 'pants01.jpg', 'pants02.jpg', 'pants03.jpg', 'pants04.jpg', 'pants05.jpg', 'pants06.jpg', 'shirt01.jpg', 'shirt02.jpg', 'shirt03.jpg', 'shirt04.jpg', 'shirt05.jpg', 'shirt06.jpg', 'shirt07.jpg', 'shoes01.jpg', 'shoes02.jpg', 'shoes03.jpg', 'shoes04.jpg', 'skirt01.jpg', 'skirt02.jpg', 'suit01.jpg', 'swimming01.jpg', 'swimming02.jpg', 'swimming03.jpg', 'swimming04.jpg']
    enum_items = []
    # enumerate(start=1) replaces the original manual counter; enum indices
    # remain 1-based as before.
    for idx, fname in enumerate(gallery, start=1):
        label = fname.split('.')[0]
        filepath = (image_location + '/') + fname
        thumb = gcoll.load(filepath, filepath, 'IMAGE')
        enum_items.append((fname, fname, label, thumb.icon_id, idx))
    return enum_items
|
def update_weights(self, context):
    """Property-update callback: push the slider values into the avatar
    state, reshape the body mesh, and re-deform every dressed cloth mesh.

    Args:
        self: the object carrying the shape-slider properties.
        context: Blender context (unused).
    """
    global mAvt
    if mAvt.body is not None:
        obj = mAvt.body
    else:
        # BUG FIX: the original left ``obj`` unbound on this branch, raising
        # NameError at refresh_shape below; rebind it after the reload.
        reload_avatar()
        obj = mAvt.body
    mAvt.val_breast = self.val_breast
    mAvt.val_torso = self.val_torso
    # Hips and weight values are negated before being stored, as in the
    # original implementation.
    mAvt.val_hips = -self.val_hips
    mAvt.val_armslegs = self.val_limbs
    mAvt.val_weight = -self.val_weight
    mAvt.val_strength = self.val_strength
    mAvt.refresh_shape(obj)
    mAvt.np_mesh = mAvt.read_verts(obj.data)
    mAvt.np_mesh_diff = mAvt.np_mesh - mAvt.np_mesh_prev
    # Deform every mesh except the body itself (renamed loop variable so the
    # builtin ``object`` is not shadowed).
    for scene_obj in bpy.data.objects:
        if (scene_obj.type == 'MESH') and (scene_obj.name != 'Avatar:Body'):
            mAvt.deform_cloth(cloth_name=str(scene_obj.name))
|
def load_model_from_blend_file(filename):
    """Append every object from the given .blend file and link it into the
    active scene collection.

    Args:
        filename (str): path to the .blend file to load from.
    """
    with bpy.data.libraries.load(filename) as (data_from, data_to):
        # list(...) instead of the original pass-through comprehension.
        data_to.objects = list(data_from.objects)
    for obj in data_to.objects:
        bpy.context.scene.collection.objects.link(obj)
|
def reload_avatar():
    """Re-acquire all Avatar datablock references after the model has been
    (re)loaded, snapshot the rest pose, and rebuild the body-vertex KD-tree
    used for cloth deformation."""
    global mAvt
    mAvt.load_shape_model()
    mAvt.eyes = bpy.data.objects['Avatar:High-poly']
    mAvt.body = bpy.data.objects['Avatar:Body']
    mAvt.skel = bpy.data.objects['Avatar']
    mAvt.armature = bpy.data.armatures['Avatar']
    mAvt.skel_ref = motion_utils.get_rest_pose(mAvt.skel, mAvt.list_bones)
    # World-space position of the Hips bone head.
    hips_head = mAvt.skel.pose.bones['Hips'].head
    mAvt.hips_pos = (mAvt.skel.matrix_world @ Matrix.Translation(hips_head)).to_translation()
    # Snapshot pose (basis) and rest (local) matrices for every bone.
    mAvt.list_matrices_basis = [b.matrix_basis.copy() for b in mAvt.skel.pose.bones]
    mAvt.list_matrices_local = [b.matrix_local.copy() for b in mAvt.skel.data.bones]
    # KD-tree over body vertices for fast nearest-neighbour lookups.
    verts = mAvt.body.data.vertices
    mAvt.body_kdtree = mathutils.kdtree.KDTree(len(verts))
    for idx, vert in enumerate(verts):
        mAvt.body_kdtree.insert(vert.co, idx)
    mAvt.body_kdtree.balance()
|
class AVATAR_OT_LoadModel(bpy.types.Operator):
    bl_idname = 'avt.load_model'
    bl_label = 'Load human model'
    bl_description = 'Loads a parametric naked human model'

    def execute(self, context):
        """Append the avatar .blend model, initialize the global mAvt state,
        enable collision on the body mesh, and assign skin/eye materials."""
        global mAvt
        global avt_path
        model_file = '%s/body/models/avatar.blend' % avt_path
        load_model_from_blend_file(model_file)
        # The initialization below used to be duplicated verbatim from
        # reload_avatar(); call the helper so the two paths cannot drift.
        reload_avatar()
        bpy.context.view_layer.objects.active = mAvt.body
        bpy.ops.object.mode_set(mode='OBJECT')
        bpy.ops.object.modifier_add(type='COLLISION')
        # Reload material_utils so edits to it are picked up without a restart.
        import material_utils
        importlib.reload(material_utils)
        skin_mat = material_utils.create_material_generic('skin', 0, 1)
        (tex_img, tex_norm, tex_spec) = dressing.read_file_textures(avt_path, 'skin')
        material_utils.assign_textures_generic_mat(mAvt.body, skin_mat, tex_img, tex_norm, tex_spec)
        eyes_mat = material_utils.create_material_generic('eyes', 0, 1)
        (tex_img, tex_norm, tex_spec) = dressing.read_file_textures(avt_path, 'eyes')
        material_utils.assign_textures_generic_mat(mAvt.eyes, eyes_mat, tex_img, tex_norm, tex_spec)
        return {'FINISHED'}
|
class AVATAR_OT_SetBodyShape(bpy.types.Operator):
    bl_idname = 'avt.set_body_shape'
    bl_label = 'Set Body Shape'
    bl_description = 'Set Body Shape'

    def execute(self, context):
        """Apply the current shape parameters to the body mesh and re-deform
        every dressed cloth mesh from the resulting vertex displacement."""
        global mAvt
        obj = mAvt.body
        # Snapshot the mesh before reshaping so np_mesh_diff captures the
        # per-vertex displacement that drives the cloth deformation.
        cp_vals = obj.data.copy()
        mAvt.np_mesh_prev = mAvt.read_verts(cp_vals)
        mAvt.refresh_shape(obj)
        mAvt.np_mesh = mAvt.read_verts(obj.data)
        mAvt.np_mesh_diff = mAvt.np_mesh - mAvt.np_mesh_prev
        # Deform every mesh except the body itself (renamed loop variable so
        # the builtin ``object`` is not shadowed).
        for scene_obj in bpy.data.objects:
            if (scene_obj.type == 'MESH') and (scene_obj.name != 'Avatar:Body'):
                mAvt.deform_cloth(cloth_name=str(scene_obj.name))
        return {'FINISHED'}
|
class AVATAR_OT_ResetParams(bpy.types.Operator):
    bl_idname = 'avt.reset_params'
    bl_label = 'Reset Parameters'
    bl_description = 'Reset original parameters of body shape'

    def execute(self, context):
        """Zero every body-shape slider, reshape the body back to its default
        form, and re-deform all dressed cloth meshes."""
        global mAvt
        obj = bpy.data.objects['Avatar:Body']
        # Snapshot the mesh before reshaping so np_mesh_diff captures the
        # per-vertex displacement that drives the cloth deformation.
        cp_vals = obj.data.copy()
        mAvt.np_mesh_prev = mAvt.read_verts(cp_vals)
        # Zeroing the properties also fires their update callbacks.
        obj.val_breast = obj.val_torso = obj.val_hips = obj.val_limbs = 0.0
        obj.val_weight = obj.val_strength = 0.0
        mAvt.refresh_shape(obj)
        mAvt.np_mesh = mAvt.read_verts(obj.data)
        mAvt.np_mesh_diff = mAvt.np_mesh - mAvt.np_mesh_prev
        # Deform every mesh except the body itself (renamed loop variable so
        # the builtin ``object`` is not shadowed).
        for scene_obj in bpy.data.objects:
            if (scene_obj.type == 'MESH') and (scene_obj.name != 'Avatar:Body'):
                mAvt.deform_cloth(cloth_name=str(scene_obj.name))
        return {'FINISHED'}
|
class AVATAR_PT_LoadPanel(bpy.types.Panel):
    """Sidebar panel (3D View > Avatar tab) to load the human model and
    adjust its body-shape sliders."""
    bl_idname = 'AVATAR_PT_LoadPanel'
    bl_label = 'Load model'
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'Avatar'
    # NOTE: these assignments execute once at class-definition time and
    # register the slider properties on *every* bpy Object; each slider
    # change triggers the update_weights callback.
    bpy.types.Object.val_breast = FloatProperty(name='Breast Size', description='Breasts Size', default=0, min=0.0, max=1.0, precision=2, update=update_weights)
    bpy.types.Object.val_torso = FloatProperty(name='Shoulders Fat', description='Shoulders Fat', default=0, min=(- 0.3), max=0.3, precision=2, update=update_weights)
    bpy.types.Object.val_limbs = FloatProperty(name='Limbs Fat', description='Limbs Fat', default=0, min=0.0, max=1.0, precision=2, update=update_weights)
    bpy.types.Object.val_hips = FloatProperty(name='Hips Fat', description='Hips Fat', default=0, min=0.0, max=1.0, precision=2, update=update_weights)
    bpy.types.Object.val_weight = FloatProperty(name='Weight', description='Weight', default=0, min=(- 0.5), max=1.5, precision=2, update=update_weights)
    bpy.types.Object.val_strength = FloatProperty(name='Strength', description='Body Strength', default=0, min=0.0, max=0.5, precision=2, update=update_weights)
    def draw(self, context):
        """Draw the load button and, when a mesh/armature is active, the
        body-shape sliders."""
        layout = self.layout
        obj = context.object
        scene = context.scene
        row = layout.row()
        row.operator('avt.load_model', text='Load human')
        # Sliders are only shown when a relevant object is selected.
        if ((obj is None) or (obj.type not in ['MESH', 'ARMATURE'])):
            return
        layout.separator()
        layout.prop(obj, 'val_breast')
        layout.prop(obj, 'val_torso')
        layout.prop(obj, 'val_limbs')
        layout.prop(obj, 'val_hips')
        layout.prop(obj, 'val_weight')
        layout.prop(obj, 'val_strength')
        layout.separator()
        row = layout.row()
        row.operator('avt.reset_params', text='Reset parameters')
|
class AVATAR_OT_CreateStudio(bpy.types.Operator):
    bl_idname = 'avt.create_studio'
    bl_label = 'Create Studio'
    bl_description = 'Set up a lighting studio for high quality renderings'

    def execute(self, context):
        """Load the pre-built lighting studio from the add-on assets."""
        global avt_path
        dressing.load_studio(avt_path)
        return {'FINISHED'}
|
class AVATAR_OT_WearCloth(bpy.types.Operator):
    bl_idname = 'avt.wear_cloth'
    bl_label = 'Wear Cloth'
    bl_description = 'Dress human with selected cloth'

    def execute(self, context):
        """Load the cloth model selected in the thumbnail gallery, make it the
        only selected object, and assign its textured material.

        (Removed the unused ``scn``/``obj`` locals from the original.)
        """
        global avt_path
        # Thumbnail identifiers are image file names; strip the extension.
        iconname = bpy.context.scene.avt_thumbnails
        iconname = iconname.split('.')[0]
        # Deselect everything so only the freshly loaded cloth is selected.
        for o in bpy.context.scene.objects:
            o.select_set(False)
        c_file = ('%s/dressing/models/clothes/%s.obj' % (avt_path, iconname))
        dressing.load_cloth(c_file, iconname)
        cloth = bpy.data.objects[iconname]
        cloth.select_set(True)
        # Reload material_utils so edits to it are picked up without a restart.
        import material_utils
        importlib.reload(material_utils)
        mat_id = dressing.get_material_id(iconname)
        cloth_mat = material_utils.create_material_generic(iconname, 0, mat_id)
        (tex_img, tex_norm, tex_spec) = dressing.read_file_textures(avt_path, iconname)
        material_utils.assign_textures_generic_mat(cloth, cloth_mat, tex_img, tex_norm, tex_spec)
        return {'FINISHED'}
|
class AVATAR_PT_DressingPanel(bpy.types.Panel):
    """Sidebar panel (3D View > Avatar tab) with the clothing thumbnail
    picker and the dressing / studio buttons."""
    bl_idname = 'AVATAR_PT_DressingPanel'
    bl_label = 'Dress Human'
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_category = 'Avatar'

    def draw(self, context):
        """Draw the gallery selector and the two operator buttons."""
        layout = self.layout
        panel_obj = context.object
        scene = context.scene
        # Thumbnail gallery selector.
        picker_row = layout.row()
        picker_row.template_icon_view(context.scene, 'avt_thumbnails')
        # Empty row/column kept: layout calls are preserved exactly so the
        # panel spacing stays identical to the original.
        spacer_row = layout.row()
        spacer_col = spacer_row.column()
        spacer_inner = spacer_col.row()
        cloth_row = layout.row()
        cloth_row.operator('avt.wear_cloth', text='Load selected cloth')
        layout.separator()
        studio_row = layout.row()
        studio_row.operator('avt.create_studio', text='Create studio')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.