Dataset schema (one row per source file):
repo: string (length 2–99)
file: string (length 13–225)
code: string (length 0–18.3M)
file_length: int64 (0–18.3M)
avg_line_length: float64 (0–1.36M)
max_line_length: int64 (0–4.26M)
extension_type: string (1 class)
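The numeric columns can be recomputed from `code` itself; a minimal sketch, assuming each row is exposed as a plain Python dict with the fields above:

# Minimal sketch: recompute the per-row statistics from the `code` column.
def row_stats(code: str) -> dict:
    lines = code.split('\n')
    return {
        'file_length': len(code),
        'avg_line_length': sum(len(line) for line in lines) / len(lines),
        'max_line_length': max(len(line) for line in lines),
    }

# Example: a one-line file of 47 characters plus a trailing newline yields
# file_length=48, avg_line_length=23.5, max_line_length=47, matching the
# __init__.py rows below.
print(row_stats('# Copyright (c) OpenMMLab. All rights reserved.\n'))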
repo: mmyolo
file: mmyolo-main/tests/test_models/test_task_modules/test_assigners/test_batch_atss_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase

import torch

from mmyolo.models.task_modules.assigners import BatchATSSAssigner


class TestBatchATSSAssigner(TestCase):

    def test_batch_atss_assigner(self):
        num_classes = 2
        batch_size = 2
        batch_atss_assigner = BatchATSSAssigner(
            topk=3,
            iou_calculator=dict(type='mmdet.BboxOverlaps2D'),
            num_classes=num_classes)
        priors = torch.FloatTensor([
            [4., 4., 8., 8.],
            [12., 4., 8., 8.],
            [20., 4., 8., 8.],
            [28., 4., 8., 8.],
        ]).repeat(21, 1)
        gt_bboxes = torch.FloatTensor([
            [0, 0, 60, 93],
            [229, 0, 532, 157],
        ]).unsqueeze(0).repeat(batch_size, 1, 1)
        gt_labels = torch.LongTensor([
            [0],
            [11],
        ]).unsqueeze(0).repeat(batch_size, 1, 1)
        num_level_bboxes = [64, 16, 4]
        pad_bbox_flag = torch.FloatTensor([
            [1],
            [0],
        ]).unsqueeze(0).repeat(batch_size, 1, 1)
        pred_bboxes = torch.FloatTensor([
            [-4., -4., 12., 12.],
            [4., -4., 20., 12.],
            [12., -4., 28., 12.],
            [20., -4., 36., 12.],
        ]).unsqueeze(0).repeat(batch_size, 21, 1)
        batch_assign_result = batch_atss_assigner.forward(
            pred_bboxes, priors, num_level_bboxes, gt_labels, gt_bboxes,
            pad_bbox_flag)

        assigned_labels = batch_assign_result['assigned_labels']
        assigned_bboxes = batch_assign_result['assigned_bboxes']
        assigned_scores = batch_assign_result['assigned_scores']
        fg_mask_pre_prior = batch_assign_result['fg_mask_pre_prior']

        self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 84]))
        self.assertEqual(assigned_bboxes.shape,
                         torch.Size([batch_size, 84, 4]))
        self.assertEqual(assigned_scores.shape,
                         torch.Size([batch_size, 84, num_classes]))
        self.assertEqual(fg_mask_pre_prior.shape,
                         torch.Size([batch_size, 84]))

    def test_batch_atss_assigner_with_empty_gt(self):
        """Test corner case where an image might have no true detections."""
        num_classes = 2
        batch_size = 2
        batch_atss_assigner = BatchATSSAssigner(
            topk=3,
            iou_calculator=dict(type='mmdet.BboxOverlaps2D'),
            num_classes=num_classes)
        priors = torch.FloatTensor([
            [4., 4., 8., 8.],
            [12., 4., 8., 8.],
            [20., 4., 8., 8.],
            [28., 4., 8., 8.],
        ]).repeat(21, 1)
        num_level_bboxes = [64, 16, 4]
        pad_bbox_flag = torch.FloatTensor([
            [1],
            [0],
        ]).unsqueeze(0).repeat(batch_size, 1, 1)
        pred_bboxes = torch.FloatTensor([
            [-4., -4., 12., 12.],
            [4., -4., 20., 12.],
            [12., -4., 28., 12.],
            [20., -4., 36., 12.],
        ]).unsqueeze(0).repeat(batch_size, 21, 1)
        gt_bboxes = torch.zeros(batch_size, 0, 4)
        gt_labels = torch.zeros(batch_size, 0, 1)
        batch_assign_result = batch_atss_assigner.forward(
            pred_bboxes, priors, num_level_bboxes, gt_labels, gt_bboxes,
            pad_bbox_flag)

        assigned_labels = batch_assign_result['assigned_labels']
        assigned_bboxes = batch_assign_result['assigned_bboxes']
        assigned_scores = batch_assign_result['assigned_scores']
        fg_mask_pre_prior = batch_assign_result['fg_mask_pre_prior']

        self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 84]))
        self.assertEqual(assigned_bboxes.shape,
                         torch.Size([batch_size, 84, 4]))
        self.assertEqual(assigned_scores.shape,
                         torch.Size([batch_size, 84, num_classes]))
        self.assertEqual(fg_mask_pre_prior.shape,
                         torch.Size([batch_size, 84]))

    def test_batch_atss_assigner_with_empty_boxes(self):
        """Test corner case where a network might predict no boxes."""
        num_classes = 2
        batch_size = 2
        batch_atss_assigner = BatchATSSAssigner(
            topk=3,
            iou_calculator=dict(type='mmdet.BboxOverlaps2D'),
            num_classes=num_classes)
        priors = torch.zeros(84, 4)
        gt_bboxes = torch.FloatTensor([
            [0, 0, 60, 93],
            [229, 0, 532, 157],
        ]).unsqueeze(0).repeat(batch_size, 1, 1)
        gt_labels = torch.LongTensor([
            [0],
            [11],
        ]).unsqueeze(0).repeat(batch_size, 1, 1)
        num_level_bboxes = [64, 16, 4]
        pad_bbox_flag = torch.FloatTensor([[1], [0]]).unsqueeze(0).repeat(
            batch_size, 1, 1)
        pred_bboxes = torch.FloatTensor([
            [-4., -4., 12., 12.],
            [4., -4., 20., 12.],
            [12., -4., 28., 12.],
            [20., -4., 36., 12.],
        ]).unsqueeze(0).repeat(batch_size, 21, 1)
        batch_assign_result = batch_atss_assigner.forward(
            pred_bboxes, priors, num_level_bboxes, gt_labels, gt_bboxes,
            pad_bbox_flag)

        assigned_labels = batch_assign_result['assigned_labels']
        assigned_bboxes = batch_assign_result['assigned_bboxes']
        assigned_scores = batch_assign_result['assigned_scores']
        fg_mask_pre_prior = batch_assign_result['fg_mask_pre_prior']

        self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 84]))
        self.assertEqual(assigned_bboxes.shape,
                         torch.Size([batch_size, 84, 4]))
        self.assertEqual(assigned_scores.shape,
                         torch.Size([batch_size, 84, num_classes]))
        self.assertEqual(fg_mask_pre_prior.shape,
                         torch.Size([batch_size, 84]))

    def test_batch_atss_assigner_with_empty_boxes_and_gt(self):
        """Test corner case where a network might predict no boxes and no
        gt."""
        num_classes = 2
        batch_size = 2
        batch_atss_assigner = BatchATSSAssigner(
            topk=3,
            iou_calculator=dict(type='mmdet.BboxOverlaps2D'),
            num_classes=num_classes)
        priors = torch.zeros(84, 4)
        gt_bboxes = torch.zeros(batch_size, 0, 4)
        gt_labels = torch.zeros(batch_size, 0, 1)
        num_level_bboxes = [64, 16, 4]
        pad_bbox_flag = torch.zeros(batch_size, 0, 1)
        pred_bboxes = torch.zeros(batch_size, 0, 4)
        batch_assign_result = batch_atss_assigner.forward(
            pred_bboxes, priors, num_level_bboxes, gt_labels, gt_bboxes,
            pad_bbox_flag)

        assigned_labels = batch_assign_result['assigned_labels']
        assigned_bboxes = batch_assign_result['assigned_bboxes']
        assigned_scores = batch_assign_result['assigned_scores']
        fg_mask_pre_prior = batch_assign_result['fg_mask_pre_prior']

        self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 84]))
        self.assertEqual(assigned_bboxes.shape,
                         torch.Size([batch_size, 84, 4]))
        self.assertEqual(assigned_scores.shape,
                         torch.Size([batch_size, 84, num_classes]))
        self.assertEqual(fg_mask_pre_prior.shape,
                         torch.Size([batch_size, 84]))
file_length: 7366 | avg_line_length: 40.857955 | max_line_length: 79 | extension_type: py
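The test modules in this dump are ordinary `unittest` suites; a minimal sketch of running one locally (assuming a checkout laid out as in the `file` column, with mmyolo and its dependencies installed):

import unittest

# Discover and run the assigner tests from a local checkout. The directory
# path mirrors the `file` column above and is an assumption.
suite = unittest.defaultTestLoader.discover(
    'mmyolo-main/tests/test_models/test_task_modules/test_assigners')
unittest.TextTestRunner(verbosity=2).run(suite)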
repo: mmyolo
file: mmyolo-main/tests/test_models/test_task_modules/test_assigners/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
file_length: 48 | avg_line_length: 23.5 | max_line_length: 47 | extension_type: py

repo: mmyolo
file: mmyolo-main/tests/test_models/test_task_modules/test_assigners/test_batch_task_aligned_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase

import torch

from mmyolo.models.task_modules.assigners import BatchTaskAlignedAssigner


class TestBatchTaskAlignedAssigner(TestCase):

    def test_batch_task_aligned_assigner(self):
        batch_size = 2
        num_classes = 4
        assigner = BatchTaskAlignedAssigner(
            num_classes=num_classes, alpha=1, beta=6, topk=13, eps=1e-9)
        pred_scores = torch.FloatTensor([
            [0.1, 0.2],
            [0.2, 0.3],
            [0.3, 0.4],
            [0.4, 0.5],
        ]).unsqueeze(0).repeat(batch_size, 21, 1)
        priors = torch.FloatTensor([
            [0, 0, 4., 4.],
            [0, 0, 12., 4.],
            [0, 0, 20., 4.],
            [0, 0, 28., 4.],
        ]).repeat(21, 1)
        gt_bboxes = torch.FloatTensor([
            [0, 0, 60, 93],
            [229, 0, 532, 157],
        ]).unsqueeze(0).repeat(batch_size, 1, 1)
        gt_labels = torch.LongTensor([[0], [1]
                                      ]).unsqueeze(0).repeat(batch_size, 1, 1)
        pad_bbox_flag = torch.FloatTensor([[1], [0]]).unsqueeze(0).repeat(
            batch_size, 1, 1)
        pred_bboxes = torch.FloatTensor([
            [-4., -4., 12., 12.],
            [4., -4., 20., 12.],
            [12., -4., 28., 12.],
            [20., -4., 36., 12.],
        ]).unsqueeze(0).repeat(batch_size, 21, 1)
        assign_result = assigner.forward(pred_bboxes, pred_scores, priors,
                                         gt_labels, gt_bboxes, pad_bbox_flag)

        assigned_labels = assign_result['assigned_labels']
        assigned_bboxes = assign_result['assigned_bboxes']
        assigned_scores = assign_result['assigned_scores']
        fg_mask_pre_prior = assign_result['fg_mask_pre_prior']

        self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 84]))
        self.assertEqual(assigned_bboxes.shape,
                         torch.Size([batch_size, 84, 4]))
        self.assertEqual(assigned_scores.shape,
                         torch.Size([batch_size, 84, num_classes]))
        self.assertEqual(fg_mask_pre_prior.shape,
                         torch.Size([batch_size, 84]))
file_length: 2212 | avg_line_length: 37.824561 | max_line_length: 79 | extension_type: py

repo: mmyolo
file: mmyolo-main/tests/test_models/test_plugins/test_cbam.py
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase

import torch

from mmyolo.models.plugins import CBAM
from mmyolo.utils import register_all_modules

register_all_modules()


class TestCBAM(TestCase):

    def test_forward(self):
        tensor_shape = (2, 16, 20, 20)
        images = torch.randn(*tensor_shape)
        cbam = CBAM(16)
        out = cbam(images)
        self.assertEqual(out.shape, tensor_shape)

        # test other ratio
        cbam = CBAM(16, reduce_ratio=8)
        out = cbam(images)
        self.assertEqual(out.shape, tensor_shape)

        # test other act_cfg in ChannelAttention
        cbam = CBAM(in_channels=16, act_cfg=dict(type='Sigmoid'))
        out = cbam(images)
        self.assertEqual(out.shape, tensor_shape)
file_length: 783 | avg_line_length: 23.5 | max_line_length: 65 | extension_type: py

repo: mmyolo
file: mmyolo-main/tests/test_models/test_plugins/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
file_length: 48 | avg_line_length: 23.5 | max_line_length: 47 | extension_type: py

repo: mmyolo
file: mmyolo-main/tests/test_datasets/test_yolov5_voc.py
# Copyright (c) OpenMMLab. All rights reserved.
import unittest

from mmengine.dataset import ConcatDataset

from mmyolo.datasets import YOLOv5VOCDataset
from mmyolo.utils import register_all_modules

register_all_modules()


class TestYOLOv5VocDataset(unittest.TestCase):

    def test_batch_shapes_cfg(self):
        batch_shapes_cfg = dict(
            type='BatchShapePolicy',
            batch_size=2,
            img_size=640,
            size_divisor=32,
            extra_pad_ratio=0.5)

        # test serialize_data=True
        dataset = YOLOv5VOCDataset(
            data_root='tests/data/VOCdevkit/',
            ann_file='VOC2007/ImageSets/Main/trainval.txt',
            data_prefix=dict(sub_data_root='VOC2007/'),
            test_mode=True,
            pipeline=[],
            batch_shapes_cfg=batch_shapes_cfg,
        )

        expected_img_ids = ['000001']
        expected_batch_shapes = [[672, 480]]
        for i, data in enumerate(dataset):
            assert data['img_id'] == expected_img_ids[i]
            assert data['batch_shape'].tolist() == expected_batch_shapes[i]

    def test_prepare_data(self):
        dataset = YOLOv5VOCDataset(
            data_root='tests/data/VOCdevkit/',
            ann_file='VOC2007/ImageSets/Main/trainval.txt',
            data_prefix=dict(sub_data_root='VOC2007/'),
            filter_cfg=dict(filter_empty_gt=False, min_size=0),
            pipeline=[],
            serialize_data=True,
            batch_shapes_cfg=None,
        )
        for data in dataset:
            assert 'dataset' in data

        # test with test_mode = True
        dataset = YOLOv5VOCDataset(
            data_root='tests/data/VOCdevkit/',
            ann_file='VOC2007/ImageSets/Main/trainval.txt',
            data_prefix=dict(sub_data_root='VOC2007/'),
            filter_cfg=dict(
                filter_empty_gt=True, min_size=32, bbox_min_size=None),
            pipeline=[],
            test_mode=True,
            batch_shapes_cfg=None)
        for data in dataset:
            assert 'dataset' not in data

    def test_concat_dataset(self):
        dataset = ConcatDataset(
            datasets=[
                dict(
                    type='YOLOv5VOCDataset',
                    data_root='tests/data/VOCdevkit/',
                    ann_file='VOC2007/ImageSets/Main/trainval.txt',
                    data_prefix=dict(sub_data_root='VOC2007/'),
                    filter_cfg=dict(filter_empty_gt=False, min_size=32),
                    pipeline=[]),
                dict(
                    type='YOLOv5VOCDataset',
                    data_root='tests/data/VOCdevkit/',
                    ann_file='VOC2012/ImageSets/Main/trainval.txt',
                    data_prefix=dict(sub_data_root='VOC2012/'),
                    filter_cfg=dict(filter_empty_gt=False, min_size=32),
                    pipeline=[])
            ],
            ignore_keys='dataset_type')

        dataset.full_init()
        self.assertEqual(len(dataset), 2)
file_length: 3002 | avg_line_length: 33.517241 | max_line_length: 75 | extension_type: py

repo: mmyolo
file: mmyolo-main/tests/test_datasets/test_yolov5_coco.py
# Copyright (c) OpenMMLab. All rights reserved.
import unittest

from mmyolo.datasets import YOLOv5CocoDataset


class TestYOLOv5CocoDataset(unittest.TestCase):

    def test_batch_shapes_cfg(self):
        batch_shapes_cfg = dict(
            type='BatchShapePolicy',
            batch_size=2,
            img_size=640,
            size_divisor=32,
            extra_pad_ratio=0.5)

        # test serialize_data=True
        dataset = YOLOv5CocoDataset(
            data_prefix=dict(img='imgs'),
            ann_file='tests/data/coco_sample.json',
            filter_cfg=dict(filter_empty_gt=False, min_size=0),
            pipeline=[],
            serialize_data=True,
            batch_shapes_cfg=batch_shapes_cfg,
        )
        expected_img_ids = [3, 0, 2, 1]
        expected_batch_shapes = [[512, 672], [512, 672], [672, 672],
                                 [672, 672]]
        for i, data in enumerate(dataset):
            assert data['img_id'] == expected_img_ids[i]
            assert data['batch_shape'].tolist() == expected_batch_shapes[i]

        # test serialize_data=False
        dataset = YOLOv5CocoDataset(
            data_prefix=dict(img='imgs'),
            ann_file='tests/data/coco_sample.json',
            filter_cfg=dict(filter_empty_gt=False, min_size=0),
            pipeline=[],
            serialize_data=False,
            batch_shapes_cfg=batch_shapes_cfg,
        )
        expected_img_ids = [3, 0, 2, 1]
        expected_batch_shapes = [[512, 672], [512, 672], [672, 672],
                                 [672, 672]]
        for i, data in enumerate(dataset):
            assert data['img_id'] == expected_img_ids[i]
            assert data['batch_shape'].tolist() == expected_batch_shapes[i]

    def test_prepare_data(self):
        dataset = YOLOv5CocoDataset(
            data_prefix=dict(img='imgs'),
            ann_file='tests/data/coco_sample.json',
            filter_cfg=dict(filter_empty_gt=False, min_size=0),
            pipeline=[],
            serialize_data=True,
            batch_shapes_cfg=None,
        )
        for data in dataset:
            assert 'dataset' in data

        # test with test_mode = True
        dataset = YOLOv5CocoDataset(
            data_prefix=dict(img='imgs'),
            ann_file='tests/data/coco_sample.json',
            test_mode=True,
            pipeline=[])
        for data in dataset:
            assert 'dataset' not in data
file_length: 2429 | avg_line_length: 32.75 | max_line_length: 75 | extension_type: py

repo: mmyolo
file: mmyolo-main/tests/test_datasets/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
file_length: 48 | avg_line_length: 23.5 | max_line_length: 47 | extension_type: py

repo: mmyolo
file: mmyolo-main/tests/test_datasets/test_utils.py
# Copyright (c) OpenMMLab. All rights reserved.
import unittest

import numpy as np
import torch
from mmdet.structures import DetDataSample
from mmdet.structures.bbox import HorizontalBoxes
from mmengine.structures import InstanceData

from mmyolo.datasets import BatchShapePolicy, yolov5_collate


def _rand_bboxes(rng, num_boxes, w, h):
    cx, cy, bw, bh = rng.rand(num_boxes, 4).T

    tl_x = ((cx * w) - (w * bw / 2)).clip(0, w)
    tl_y = ((cy * h) - (h * bh / 2)).clip(0, h)
    br_x = ((cx * w) + (w * bw / 2)).clip(0, w)
    br_y = ((cy * h) + (h * bh / 2)).clip(0, h)

    bboxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
    return bboxes


class TestYOLOv5Collate(unittest.TestCase):

    def test_yolov5_collate(self):
        rng = np.random.RandomState(0)

        inputs = torch.randn((3, 10, 10))
        data_samples = DetDataSample()
        gt_instances = InstanceData()
        bboxes = _rand_bboxes(rng, 4, 6, 8)
        gt_instances.bboxes = HorizontalBoxes(bboxes, dtype=torch.float32)
        labels = rng.randint(1, 2, size=len(bboxes))
        gt_instances.labels = torch.LongTensor(labels)
        data_samples.gt_instances = gt_instances

        out = yolov5_collate([dict(inputs=inputs, data_samples=data_samples)])
        self.assertIsInstance(out, dict)
        self.assertTrue(out['inputs'].shape == (1, 3, 10, 10))
        self.assertIsInstance(out['data_samples'], dict)
        self.assertTrue(out['data_samples']['bboxes_labels'].shape == (4, 6))

        out = yolov5_collate(
            [dict(inputs=inputs, data_samples=data_samples)] * 2)
        self.assertIsInstance(out, dict)
        self.assertTrue(out['inputs'].shape == (2, 3, 10, 10))
        self.assertIsInstance(out['data_samples'], dict)
        self.assertTrue(out['data_samples']['bboxes_labels'].shape == (8, 6))

    def test_yolov5_collate_with_multi_scale(self):
        rng = np.random.RandomState(0)

        inputs = torch.randn((3, 10, 10))
        data_samples = DetDataSample()
        gt_instances = InstanceData()
        bboxes = _rand_bboxes(rng, 4, 6, 8)
        gt_instances.bboxes = HorizontalBoxes(bboxes, dtype=torch.float32)
        labels = rng.randint(1, 2, size=len(bboxes))
        gt_instances.labels = torch.LongTensor(labels)
        data_samples.gt_instances = gt_instances

        out = yolov5_collate([dict(inputs=inputs, data_samples=data_samples)],
                             use_ms_training=True)
        self.assertIsInstance(out, dict)
        self.assertTrue(out['inputs'][0].shape == (3, 10, 10))
        self.assertIsInstance(out['data_samples'], dict)
        self.assertTrue(out['data_samples']['bboxes_labels'].shape == (4, 6))
        self.assertIsInstance(out['inputs'], list)
        self.assertIsInstance(out['data_samples']['bboxes_labels'],
                              torch.Tensor)

        out = yolov5_collate(
            [dict(inputs=inputs, data_samples=data_samples)] * 2,
            use_ms_training=True)
        self.assertIsInstance(out, dict)
        self.assertTrue(out['inputs'][0].shape == (3, 10, 10))
        self.assertIsInstance(out['data_samples'], dict)
        self.assertTrue(out['data_samples']['bboxes_labels'].shape == (8, 6))
        self.assertIsInstance(out['inputs'], list)
        self.assertIsInstance(out['data_samples']['bboxes_labels'],
                              torch.Tensor)


class TestBatchShapePolicy(unittest.TestCase):

    def test_batch_shape_policy(self):
        src_data_infos = [{
            'height': 20,
            'width': 100,
        }, {
            'height': 11,
            'width': 100,
        }, {
            'height': 21,
            'width': 100,
        }, {
            'height': 30,
            'width': 100,
        }, {
            'height': 10,
            'width': 100,
        }]

        expected_data_infos = [{
            'height': 10,
            'width': 100,
            'batch_shape': np.array([96, 672])
        }, {
            'height': 11,
            'width': 100,
            'batch_shape': np.array([96, 672])
        }, {
            'height': 20,
            'width': 100,
            'batch_shape': np.array([160, 672])
        }, {
            'height': 21,
            'width': 100,
            'batch_shape': np.array([160, 672])
        }, {
            'height': 30,
            'width': 100,
            'batch_shape': np.array([224, 672])
        }]

        batch_shapes_policy = BatchShapePolicy(batch_size=2)
        out_data_infos = batch_shapes_policy(src_data_infos)

        for i in range(5):
            self.assertEqual(
                (expected_data_infos[i]['height'],
                 expected_data_infos[i]['width']),
                (out_data_infos[i]['height'], out_data_infos[i]['width']))
            self.assertTrue(
                np.allclose(expected_data_infos[i]['batch_shape'],
                            out_data_infos[i]['batch_shape']))
file_length: 4918 | avg_line_length: 34.388489 | max_line_length: 79 | extension_type: py

repo: mmyolo
file: mmyolo-main/tests/test_datasets/test_transforms/test_mix_img_transforms.py
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest

import numpy as np
import torch
from mmdet.structures.bbox import HorizontalBoxes
from mmdet.structures.mask import BitmapMasks, PolygonMasks

from mmyolo.datasets import YOLOv5CocoDataset
from mmyolo.datasets.transforms import (Mosaic, Mosaic9, YOLOv5MixUp,
                                        YOLOXMixUp)
from mmyolo.utils import register_all_modules

register_all_modules()


class TestMosaic(unittest.TestCase):

    def setUp(self):
        """Set up the data info which is used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.pre_transform = [
            dict(
                type='LoadImageFromFile',
                file_client_args=dict(backend='disk')),
            dict(type='LoadAnnotations', with_bbox=True)
        ]

        self.dataset = YOLOv5CocoDataset(
            data_prefix=dict(
                img=osp.join(osp.dirname(__file__), '../../data')),
            ann_file=osp.join(
                osp.dirname(__file__), '../../data/coco_sample_color.json'),
            filter_cfg=dict(filter_empty_gt=False, min_size=32),
            pipeline=[])
        self.results = {
            'img': np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes_labels': np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes': np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
            'dataset': self.dataset
        }

    def test_transform(self):
        # test assertion for invalid img_scale
        with self.assertRaises(AssertionError):
            transform = Mosaic(img_scale=640)

        # test assertion for invalid probability
        with self.assertRaises(AssertionError):
            transform = Mosaic(prob=1.5)

        # test assertion for invalid max_cached_images
        with self.assertRaises(AssertionError):
            transform = Mosaic(use_cached=True, max_cached_images=1)

        transform = Mosaic(
            img_scale=(12, 10), pre_transform=self.pre_transform)
        results = transform(copy.deepcopy(self.results))
        self.assertTrue(results['img'].shape[:2] == (20, 24))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == np.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

    def test_transform_with_no_gt(self):
        self.results['gt_bboxes'] = np.empty((0, 4), dtype=np.float32)
        self.results['gt_bboxes_labels'] = np.empty((0, ), dtype=np.int64)
        self.results['gt_ignore_flags'] = np.empty((0, ), dtype=bool)
        transform = Mosaic(
            img_scale=(12, 10), pre_transform=self.pre_transform)
        results = transform(copy.deepcopy(self.results))
        self.assertIsInstance(results, dict)
        self.assertTrue(results['img'].shape[:2] == (20, 24))
        self.assertTrue(
            results['gt_bboxes_labels'].shape[0] ==
            results['gt_bboxes'].shape[0] ==
            results['gt_ignore_flags'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == np.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

    def test_transform_with_box_list(self):
        transform = Mosaic(
            img_scale=(12, 10), pre_transform=self.pre_transform)
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])
        results = transform(results)
        self.assertTrue(results['img'].shape[:2] == (20, 24))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == torch.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

    def test_transform_with_mask(self):
        rng = np.random.RandomState(0)
        pre_transform = [
            dict(
                type='LoadImageFromFile',
                file_client_args=dict(backend='disk')),
            dict(type='LoadAnnotations', with_bbox=True, with_mask=True)
        ]

        dataset = YOLOv5CocoDataset(
            data_prefix=dict(
                img=osp.join(osp.dirname(__file__), '../../data')),
            ann_file=osp.join(
                osp.dirname(__file__), '../../data/coco_sample_color.json'),
            filter_cfg=dict(filter_empty_gt=False, min_size=32),
            pipeline=[])
        results = {
            'img': np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes_labels': np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes': np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
            'gt_masks': PolygonMasks.random(
                num_masks=3, height=224, width=224, rng=rng),
            'dataset': dataset
        }
        transform = Mosaic(img_scale=(12, 10), pre_transform=pre_transform)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])
        results = transform(results)
        self.assertTrue(results['img'].shape[:2] == (20, 24))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == torch.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)


class TestMosaic9(unittest.TestCase):

    def setUp(self):
        """Set up the data info which is used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        self.pre_transform = [
            dict(
                type='LoadImageFromFile',
                file_client_args=dict(backend='disk')),
            dict(type='LoadAnnotations', with_bbox=True)
        ]

        self.dataset = YOLOv5CocoDataset(
            data_prefix=dict(
                img=osp.join(osp.dirname(__file__), '../../data')),
            ann_file=osp.join(
                osp.dirname(__file__), '../../data/coco_sample_color.json'),
            filter_cfg=dict(filter_empty_gt=False, min_size=32),
            pipeline=[])
        self.results = {
            'img': np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes_labels': np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes': np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
            'gt_masks': BitmapMasks(rng.rand(3, 224, 224),
                                    height=224, width=224),
            'dataset': self.dataset
        }

    def test_transform(self):
        # test assertion for invalid img_scale
        with self.assertRaises(AssertionError):
            transform = Mosaic9(img_scale=640)

        # test assertion for invalid probability
        with self.assertRaises(AssertionError):
            transform = Mosaic9(prob=1.5)

        # test assertion for invalid max_cached_images
        with self.assertRaises(AssertionError):
            transform = Mosaic9(use_cached=True, max_cached_images=1)

        transform = Mosaic9(
            img_scale=(12, 10), pre_transform=self.pre_transform)
        results = transform(copy.deepcopy(self.results))
        self.assertTrue(results['img'].shape[:2] == (20, 24))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == np.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

    def test_transform_with_no_gt(self):
        self.results['gt_bboxes'] = np.empty((0, 4), dtype=np.float32)
        self.results['gt_bboxes_labels'] = np.empty((0, ), dtype=np.int64)
        self.results['gt_ignore_flags'] = np.empty((0, ), dtype=bool)
        transform = Mosaic9(
            img_scale=(12, 10), pre_transform=self.pre_transform)
        results = transform(copy.deepcopy(self.results))
        self.assertIsInstance(results, dict)
        self.assertTrue(results['img'].shape[:2] == (20, 24))
        self.assertTrue(
            results['gt_bboxes_labels'].shape[0] ==
            results['gt_bboxes'].shape[0] ==
            results['gt_ignore_flags'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == np.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

    def test_transform_with_box_list(self):
        transform = Mosaic9(
            img_scale=(12, 10), pre_transform=self.pre_transform)
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])
        results = transform(results)
        self.assertTrue(results['img'].shape[:2] == (20, 24))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == torch.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)


class TestYOLOv5MixUp(unittest.TestCase):

    def setUp(self):
        """Set up the data info which is used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.pre_transform = [
            dict(
                type='LoadImageFromFile',
                file_client_args=dict(backend='disk')),
            dict(type='LoadAnnotations', with_bbox=True)
        ]

        self.dataset = YOLOv5CocoDataset(
            data_prefix=dict(
                img=osp.join(osp.dirname(__file__), '../../data')),
            ann_file=osp.join(
                osp.dirname(__file__), '../../data/coco_sample_color.json'),
            filter_cfg=dict(filter_empty_gt=False, min_size=32),
            pipeline=[])
        self.results = {
            'img': np.random.random((288, 512, 3)),
            'img_shape': (288, 512),
            'gt_bboxes_labels': np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes': np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
            'dataset': self.dataset
        }

    def test_transform(self):
        transform = YOLOv5MixUp(pre_transform=self.pre_transform)
        results = transform(copy.deepcopy(self.results))
        self.assertTrue(results['img'].shape[:2] == (288, 512))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == np.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

        # test assertion for invalid max_cached_images
        with self.assertRaises(AssertionError):
            transform = YOLOv5MixUp(use_cached=True, max_cached_images=1)

    def test_transform_with_box_list(self):
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])

        transform = YOLOv5MixUp(pre_transform=self.pre_transform)
        results = transform(results)
        self.assertTrue(results['img'].shape[:2] == (288, 512))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == torch.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

    def test_transform_with_mask(self):
        rng = np.random.RandomState(0)
        pre_transform = [
            dict(
                type='LoadImageFromFile',
                file_client_args=dict(backend='disk')),
            dict(type='LoadAnnotations', with_bbox=True, with_mask=True)
        ]

        dataset = YOLOv5CocoDataset(
            data_prefix=dict(
                img=osp.join(osp.dirname(__file__), '../../data')),
            ann_file=osp.join(
                osp.dirname(__file__), '../../data/coco_sample_color.json'),
            filter_cfg=dict(filter_empty_gt=False, min_size=32),
            pipeline=[])
        results = {
            'img': np.random.random((288, 512, 3)),
            'img_shape': (288, 512),
            'gt_bboxes_labels': np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes': np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
            'gt_masks': PolygonMasks.random(
                num_masks=3, height=288, width=512, rng=rng),
            'dataset': dataset
        }
        transform = YOLOv5MixUp(pre_transform=pre_transform)
        results = transform(copy.deepcopy(results))
        self.assertTrue(results['img'].shape[:2] == (288, 512))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == np.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)


class TestYOLOXMixUp(unittest.TestCase):

    def setUp(self):
        """Set up the data info which is used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        self.pre_transform = [
            dict(
                type='LoadImageFromFile',
                file_client_args=dict(backend='disk')),
            dict(type='LoadAnnotations', with_bbox=True)
        ]

        self.dataset = YOLOv5CocoDataset(
            data_prefix=dict(
                img=osp.join(osp.dirname(__file__), '../../data')),
            ann_file=osp.join(
                osp.dirname(__file__), '../../data/coco_sample_color.json'),
            filter_cfg=dict(filter_empty_gt=False, min_size=32),
            pipeline=[])
        self.results = {
            'img': np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes_labels': np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes': np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
            'gt_masks': BitmapMasks(rng.rand(3, 224, 224),
                                    height=224, width=224),
            'dataset': self.dataset
        }

    def test_transform(self):
        # test assertion for invalid img_scale
        with self.assertRaises(AssertionError):
            transform = YOLOXMixUp(img_scale=640)

        # test assertion for invalid max_cached_images
        with self.assertRaises(AssertionError):
            transform = YOLOXMixUp(use_cached=True, max_cached_images=1)

        transform = YOLOXMixUp(
            img_scale=(10, 12),
            ratio_range=(0.8, 1.6),
            pad_val=114.0,
            pre_transform=self.pre_transform)

        # self.results['mix_results'] = [copy.deepcopy(self.results)]
        results = transform(copy.deepcopy(self.results))
        self.assertTrue(results['img'].shape[:2] == (224, 224))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == np.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

    def test_transform_with_boxlist(self):
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])

        transform = YOLOXMixUp(
            img_scale=(10, 12),
            ratio_range=(0.8, 1.6),
            pad_val=114.0,
            pre_transform=self.pre_transform)
        results = transform(results)
        self.assertTrue(results['img'].shape[:2] == (224, 224))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == torch.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)
file_length: 17683 | avg_line_length: 40.221445 | max_line_length: 79 | extension_type: py

repo: mmyolo
file: mmyolo-main/tests/test_datasets/test_transforms/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
file_length: 48 | avg_line_length: 23.5 | max_line_length: 47 | extension_type: py

repo: mmyolo
file: mmyolo-main/tests/test_datasets/test_transforms/test_transforms.py
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest

import mmcv
import numpy as np
import torch
from mmdet.structures.bbox import HorizontalBoxes
from mmdet.structures.mask import BitmapMasks, PolygonMasks

from mmyolo.datasets.transforms import (LetterResize, LoadAnnotations,
                                        YOLOv5HSVRandomAug,
                                        YOLOv5KeepRatioResize,
                                        YOLOv5RandomAffine)
from mmyolo.datasets.transforms.transforms import (PPYOLOERandomCrop,
                                                   PPYOLOERandomDistort,
                                                   YOLOv5CopyPaste)


class TestLetterResize(unittest.TestCase):

    def setUp(self):
        """Set up the data info which is used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        self.data_info1 = dict(
            img=np.random.random((300, 400, 3)),
            gt_bboxes=np.array([[0, 0, 150, 150]], dtype=np.float32),
            batch_shape=np.array([192, 672], dtype=np.int64),
            gt_masks=PolygonMasks.random(1, height=300, width=400, rng=rng))
        self.data_info2 = dict(
            img=np.random.random((300, 400, 3)),
            gt_bboxes=np.array([[0, 0, 150, 150]], dtype=np.float32))
        self.data_info3 = dict(
            img=np.random.random((300, 400, 3)),
            batch_shape=np.array([192, 672], dtype=np.int64))
        self.data_info4 = dict(img=np.random.random((300, 400, 3)))

    def test_letter_resize(self):
        # Test allow_scale_up
        transform = LetterResize(scale=(640, 640), allow_scale_up=False)
        results = transform(copy.deepcopy(self.data_info1))
        self.assertEqual(results['img_shape'], (192, 672, 3))
        self.assertTrue(
            (results['gt_bboxes'] == np.array([[208., 0., 304., 96.]])).all())
        self.assertTrue(
            (results['batch_shape'] == np.array([192, 672])).all())
        self.assertTrue(
            (results['pad_param'] == np.array([0., 0., 208., 208.])).all())
        self.assertTrue(
            (np.array(results['scale_factor'], dtype=np.float32) <= 1.).all())

        # Test pad_val
        transform = LetterResize(scale=(640, 640), pad_val=dict(img=144))
        results = transform(copy.deepcopy(self.data_info1))
        self.assertEqual(results['img_shape'], (192, 672, 3))
        self.assertTrue(
            (results['gt_bboxes'] == np.array([[208., 0., 304., 96.]])).all())
        self.assertTrue(
            (results['batch_shape'] == np.array([192, 672])).all())
        self.assertTrue(
            (results['pad_param'] == np.array([0., 0., 208., 208.])).all())
        self.assertTrue(
            (np.array(results['scale_factor'], dtype=np.float32) <= 1.).all())

        # Test use_mini_pad
        transform = LetterResize(scale=(640, 640), use_mini_pad=True)
        results = transform(copy.deepcopy(self.data_info1))
        self.assertEqual(results['img_shape'], (192, 256, 3))
        self.assertTrue(
            (results['gt_bboxes'] == np.array([[0., 0., 96., 96.]])).all())
        self.assertTrue(
            (results['batch_shape'] == np.array([192, 672])).all())
        self.assertTrue(
            (results['pad_param'] == np.array([0., 0., 0., 0.])).all())
        self.assertTrue(
            (np.array(results['scale_factor'], dtype=np.float32) <= 1.).all())

        # Test stretch_only
        transform = LetterResize(scale=(640, 640), stretch_only=True)
        results = transform(copy.deepcopy(self.data_info1))
        self.assertEqual(results['img_shape'], (192, 672, 3))
        self.assertTrue((results['gt_bboxes'] == np.array(
            [[0., 0., 251.99998474121094, 96.]])).all())
        self.assertTrue(
            (results['batch_shape'] == np.array([192, 672])).all())
        self.assertTrue(
            (results['pad_param'] == np.array([0., 0., 0., 0.])).all())

        # Test
        transform = LetterResize(scale=(640, 640), pad_val=dict(img=144))
        for _ in range(5):
            input_h, input_w = np.random.randint(100, 700), np.random.randint(
                100, 700)
            output_h, output_w = np.random.randint(
                100, 700), np.random.randint(100, 700)
            data_info = dict(
                img=np.random.random((input_h, input_w, 3)),
                gt_bboxes=np.array([[0, 0, 10, 10]], dtype=np.float32),
                batch_shape=np.array([output_h, output_w], dtype=np.int64),
                gt_masks=PolygonMasks(
                    [[np.array([0., 0., 0., 10., 10., 10., 10., 0.])]],
                    height=input_h,
                    width=input_w))
            results = transform(data_info)
            self.assertEqual(results['img_shape'], (output_h, output_w, 3))
            self.assertTrue((results['batch_shape'] == np.array(
                [output_h, output_w])).all())

        # Test without batchshape
        transform = LetterResize(scale=(640, 640), pad_val=dict(img=144))
        for _ in range(5):
            input_h, input_w = np.random.randint(100, 700), np.random.randint(
                100, 700)
            data_info = dict(
                img=np.random.random((input_h, input_w, 3)),
                gt_bboxes=np.array([[0, 0, 10, 10]], dtype=np.float32),
                gt_masks=PolygonMasks(
                    [[np.array([0., 0., 0., 10., 10., 10., 10., 0.])]],
                    height=input_h,
                    width=input_w))
            results = transform(data_info)
            self.assertEqual(results['img_shape'], (640, 640, 3))

        # TODO: Testing the existence of multiple scale_factor and pad_param
        transform = [
            YOLOv5KeepRatioResize(scale=(32, 32)),
            LetterResize(scale=(64, 68), pad_val=dict(img=144))
        ]
        for _ in range(5):
            input_h, input_w = np.random.randint(100, 700), np.random.randint(
                100, 700)
            output_h, output_w = np.random.randint(
                100, 700), np.random.randint(100, 700)
            data_info = dict(
                img=np.random.random((input_h, input_w, 3)),
                gt_bboxes=np.array([[0, 0, 5, 5]], dtype=np.float32),
                batch_shape=np.array([output_h, output_w], dtype=np.int64))
            for t in transform:
                data_info = t(data_info)
            # because of the "math.round" operation,
            # it is unable to strictly restore the original input shape
            # we just validate the correctness of scale_factor and pad_param
            self.assertIn('scale_factor', data_info)
            self.assertIn('pad_param', data_info)
            pad_param = data_info['pad_param'].reshape(-1, 2).sum(
                1)  # (top, b, l, r) -> (h, w)
            scale_factor = np.asarray(
                data_info['scale_factor'])[::-1]  # (w, h) -> (h, w)
            scale_factor_keepratio = np.min(
                np.asarray((32, 32)) / (input_h, input_w))
            validate_shape = np.floor(
                np.asarray((input_h, input_w)) * scale_factor_keepratio + 0.5)
            scale_factor_keepratio = np.floor(
                scale_factor_keepratio * input_h + 0.5) / input_h
            scale_factor_letter = (output_h, output_w) / validate_shape
            scale_factor_letter = (
                scale_factor_letter -
                (pad_param / validate_shape))[np.argmin(scale_factor_letter)]
            self.assertTrue(
                data_info['img_shape'][:2] == (output_h, output_w))
            self.assertTrue((scale_factor == (scale_factor_keepratio *
                                              scale_factor_letter)).all())


class TestYOLOv5KeepRatioResize(unittest.TestCase):

    def setUp(self):
        """Set up the data info which is used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        self.data_info1 = dict(
            img=np.random.random((300, 400, 3)),
            gt_bboxes=np.array([[0, 0, 150, 150]], dtype=np.float32),
            gt_masks=PolygonMasks.random(
                num_masks=1, height=300, width=400, rng=rng))
        self.data_info2 = dict(img=np.random.random((300, 400, 3)))

    def test_yolov5_keep_ratio_resize(self):
        # test assertion for invalid keep_ratio
        with self.assertRaises(AssertionError):
            transform = YOLOv5KeepRatioResize(scale=(640, 640))
            transform.keep_ratio = False
            results = transform(copy.deepcopy(self.data_info1))

        # Test with gt_bboxes
        transform = YOLOv5KeepRatioResize(scale=(640, 640))
        results = transform(copy.deepcopy(self.data_info1))
        self.assertTrue(transform.keep_ratio)
        self.assertEqual(results['img_shape'], (480, 640))
        self.assertTrue(
            (results['gt_bboxes'] == np.array([[0., 0., 240., 240.]])).all())
        self.assertTrue((np.array(results['scale_factor'],
                                  dtype=np.float32) == 1.6).all())

        # Test only img
        transform = YOLOv5KeepRatioResize(scale=(640, 640))
        results = transform(copy.deepcopy(self.data_info2))
        self.assertEqual(results['img_shape'], (480, 640))
        self.assertTrue((np.array(results['scale_factor'],
                                  dtype=np.float32) == 1.6).all())


class TestYOLOv5HSVRandomAug(unittest.TestCase):

    def setUp(self):
        """Set up the data info which is used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.data_info = dict(
            img=mmcv.imread(
                osp.join(osp.dirname(__file__), '../../data/color.jpg'),
                'color'))

    def test_yolov5_hsv_random_aug(self):
        # Test with gt_bboxes
        transform = YOLOv5HSVRandomAug(
            hue_delta=0.015, saturation_delta=0.7, value_delta=0.4)
        results = transform(copy.deepcopy(self.data_info))
        self.assertTrue(
            results['img'].shape[:2] == self.data_info['img'].shape[:2])


class TestLoadAnnotations(unittest.TestCase):

    def setUp(self):
        """Set up the data info which is used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        data_prefix = osp.join(osp.dirname(__file__), '../../data')
        seg_map = osp.join(data_prefix, 'gray.jpg')
        self.results = {
            'ori_shape': (300, 400),
            'seg_map_path': seg_map,
            'instances': [{
                'bbox': [0, 0, 10, 20],
                'bbox_label': 1,
                'mask': [[0, 0, 0, 20, 10, 20, 10, 0]],
                'ignore_flag': 0
            }, {
                'bbox': [10, 10, 110, 120],
                'bbox_label': 2,
                'mask': [[10, 10, 110, 10, 110, 120, 110, 10]],
                'ignore_flag': 0
            }, {
                'bbox': [50, 50, 60, 80],
                'bbox_label': 2,
                'mask': [[50, 50, 60, 50, 60, 80, 50, 80]],
                'ignore_flag': 1
            }]
        }

    def test_load_bboxes(self):
        transform = LoadAnnotations(
            with_bbox=True,
            with_label=False,
            with_seg=False,
            with_mask=False,
            box_type=None)
        results = transform(copy.deepcopy(self.results))
        self.assertIn('gt_bboxes', results)
        self.assertTrue((results['gt_bboxes'] == np.array(
            [[0, 0, 10, 20], [10, 10, 110, 120]])).all())
        self.assertEqual(results['gt_bboxes'].dtype, np.float32)
        self.assertTrue(
            (results['gt_ignore_flags'] == np.array([False, False])).all())
        self.assertEqual(results['gt_ignore_flags'].dtype, bool)

        # test empty instance
        results = transform({})
        self.assertIn('gt_bboxes', results)
        self.assertTrue(results['gt_bboxes'].shape == (0, 4))
        self.assertIn('gt_ignore_flags', results)
        self.assertTrue(results['gt_ignore_flags'].shape == (0, ))

    def test_load_labels(self):
        transform = LoadAnnotations(
            with_bbox=False,
            with_label=True,
            with_seg=False,
            with_mask=False,
        )
        results = transform(copy.deepcopy(self.results))
        self.assertIn('gt_bboxes_labels', results)
        self.assertTrue(
            (results['gt_bboxes_labels'] == np.array([1, 2])).all())
        self.assertEqual(results['gt_bboxes_labels'].dtype, np.int64)

        # test empty instance
        results = transform({})
        self.assertIn('gt_bboxes_labels', results)
        self.assertTrue(results['gt_bboxes_labels'].shape == (0, ))


class TestYOLOv5RandomAffine(unittest.TestCase):

    def setUp(self):
        """Set up the data info which is used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.results = {
            'img': np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes_labels': np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes': np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
        }

    def test_transform(self):
        # test assertion for invalid translate_ratio
        with self.assertRaises(AssertionError):
            transform = YOLOv5RandomAffine(max_translate_ratio=1.5)

        # test assertion for invalid scaling_ratio_range
        with self.assertRaises(AssertionError):
            transform = YOLOv5RandomAffine(scaling_ratio_range=(1.5, 0.5))

        with self.assertRaises(AssertionError):
            transform = YOLOv5RandomAffine(scaling_ratio_range=(0, 0.5))

        transform = YOLOv5RandomAffine()
        results = transform(copy.deepcopy(self.results))
        self.assertTrue(results['img'].shape[:2] == (224, 224))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == np.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

    def test_transform_with_boxlist(self):
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])

        transform = YOLOv5RandomAffine()
        results = transform(copy.deepcopy(results))
        self.assertTrue(results['img'].shape[:2] == (224, 224))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == torch.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)


class TestPPYOLOERandomCrop(unittest.TestCase):

    def setUp(self):
        """Set up the data info which is used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.results = {
            'img': np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes_labels': np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes': np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
        }

    def test_transform(self):
        transform = PPYOLOERandomCrop()
        results = transform(copy.deepcopy(self.results))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == np.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

    def test_transform_with_boxlist(self):
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])

        transform = PPYOLOERandomCrop()
        results = transform(copy.deepcopy(results))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == torch.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)


class TestPPYOLOERandomDistort(unittest.TestCase):

    def setUp(self):
        """Set up the data info which is used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.results = {
            'img': np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes_labels': np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes': np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            'gt_ignore_flags': np.array([0, 0, 1], dtype=bool),
        }

    def test_transform(self):
        # test assertion for invalid prob
        with self.assertRaises(AssertionError):
            transform = PPYOLOERandomDistort(
                hue_cfg=dict(min=-18, max=18, prob=1.5))

        # test assertion for invalid num_distort_func
        with self.assertRaises(AssertionError):
            transform = PPYOLOERandomDistort(num_distort_func=5)

        transform = PPYOLOERandomDistort()
        results = transform(copy.deepcopy(self.results))
        self.assertTrue(results['img'].shape[:2] == (224, 224))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == np.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

    def test_transform_with_boxlist(self):
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])

        transform = PPYOLOERandomDistort()
        results = transform(copy.deepcopy(results))
        self.assertTrue(results['img'].shape[:2] == (224, 224))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == torch.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)


class TestYOLOv5CopyPaste(unittest.TestCase):

    def setUp(self):
        """Set up the data info which is used in every test method.

        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.data_info = dict(
            img=np.random.random((300, 400, 3)),
            gt_bboxes=np.array([[0, 0, 10, 10]], dtype=np.float32),
            gt_masks=PolygonMasks(
                [[np.array([0., 0., 0., 10., 10., 10., 10., 0.])]],
                height=300,
                width=400))

    def test_transform(self):
        # test transform
        transform = YOLOv5CopyPaste(prob=1.0)
        results = transform(copy.deepcopy(self.data_info))
        self.assertTrue(len(results['gt_bboxes']) == 2)
        self.assertTrue(len(results['gt_masks']) == 2)

        rng = np.random.RandomState(0)
        # test with bitmap
        with self.assertRaises(AssertionError):
            results = transform(
                dict(
                    img=np.random.random((300, 400, 3)),
                    gt_bboxes=np.array([[0, 0, 10, 10]], dtype=np.float32),
                    gt_masks=BitmapMasks(
                        rng.rand(1, 300, 400), height=300, width=400)))
file_length: 21175 | avg_line_length: 42.12831 | max_line_length: 79 | extension_type: py

repo: mmyolo
file: mmyolo-main/tests/test_utils/test_setup_env.py
# Copyright (c) OpenMMLab. All rights reserved.
import datetime
import sys
from unittest import TestCase

from mmengine import DefaultScope

from mmyolo.utils import register_all_modules


class TestSetupEnv(TestCase):

    def test_register_all_modules(self):
        from mmyolo.registry import DATASETS

        # not init default scope
        sys.modules.pop('mmyolo.datasets', None)
        sys.modules.pop('mmyolo.datasets.yolov5_coco', None)
        DATASETS._module_dict.pop('YOLOv5CocoDataset', None)
        self.assertFalse('YOLOv5CocoDataset' in DATASETS.module_dict)
        register_all_modules(init_default_scope=False)
        self.assertTrue('YOLOv5CocoDataset' in DATASETS.module_dict)

        # init default scope
        sys.modules.pop('mmyolo.datasets', None)
        sys.modules.pop('mmyolo.datasets.yolov5_coco', None)
        DATASETS._module_dict.pop('YOLOv5CocoDataset', None)
        self.assertFalse('YOLOv5CocoDataset' in DATASETS.module_dict)
        register_all_modules(init_default_scope=True)
        self.assertTrue('YOLOv5CocoDataset' in DATASETS.module_dict)
        self.assertEqual(DefaultScope.get_current_instance().scope_name,
                         'mmyolo')

        # init default scope when another scope is init
        name = f'test-{datetime.datetime.now()}'
        DefaultScope.get_instance(name, scope_name='test')
        with self.assertWarnsRegex(
                Warning, 'The current default scope "test" is not "mmyolo"'):
            register_all_modules(init_default_scope=True)
file_length: 1544 | avg_line_length: 37.625 | max_line_length: 77 | extension_type: py

repo: mmyolo
file: mmyolo-main/tests/test_utils/test_collect_env.py
# Copyright (c) OpenMMLab. All rights reserved.
import sys
from unittest import TestCase

import mmcv
import mmdet
import mmengine

from mmyolo.utils import collect_env


class TestCollectEnv(TestCase):

    def test_collect_env(self):
        env_info = collect_env()
        print(env_info)
        expected_keys = [
            'sys.platform', 'Python', 'CUDA available', 'PyTorch',
            'PyTorch compiling details', 'OpenCV', 'MMEngine', 'GCC'
        ]
        for key in expected_keys:
            assert key in env_info

        if env_info['CUDA available']:
            for key in ['CUDA_HOME', 'NVCC']:
                assert key in env_info

        assert env_info['sys.platform'] == sys.platform
        assert env_info['Python'] == sys.version.replace('\n', '')
        assert env_info['MMEngine'] == mmengine.__version__
        assert env_info['MMCV'] == mmcv.__version__
        assert env_info['MMDetection'] == mmdet.__version__
file_length: 956 | avg_line_length: 27.147059 | max_line_length: 68 | extension_type: py

repo: mmyolo
file: mmyolo-main/tests/test_downstream/test_mmrazor.py
# Copyright (c) OpenMMLab. All rights reserved.
import copy

import pytest
from mmcls.models.backbones.base_backbone import BaseBackbone

from mmyolo.testing import get_detector_cfg


@pytest.mark.parametrize('cfg_file', [
    'razor/subnets/'
    'yolov5_s_spos_shufflenetv2_syncbn_8xb16-300e_coco.py',
    'razor/subnets/'
    'rtmdet_tiny_ofa_lat31_syncbn_16xb16-300e_coco.py',
    'razor/subnets/'
    'yolov6_l_attentivenas_a6_d12_syncbn_fast_8xb32-300e_coco.py'
])
def test_razor_backbone_init(cfg_file):
    model = get_detector_cfg(cfg_file)
    model_cfg = copy.deepcopy(model.backbone)
    from mmrazor.registry import MODELS
    model = MODELS.build(model_cfg)
    assert isinstance(model, BaseBackbone)
file_length: 707 | avg_line_length: 31.181818 | max_line_length: 76 | extension_type: py

repo: mmyolo
file: mmyolo-main/demo/large_image_demo.py
# Copyright (c) OpenMMLab. All rights reserved.
"""Perform MMYOLO inference on large images (such as satellite imagery) as:

```shell
wget -P checkpoint https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth # noqa: E501, E261.

python demo/large_image_demo.py \
    demo/large_image.jpg \
    configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py \
    checkpoint/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth
```
"""

import os
import random
from argparse import ArgumentParser
from pathlib import Path

import mmcv
import numpy as np
from mmdet.apis import inference_detector, init_detector
from mmengine.config import Config, ConfigDict
from mmengine.logging import print_log
from mmengine.utils import ProgressBar

try:
    from sahi.slicing import slice_image
except ImportError:
    raise ImportError('Please run "pip install -U sahi" '
                      'to install sahi first for large image inference.')

from mmyolo.registry import VISUALIZERS
from mmyolo.utils import switch_to_deploy
from mmyolo.utils.large_image import merge_results_by_nms, shift_predictions
from mmyolo.utils.misc import get_file_list


def parse_args():
    parser = ArgumentParser(
        description='Perform MMYOLO inference on large images.')
    parser.add_argument(
        'img', help='Image path, include image file, dir and URL.')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--out-dir', default='./output', help='Path to output file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--show', action='store_true', help='Show the detection results')
    parser.add_argument(
        '--deploy',
        action='store_true',
        help='Switch model to deployment mode')
    parser.add_argument(
        '--tta',
        action='store_true',
        help='Whether to use test time augmentation')
    parser.add_argument(
        '--score-thr', type=float, default=0.3, help='Bbox score threshold')
    parser.add_argument(
        '--patch-size', type=int, default=640, help='The size of patches')
    parser.add_argument(
        '--patch-overlap-ratio',
        type=float,
        default=0.25,
        help='Ratio of overlap between two patches')
    parser.add_argument(
        '--merge-iou-thr',
        type=float,
        default=0.25,
        help='IoU threshold for merging results')
    parser.add_argument(
        '--merge-nms-type',
        type=str,
        default='nms',
        help='NMS type for merging results')
    parser.add_argument(
        '--batch-size',
        type=int,
        default=1,
        help='Batch size, must be greater than or equal to 1')
    parser.add_argument(
        '--debug',
        action='store_true',
        help='Export debug results before merging')
    parser.add_argument(
        '--save-patch',
        action='store_true',
        help='Save the results of each patch. '
        'The `--debug` must be enabled.')
    args = parser.parse_args()
    return args


def main():
    args = parse_args()

    config = args.config

    if isinstance(config, (str, Path)):
        config = Config.fromfile(config)
    elif not isinstance(config, Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')
    if 'init_cfg' in config.model.backbone:
        config.model.backbone.init_cfg = None

    if args.tta:
        assert 'tta_model' in config, 'Cannot find ``tta_model`` in config.' \
            " Can't use tta !"
        assert 'tta_pipeline' in config, 'Cannot find ``tta_pipeline`` ' \
            "in config. Can't use tta !"
        config.model = ConfigDict(**config.tta_model, module=config.model)

        test_data_cfg = config.test_dataloader.dataset
        while 'dataset' in test_data_cfg:
            test_data_cfg = test_data_cfg['dataset']

        # batch_shapes_cfg will force control the size of the output image,
        # it is not compatible with tta.
        if 'batch_shapes_cfg' in test_data_cfg:
            test_data_cfg.batch_shapes_cfg = None
        test_data_cfg.pipeline = config.tta_pipeline

    # TODO: TTA mode will error if cfg_options is not set.
    #  This is an mmdet issue and needs to be fixed later.
    # build the model from a config file and a checkpoint file
    model = init_detector(
        config, args.checkpoint, device=args.device, cfg_options={})

    if args.deploy:
        switch_to_deploy(model)

    if not os.path.exists(args.out_dir) and not args.show:
        os.mkdir(args.out_dir)

    # init visualizer
    visualizer = VISUALIZERS.build(model.cfg.visualizer)
    visualizer.dataset_meta = model.dataset_meta

    # get file list
    files, source_type = get_file_list(args.img)

    # start detector inference
    print(f'Performing inference on {len(files)} images.... '
          'This may take a while.')
    progress_bar = ProgressBar(len(files))
    for file in files:
        # read image
        img = mmcv.imread(file)

        # arrange slices
        height, width = img.shape[:2]
        sliced_image_object = slice_image(
            img,
            slice_height=args.patch_size,
            slice_width=args.patch_size,
            auto_slice_resolution=False,
            overlap_height_ratio=args.patch_overlap_ratio,
            overlap_width_ratio=args.patch_overlap_ratio,
        )

        # perform sliced inference
        slice_results = []
        start = 0
        while True:
            # prepare batch slices
            end = min(start + args.batch_size, len(sliced_image_object))
            images = []
            for sliced_image in sliced_image_object.images[start:end]:
                images.append(sliced_image)

            # forward the model
            slice_results.extend(inference_detector(model, images))

            if end >= len(sliced_image_object):
                break
            start += args.batch_size

        if source_type['is_dir']:
            filename = os.path.relpath(file, args.img).replace('/', '_')
        else:
            filename = os.path.basename(file)

        img = mmcv.imconvert(img, 'bgr', 'rgb')
        out_file = None if args.show else os.path.join(args.out_dir, filename)

        # export debug images
        if args.debug:
            # export sliced image results
            name, suffix = os.path.splitext(filename)

            shifted_instances = shift_predictions(
                slice_results,
                sliced_image_object.starting_pixels,
                src_image_shape=(height, width))
            merged_result = slice_results[0].clone()
            merged_result.pred_instances = shifted_instances

            debug_file_name = name + '_debug' + suffix
            debug_out_file = None if args.show else os.path.join(
                args.out_dir, debug_file_name)
            visualizer.set_image(img.copy())

            debug_grids = []
            for starting_point in sliced_image_object.starting_pixels:
                start_point_x = starting_point[0]
                start_point_y = starting_point[1]
                end_point_x = start_point_x + args.patch_size
                end_point_y = start_point_y + args.patch_size
                debug_grids.append(
                    [start_point_x, start_point_y, end_point_x, end_point_y])
            debug_grids = np.array(debug_grids)
            debug_grids[:, 0::2] = np.clip(debug_grids[:, 0::2], 1,
                                           img.shape[1] - 1)
            debug_grids[:, 1::2] = np.clip(debug_grids[:, 1::2], 1,
                                           img.shape[0] - 1)

            palette = np.random.randint(0, 256, size=(len(debug_grids), 3))
            palette = [tuple(c) for c in palette]
            line_styles = random.choices(['-', '-.', ':'],
                                         k=len(debug_grids))
            visualizer.draw_bboxes(
                debug_grids,
                edge_colors=palette,
                alpha=1,
                line_styles=line_styles)
            visualizer.draw_bboxes(
                debug_grids, face_colors=palette, alpha=0.15)

            visualizer.draw_texts(
                list(range(len(debug_grids))),
                debug_grids[:, :2] + 5,
                colors='w')

            visualizer.add_datasample(
                debug_file_name,
                visualizer.get_image(),
                data_sample=merged_result,
                draw_gt=False,
                show=args.show,
                wait_time=0,
                out_file=debug_out_file,
                pred_score_thr=args.score_thr,
            )

            if args.save_patch:
                debug_patch_out_dir = os.path.join(args.out_dir,
                                                   f'{name}_patch')
                for i, slice_result in enumerate(slice_results):
                    patch_out_file = os.path.join(
                        debug_patch_out_dir,
                        f'{filename}_slice_{i}_result.jpg')
                    image = mmcv.imconvert(sliced_image_object.images[i],
                                           'bgr', 'rgb')

                    visualizer.add_datasample(
                        'patch_result',
                        image,
                        data_sample=slice_result,
                        draw_gt=False,
                        show=False,
                        wait_time=0,
                        out_file=patch_out_file,
                        pred_score_thr=args.score_thr,
                    )

        image_result = merge_results_by_nms(
            slice_results,
            sliced_image_object.starting_pixels,
            src_image_shape=(height, width),
            nms_cfg={
                'type': args.merge_nms_type,
                'iou_threshold': args.merge_iou_thr
            })

        visualizer.add_datasample(
            filename,
            img,
            data_sample=image_result,
            draw_gt=False,
            show=args.show,
            wait_time=0,
            out_file=out_file,
            pred_score_thr=args.score_thr,
        )
        progress_bar.update()

    if not args.show or (args.debug and args.save_patch):
        print_log(
            f'\nResults have been saved at {os.path.abspath(args.out_dir)}')


if __name__ == '__main__':
    main()
10,634
35.050847
197
py
mmyolo
mmyolo-main/demo/featmap_vis_demo.py
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os from typing import Sequence import mmcv from mmdet.apis import inference_detector, init_detector from mmengine import Config, DictAction from mmengine.registry import init_default_scope from mmengine.utils import ProgressBar from mmyolo.registry import VISUALIZERS from mmyolo.utils.misc import auto_arrange_images, get_file_list def parse_args(): parser = argparse.ArgumentParser(description='Visualize feature map') parser.add_argument( 'img', help='Image path, include image file, dir and URL.') parser.add_argument('config', help='Config file') parser.add_argument('checkpoint', help='Checkpoint file') parser.add_argument( '--out-dir', default='./output', help='Path to output file') parser.add_argument( '--target-layers', default=['backbone'], nargs='+', type=str, help='The target layers to get feature map, if not set, the tool will ' 'specify the backbone') parser.add_argument( '--preview-model', default=False, action='store_true', help='To preview all the model layers') parser.add_argument( '--device', default='cuda:0', help='Device used for inference') parser.add_argument( '--score-thr', type=float, default=0.3, help='Bbox score threshold') parser.add_argument( '--show', action='store_true', help='Show the featmap results') parser.add_argument( '--channel-reduction', default='select_max', help='Reduce multiple channels to a single channel') parser.add_argument( '--topk', type=int, default=4, help='Select topk channel to show by the sum of each channel') parser.add_argument( '--arrangement', nargs='+', type=int, default=[2, 2], help='The arrangement of featmap when channel_reduction is ' 'not None and topk > 0') parser.add_argument( '--cfg-options', nargs='+', action=DictAction, help='override some settings in the used config, the key-value pair ' 'in xxx=yyy format will be merged into config file. If the value to ' 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' 'Note that the quotation marks are necessary and that no white space ' 'is allowed.') args = parser.parse_args() return args class ActivationsWrapper: def __init__(self, model, target_layers): self.model = model self.activations = [] self.handles = [] self.image = None for target_layer in target_layers: self.handles.append( target_layer.register_forward_hook(self.save_activation)) def save_activation(self, module, input, output): self.activations.append(output) def __call__(self, img_path): self.activations = [] results = inference_detector(self.model, img_path) return results, self.activations def release(self): for handle in self.handles: handle.remove() def main(): args = parse_args() cfg = Config.fromfile(args.config) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) init_default_scope(cfg.get('default_scope', 'mmyolo')) channel_reduction = args.channel_reduction if channel_reduction == 'None': channel_reduction = None assert len(args.arrangement) == 2 model = init_detector(args.config, args.checkpoint, device=args.device) if not os.path.exists(args.out_dir) and not args.show: os.mkdir(args.out_dir) if args.preview_model: print(model) print('\n This flag is only show model, if you want to continue, ' 'please remove `--preview-model` to get the feature map.') return target_layers = [] for target_layer in args.target_layers: try: target_layers.append(eval(f'model.{target_layer}')) except Exception as e: print(model) raise RuntimeError('layer does not exist', e) activations_wrapper = ActivationsWrapper(model, target_layers) # init visualizer visualizer = VISUALIZERS.build(model.cfg.visualizer) visualizer.dataset_meta = model.dataset_meta # get file list image_list, source_type = get_file_list(args.img) progress_bar = ProgressBar(len(image_list)) for image_path in image_list: result, featmaps = activations_wrapper(image_path) if not isinstance(featmaps, Sequence): featmaps = [featmaps] flatten_featmaps = [] for featmap in featmaps: if isinstance(featmap, Sequence): flatten_featmaps.extend(featmap) else: flatten_featmaps.append(featmap) img = mmcv.imread(image_path) img = mmcv.imconvert(img, 'bgr', 'rgb') if source_type['is_dir']: filename = os.path.relpath(image_path, args.img).replace('/', '_') else: filename = os.path.basename(image_path) out_file = None if args.show else os.path.join(args.out_dir, filename) # show the results shown_imgs = [] visualizer.add_datasample( 'result', img, data_sample=result, draw_gt=False, show=False, wait_time=0, out_file=None, pred_score_thr=args.score_thr) drawn_img = visualizer.get_image() for featmap in flatten_featmaps: shown_img = visualizer.draw_featmap( featmap[0], drawn_img, channel_reduction=channel_reduction, topk=args.topk, arrangement=args.arrangement) shown_imgs.append(shown_img) shown_imgs = auto_arrange_images(shown_imgs) progress_bar.update() if out_file: mmcv.imwrite(shown_imgs[..., ::-1], out_file) if args.show: visualizer.show(shown_imgs) if not args.show: print(f'All done!' f'\nResults have been saved at {os.path.abspath(args.out_dir)}') # Please refer to the usage tutorial: # https://github.com/open-mmlab/mmyolo/blob/main/docs/zh_cn/user_guides/visualization.md # noqa if __name__ == '__main__': main()
6,508
31.545
95
py
mmyolo
mmyolo-main/demo/video_demo.py
# Copyright (c) OpenMMLab. All rights reserved. """Perform MMYOLO inference on a video as: ```shell wget -P checkpoint https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth # noqa: E501, E261. python demo/video_demo.py \ demo/video_demo.mp4 \ configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py \ checkpoint/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth \ --out demo_result.mp4 ``` """ import argparse import cv2 import mmcv from mmcv.transforms import Compose from mmdet.apis import inference_detector, init_detector from mmengine.utils import track_iter_progress from mmyolo.registry import VISUALIZERS def parse_args(): parser = argparse.ArgumentParser(description='MMYOLO video demo') parser.add_argument('video', help='Video file') parser.add_argument('config', help='Config file') parser.add_argument('checkpoint', help='Checkpoint file') parser.add_argument( '--device', default='cuda:0', help='Device used for inference') parser.add_argument( '--score-thr', type=float, default=0.3, help='Bbox score threshold') parser.add_argument('--out', type=str, help='Output video file') parser.add_argument('--show', action='store_true', help='Show video') parser.add_argument( '--wait-time', type=float, default=1, help='The interval of show (s), 0 is block') args = parser.parse_args() return args def main(): args = parse_args() assert args.out or args.show, \ ('Please specify at least one operation (save/show the ' 'video) with the argument "--out" or "--show"') # build the model from a config file and a checkpoint file model = init_detector(args.config, args.checkpoint, device=args.device) # build test pipeline model.cfg.test_dataloader.dataset.pipeline[ 0].type = 'mmdet.LoadImageFromNDArray' test_pipeline = Compose(model.cfg.test_dataloader.dataset.pipeline) # init visualizer visualizer = VISUALIZERS.build(model.cfg.visualizer) # the dataset_meta is loaded from the checkpoint and # then pass to the model in init_detector visualizer.dataset_meta = model.dataset_meta video_reader = mmcv.VideoReader(args.video) video_writer = None if args.out: fourcc = cv2.VideoWriter_fourcc(*'mp4v') video_writer = cv2.VideoWriter( args.out, fourcc, video_reader.fps, (video_reader.width, video_reader.height)) for frame in track_iter_progress(video_reader): result = inference_detector(model, frame, test_pipeline=test_pipeline) visualizer.add_datasample( name='video', image=frame, data_sample=result, draw_gt=False, show=False, pred_score_thr=args.score_thr) frame = visualizer.get_image() if args.show: cv2.namedWindow('video', 0) mmcv.imshow(frame, 'video', args.wait_time) if args.out: video_writer.write(frame) if video_writer: video_writer.release() cv2.destroyAllWindows() if __name__ == '__main__': main()
3,274
32.762887
197
py
mmyolo
mmyolo-main/demo/deploy_demo.py
# Copyright (c) OpenMMLab. All rights reserved.
"""Deploy demo for mmdeploy.

This script helps users run the mmdeploy demo after converting a checkpoint
to a backend.

Usage:
    python deploy_demo.py img \
        config \
        checkpoint \
        [--deploy-cfg DEPLOY_CFG] \
        [--device DEVICE] \
        [--out-dir OUT_DIR] \
        [--show] \
        [--score-thr SCORE_THR]

Example:
    python deploy_demo.py \
        ${MMYOLO_PATH}/data/cat/images \
        ./yolov5_s-v61_syncbn_fast_1xb32-100e_cat.py \
        ./end2end.engine \
        --deploy-cfg ./detection_tensorrt-fp16_dynamic-192x192-960x960.py \
        --out-dir ${MMYOLO_PATH}/work_dirs/deploy_predict_out \
        --device cuda:0 \
        --score-thr 0.5
"""
import argparse
import os

import torch
from mmengine import ProgressBar

from mmyolo.utils.misc import get_file_list

try:
    from mmdeploy.apis.utils import build_task_processor
    from mmdeploy.utils import get_input_shape, load_config
except ImportError:
    raise ImportError(
        'mmdeploy is not installed, please see '
        'https://mmdeploy.readthedocs.io/en/1.x/01-how-to-build/build_from_source.html'  # noqa
    )


def parse_args():
    parser = argparse.ArgumentParser(description='For mmdeploy predict')
    parser.add_argument(
        'img', help='Image path, include image file, dir and URL.')
    parser.add_argument('config', help='model config path')
    parser.add_argument('checkpoint', help='checkpoint backend model path')
    parser.add_argument('--deploy-cfg', help='deploy config path')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--out-dir', default='./output', help='Path to output file')
    parser.add_argument(
        '--show', action='store_true', help='Show the detection results')
    parser.add_argument(
        '--score-thr', type=float, default=0.3, help='Bbox score threshold')
    args = parser.parse_args()
    return args


# TODO: still needs refactoring so that it does not build the dataset.
def main():
    args = parse_args()

    if not os.path.exists(args.out_dir) and not args.show:
        os.mkdir(args.out_dir)

    # read deploy_cfg and config
    deploy_cfg, model_cfg = load_config(args.deploy_cfg, args.config)

    # build task and backend model
    task_processor = build_task_processor(model_cfg, deploy_cfg, args.device)
    model = task_processor.build_backend_model([args.checkpoint])

    # get model input shape
    input_shape = get_input_shape(deploy_cfg)

    # get file list
    files, source_type = get_file_list(args.img)

    # start detector inference
    progress_bar = ProgressBar(len(files))
    for file in files:
        # process input image
        model_inputs, _ = task_processor.create_input(file, input_shape)

        # do model inference
        with torch.no_grad():
            result = model.test_step(model_inputs)

        if source_type['is_dir']:
            filename = os.path.relpath(file, args.img).replace('/', '_')
        else:
            filename = os.path.basename(file)
        out_file = None if args.show else os.path.join(args.out_dir, filename)

        # filter out low-score predictions
        result = result[0]
        result.pred_instances = result.pred_instances[
            result.pred_instances.scores > args.score_thr]

        # visualize results
        task_processor.visualize(
            image=file,
            model=model,
            result=result,
            show_result=args.show,
            window_name=os.path.basename(filename),
            output_file=out_file)

        progress_bar.update()

    print('All done!')


if __name__ == '__main__':
    main()
3,823
30.603306
95
py
mmyolo
mmyolo-main/demo/image_demo.py
# Copyright (c) OpenMMLab. All rights reserved.
import os
from argparse import ArgumentParser
from pathlib import Path

import mmcv
from mmdet.apis import inference_detector, init_detector
from mmengine.config import Config, ConfigDict
from mmengine.logging import print_log
from mmengine.utils import ProgressBar, path

from mmyolo.registry import VISUALIZERS
from mmyolo.utils import switch_to_deploy
from mmyolo.utils.labelme_utils import LabelmeFormat
from mmyolo.utils.misc import get_file_list, show_data_classes


def parse_args():
    parser = ArgumentParser()
    parser.add_argument(
        'img', help='Image path, include image file, dir and URL.')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--out-dir', default='./output', help='Path to output file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--show', action='store_true', help='Show the detection results')
    parser.add_argument(
        '--deploy',
        action='store_true',
        help='Switch model to deployment mode')
    parser.add_argument(
        '--tta',
        action='store_true',
        help='Whether to use test time augmentation')
    parser.add_argument(
        '--score-thr', type=float, default=0.3, help='Bbox score threshold')
    parser.add_argument(
        '--class-name',
        nargs='+',
        type=str,
        help='Only save those classes if set')
    parser.add_argument(
        '--to-labelme',
        action='store_true',
        help='Output labelme style label file')
    args = parser.parse_args()
    return args


def main():
    args = parse_args()

    if args.to_labelme and args.show:
        raise RuntimeError('`--to-labelme` and `--show` cannot '
                           'be used at the same time.')

    config = args.config

    if isinstance(config, (str, Path)):
        config = Config.fromfile(config)
    elif not isinstance(config, Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')
    if 'init_cfg' in config.model.backbone:
        config.model.backbone.init_cfg = None

    if args.tta:
        assert 'tta_model' in config, \
            'Cannot find ``tta_model`` in config. Cannot use TTA!'
        assert 'tta_pipeline' in config, \
            'Cannot find ``tta_pipeline`` in config. Cannot use TTA!'
        config.model = ConfigDict(**config.tta_model, module=config.model)

        test_data_cfg = config.test_dataloader.dataset
        while 'dataset' in test_data_cfg:
            test_data_cfg = test_data_cfg['dataset']

        # batch_shapes_cfg will force control the size of the output image;
        # it is not compatible with tta.
        if 'batch_shapes_cfg' in test_data_cfg:
            test_data_cfg.batch_shapes_cfg = None
        test_data_cfg.pipeline = config.tta_pipeline

    # TODO: TTA mode will error if cfg_options is not set.
    #  This is an mmdet issue and needs to be fixed later.
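    # NOTE: the empty ``cfg_options`` dict passed to init_detector below is
    # the workaround for the issue described in the TODO above.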
# build the model from a config file and a checkpoint file model = init_detector( config, args.checkpoint, device=args.device, cfg_options={}) if args.deploy: switch_to_deploy(model) if not args.show: path.mkdir_or_exist(args.out_dir) # init visualizer visualizer = VISUALIZERS.build(model.cfg.visualizer) visualizer.dataset_meta = model.dataset_meta # get file list files, source_type = get_file_list(args.img) # get model class name dataset_classes = model.dataset_meta.get('classes') # ready for labelme format if it is needed to_label_format = LabelmeFormat(classes=dataset_classes) # check class name if args.class_name is not None: for class_name in args.class_name: if class_name in dataset_classes: continue show_data_classes(dataset_classes) raise RuntimeError( 'Expected args.class_name to be one of the list, ' f'but got "{class_name}"') # start detector inference progress_bar = ProgressBar(len(files)) for file in files: result = inference_detector(model, file) img = mmcv.imread(file) img = mmcv.imconvert(img, 'bgr', 'rgb') if source_type['is_dir']: filename = os.path.relpath(file, args.img).replace('/', '_') else: filename = os.path.basename(file) out_file = None if args.show else os.path.join(args.out_dir, filename) progress_bar.update() # Get candidate predict info with score threshold pred_instances = result.pred_instances[ result.pred_instances.scores > args.score_thr] if args.to_labelme: # save result to labelme files out_file = out_file.replace( os.path.splitext(out_file)[-1], '.json') to_label_format(pred_instances, result.metainfo, out_file, args.class_name) continue visualizer.add_datasample( filename, img, data_sample=result, draw_gt=False, show=args.show, wait_time=0, out_file=out_file, pred_score_thr=args.score_thr) if not args.show and not args.to_labelme: print_log( f'\nResults have been saved at {os.path.abspath(args.out_dir)}') elif args.to_labelme: print_log('\nLabelme format label files ' f'had all been saved in {args.out_dir}') if __name__ == '__main__': main()
5,733
32.928994
78
py
mmyolo
mmyolo-main/demo/boxam_vis_demo.py
# Copyright (c) OpenMMLab. All rights reserved.
"""This script is in the experimental verification stage and cannot be
guaranteed to be completely correct.

Currently Grad-based CAM and Grad-free CAM are supported.

The object detection task differs from the classification task: its output
includes not only the AM map of the category but also information such as
bbox and mask, so this script is named boxam.
"""
import argparse
import os.path
import warnings
from functools import partial

import cv2
import mmcv
from mmengine import Config, DictAction, MessageHub
from mmengine.utils import ProgressBar

from mmyolo.utils.boxam_utils import (BoxAMDetectorVisualizer,
                                      BoxAMDetectorWrapper, DetAblationLayer,
                                      DetBoxScoreTarget, GradCAM,
                                      GradCAMPlusPlus, reshape_transform)
from mmyolo.utils.misc import get_file_list

try:
    from pytorch_grad_cam import AblationCAM, EigenCAM
except ImportError:
    raise ImportError('Please run `pip install "grad-cam"` to install '
                      'pytorch_grad_cam package.')

GRAD_FREE_METHOD_MAP = {
    'ablationcam': AblationCAM,
    'eigencam': EigenCAM,
    # 'scorecam': ScoreCAM,  # consumes too much memory
}

GRAD_BASED_METHOD_MAP = {'gradcam': GradCAM, 'gradcam++': GradCAMPlusPlus}

ALL_SUPPORT_METHODS = list(GRAD_FREE_METHOD_MAP.keys()
                           | GRAD_BASED_METHOD_MAP.keys())

IGNORE_LOSS_PARAMS = {
    'yolov5': ['loss_obj'],
    'yolov6': ['loss_cls'],
    'yolox': ['loss_obj'],
    'rtmdet': ['loss_cls'],
    'yolov7': ['loss_obj'],
    'yolov8': ['loss_cls'],
    'ppyoloe': ['loss_cls'],
}

# This parameter is required in some algorithms
# for calculating Loss
message_hub = MessageHub.get_current_instance()
message_hub.runtime_info['epoch'] = 0


def parse_args():
    parser = argparse.ArgumentParser(description='Visualize Box AM')
    parser.add_argument(
        'img', help='Image path, include image file, dir and URL.')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--method',
        default='gradcam',
        choices=ALL_SUPPORT_METHODS,
        help='Type of method to use, supports '
        f'{", ".join(ALL_SUPPORT_METHODS)}.')
    parser.add_argument(
        '--target-layers',
        default=['neck.out_layers[2]'],
        nargs='+',
        type=str,
        help='The target layers to get Box AM, if not set, the tool will '
        'specify the neck.out_layers[2]')
    parser.add_argument(
        '--out-dir', default='./output', help='Path to output file')
    parser.add_argument(
        '--show', action='store_true', help='Show the CAM results')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--score-thr', type=float, default=0.3, help='Bbox score threshold')
    parser.add_argument(
        '--topk',
        type=int,
        default=-1,
        help='Select topk predicted results to show. -1 means all.')
    parser.add_argument(
        '--max-shape',
        nargs='+',
        type=int,
        default=-1,
        help='max shapes. Its purpose is to save GPU memory. '
        'The activation map is scaled and then evaluated. '
        'If set to -1, it means no scaling.')
    parser.add_argument(
        '--preview-model',
        default=False,
        action='store_true',
        help='To preview all the model layers')
    parser.add_argument(
        '--norm-in-bbox',
        action='store_true',
        help='Normalize the AM image within each bbox')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g.
key="[(a,b),(c,d)]" ' 'Note that the quotation marks are necessary and that no white space ' 'is allowed.') # Only used by AblationCAM parser.add_argument( '--batch-size', type=int, default=1, help='batch of inference of AblationCAM') parser.add_argument( '--ratio-channels-to-ablate', type=int, default=0.5, help='Making it much faster of AblationCAM. ' 'The parameter controls how many channels should be ablated') args = parser.parse_args() return args def init_detector_and_visualizer(args, cfg): max_shape = args.max_shape if not isinstance(max_shape, list): max_shape = [args.max_shape] assert len(max_shape) == 1 or len(max_shape) == 2 model_wrapper = BoxAMDetectorWrapper( cfg, args.checkpoint, args.score_thr, device=args.device) if args.preview_model: print(model_wrapper.detector) print('\n Please remove `--preview-model` to get the BoxAM.') return None, None target_layers = [] for target_layer in args.target_layers: try: target_layers.append( eval(f'model_wrapper.detector.{target_layer}')) except Exception as e: print(model_wrapper.detector) raise RuntimeError('layer does not exist', e) ablationcam_extra_params = { 'batch_size': args.batch_size, 'ablation_layer': DetAblationLayer(), 'ratio_channels_to_ablate': args.ratio_channels_to_ablate } if args.method in GRAD_BASED_METHOD_MAP: method_class = GRAD_BASED_METHOD_MAP[args.method] is_need_grad = True else: method_class = GRAD_FREE_METHOD_MAP[args.method] is_need_grad = False boxam_detector_visualizer = BoxAMDetectorVisualizer( method_class, model_wrapper, target_layers, reshape_transform=partial( reshape_transform, max_shape=max_shape, is_need_grad=is_need_grad), is_need_grad=is_need_grad, extra_params=ablationcam_extra_params) return model_wrapper, boxam_detector_visualizer def main(): args = parse_args() # hard code ignore_loss_params = None for param_keys in IGNORE_LOSS_PARAMS: if param_keys in args.config: print(f'The algorithm currently used is {param_keys}') ignore_loss_params = IGNORE_LOSS_PARAMS[param_keys] break cfg = Config.fromfile(args.config) if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) if not os.path.exists(args.out_dir) and not args.show: os.mkdir(args.out_dir) model_wrapper, boxam_detector_visualizer = init_detector_and_visualizer( args, cfg) # get file list image_list, source_type = get_file_list(args.img) progress_bar = ProgressBar(len(image_list)) for image_path in image_list: image = cv2.imread(image_path) model_wrapper.set_input_data(image) # forward detection results result = model_wrapper()[0] pred_instances = result.pred_instances # Get candidate predict info with score threshold pred_instances = pred_instances[pred_instances.scores > args.score_thr] if len(pred_instances) == 0: warnings.warn('empty detection results! 
skip this') continue if args.topk > 0: pred_instances = pred_instances[:args.topk] targets = [ DetBoxScoreTarget( pred_instances, device=args.device, ignore_loss_params=ignore_loss_params) ] if args.method in GRAD_BASED_METHOD_MAP: model_wrapper.need_loss(True) model_wrapper.set_input_data(image, pred_instances) boxam_detector_visualizer.switch_activations_and_grads( model_wrapper) # get box am image grayscale_boxam = boxam_detector_visualizer(image, targets=targets) # draw cam on image pred_instances = pred_instances.numpy() image_with_bounding_boxes = boxam_detector_visualizer.show_am( image, pred_instances, grayscale_boxam, with_norm_in_bboxes=args.norm_in_bbox) if source_type['is_dir']: filename = os.path.relpath(image_path, args.img).replace('/', '_') else: filename = os.path.basename(image_path) out_file = None if args.show else os.path.join(args.out_dir, filename) if out_file: mmcv.imwrite(image_with_bounding_boxes, out_file) else: cv2.namedWindow(filename, 0) cv2.imshow(filename, image_with_bounding_boxes) cv2.waitKey(0) # switch if args.method in GRAD_BASED_METHOD_MAP: model_wrapper.need_loss(False) boxam_detector_visualizer.switch_activations_and_grads( model_wrapper) progress_bar.update() if not args.show: print(f'All done!' f'\nResults have been saved at {os.path.abspath(args.out_dir)}') if __name__ == '__main__': main()
9,251
32.400722
79
py
mmyolo
mmyolo-main/configs/rtmdet/rtmdet_x_syncbn_fast_8xb32-300e_coco.py
_base_ = './rtmdet_l_syncbn_fast_8xb32-300e_coco.py' # ========================modified parameters====================== deepen_factor = 1.33 widen_factor = 1.25 # =======================Unmodified in most cases================== model = dict( backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
457
37.166667
74
py
mmyolo
mmyolo-main/configs/rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py
_base_ = './rtmdet_s_syncbn_fast_8xb32-300e_coco.py' checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa # ========================modified parameters====================== deepen_factor = 0.167 widen_factor = 0.375 img_scale = _base_.img_scale # ratio range for random resize random_resize_ratio_range = (0.5, 2.0) # Number of cached images in mosaic mosaic_max_cached_images = 20 # Number of cached images in mixup mixup_max_cached_images = 10 # =======================Unmodified in most cases================== model = dict( backbone=dict( deepen_factor=deepen_factor, widen_factor=widen_factor, init_cfg=dict(checkpoint=checkpoint)), neck=dict( deepen_factor=deepen_factor, widen_factor=widen_factor, ), bbox_head=dict(head_module=dict(widen_factor=widen_factor))) train_pipeline = [ dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args), dict(type='LoadAnnotations', with_bbox=True), dict( type='Mosaic', img_scale=img_scale, use_cached=True, max_cached_images=mosaic_max_cached_images, # note random_pop=False, # note pad_val=114.0), dict( type='mmdet.RandomResize', # img_scale is (width, height) scale=(img_scale[0] * 2, img_scale[1] * 2), ratio_range=random_resize_ratio_range, resize_type='mmdet.Resize', keep_ratio=True), dict(type='mmdet.RandomCrop', crop_size=img_scale), dict(type='mmdet.YOLOXHSVRandomAug'), dict(type='mmdet.RandomFlip', prob=0.5), dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), dict( type='YOLOv5MixUp', use_cached=True, random_pop=False, max_cached_images=mixup_max_cached_images, prob=0.5), dict(type='mmdet.PackDetInputs') ] train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
1,988
32.711864
129
py
mmyolo
mmyolo-main/configs/rtmdet/rtmdet_tiny_fast_1xb12-40e_cat.py
_base_ = 'rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py'

data_root = './data/cat/'
class_name = ('cat', )
num_classes = len(class_name)
metainfo = dict(classes=class_name, palette=[(20, 220, 60)])

num_epochs_stage2 = 5

max_epochs = 40
train_batch_size_per_gpu = 12
train_num_workers = 4
val_batch_size_per_gpu = 1
val_num_workers = 2

load_from = 'https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco/rtmdet_tiny_syncbn_fast_8xb32-300e_coco_20230102_140117-dbb1dc83.pth'  # noqa

model = dict(
    backbone=dict(frozen_stages=4),
    bbox_head=dict(head_module=dict(num_classes=num_classes)),
    train_cfg=dict(assigner=dict(num_classes=num_classes)))

train_dataloader = dict(
    batch_size=train_batch_size_per_gpu,
    num_workers=train_num_workers,
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        ann_file='annotations/trainval.json',
        data_prefix=dict(img='images/')))

val_dataloader = dict(
    batch_size=val_batch_size_per_gpu,
    num_workers=val_num_workers,
    dataset=dict(
        metainfo=metainfo,
        data_root=data_root,
        ann_file='annotations/test.json',
        data_prefix=dict(img='images/')))

test_dataloader = val_dataloader

param_scheduler = [
    dict(
        type='LinearLR',
        start_factor=_base_.lr_start_factor,
        by_epoch=False,
        begin=0,
        end=30),
    dict(
        # use cosine lr from the halfway point (epoch 20) to max_epochs (40)
        type='CosineAnnealingLR',
        eta_min=_base_.base_lr * 0.05,
        begin=max_epochs // 2,
        end=max_epochs,
        T_max=max_epochs // 2,
        by_epoch=True,
        convert_to_iter_based=True),
]

_base_.custom_hooks[1].switch_epoch = max_epochs - num_epochs_stage2

val_evaluator = dict(ann_file=data_root + 'annotations/test.json')
test_evaluator = val_evaluator

default_hooks = dict(
    checkpoint=dict(interval=10, max_keep_ckpts=2, save_best='auto'),
    logger=dict(type='LoggerHook', interval=5))
train_cfg = dict(max_epochs=max_epochs, val_interval=10)
# visualizer = dict(vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')])  # noqa
2,147
29.253521
178
py
mmyolo
mmyolo-main/configs/rtmdet/rtmdet_m_syncbn_fast_8xb32-300e_coco.py
_base_ = './rtmdet_l_syncbn_fast_8xb32-300e_coco.py' # ========================modified parameters====================== deepen_factor = 0.67 widen_factor = 0.75 # =======================Unmodified in most cases================== model = dict( backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
457
37.166667
74
py
mmyolo
mmyolo-main/configs/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco.py
_base_ = './rtmdet_l_syncbn_fast_8xb32-300e_coco.py' checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa # ========================modified parameters====================== deepen_factor = 0.33 widen_factor = 0.5 img_scale = _base_.img_scale # ratio range for random resize random_resize_ratio_range = (0.5, 2.0) # Number of cached images in mosaic mosaic_max_cached_images = 40 # Number of cached images in mixup mixup_max_cached_images = 20 # =======================Unmodified in most cases================== model = dict( backbone=dict( deepen_factor=deepen_factor, widen_factor=widen_factor, # Since the checkpoint includes CUDA:0 data, # it must be forced to set map_location. # Once checkpoint is fixed, it can be removed. init_cfg=dict( type='Pretrained', prefix='backbone.', checkpoint=checkpoint, map_location='cpu')), neck=dict( deepen_factor=deepen_factor, widen_factor=widen_factor, ), bbox_head=dict(head_module=dict(widen_factor=widen_factor))) train_pipeline = [ dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args), dict(type='LoadAnnotations', with_bbox=True), dict( type='Mosaic', img_scale=img_scale, use_cached=True, max_cached_images=mosaic_max_cached_images, pad_val=114.0), dict( type='mmdet.RandomResize', # img_scale is (width, height) scale=(img_scale[0] * 2, img_scale[1] * 2), ratio_range=random_resize_ratio_range, # note resize_type='mmdet.Resize', keep_ratio=True), dict(type='mmdet.RandomCrop', crop_size=img_scale), dict(type='mmdet.YOLOXHSVRandomAug'), dict(type='mmdet.RandomFlip', prob=0.5), dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), dict( type='YOLOv5MixUp', use_cached=True, max_cached_images=mixup_max_cached_images), dict(type='mmdet.PackDetInputs') ] train_pipeline_stage2 = [ dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args), dict(type='LoadAnnotations', with_bbox=True), dict( type='mmdet.RandomResize', scale=img_scale, ratio_range=random_resize_ratio_range, # note resize_type='mmdet.Resize', keep_ratio=True), dict(type='mmdet.RandomCrop', crop_size=img_scale), dict(type='mmdet.YOLOXHSVRandomAug'), dict(type='mmdet.RandomFlip', prob=0.5), dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), dict(type='mmdet.PackDetInputs') ] train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) custom_hooks = [ dict( type='EMAHook', ema_type='ExpMomentumEMA', momentum=0.0002, update_buffers=True, strict_load=False, priority=49), dict( type='mmdet.PipelineSwitchHook', switch_epoch=_base_.max_epochs - _base_.num_epochs_stage2, switch_pipeline=train_pipeline_stage2) ]
3,129
32.655914
126
py
mmyolo
mmyolo-main/configs/rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco.py
_base_ = ['../_base_/default_runtime.py', '../_base_/det_p5_tta.py'] # ========================Frequently modified parameters====================== # -----data related----- data_root = 'data/coco/' # Path of train annotation file train_ann_file = 'annotations/instances_train2017.json' train_data_prefix = 'train2017/' # Prefix of train image path # Path of val annotation file val_ann_file = 'annotations/instances_val2017.json' val_data_prefix = 'val2017/' # Prefix of val image path num_classes = 80 # Number of classes for classification # Batch size of a single GPU during training train_batch_size_per_gpu = 32 # Worker to pre-fetch data for each single GPU during training train_num_workers = 10 # persistent_workers must be False if num_workers is 0. persistent_workers = True # -----train val related----- # Base learning rate for optim_wrapper. Corresponding to 8xb16=64 bs base_lr = 0.004 max_epochs = 300 # Maximum training epochs # Change train_pipeline for final 20 epochs (stage 2) num_epochs_stage2 = 20 model_test_cfg = dict( # The config of multi-label for multi-class prediction. multi_label=True, # The number of boxes before NMS nms_pre=30000, score_thr=0.001, # Threshold to filter out boxes. nms=dict(type='nms', iou_threshold=0.65), # NMS type and threshold max_per_img=300) # Max number of detections of each image # ========================Possible modified parameters======================== # -----data related----- img_scale = (640, 640) # width, height # ratio range for random resize random_resize_ratio_range = (0.1, 2.0) # Cached images number in mosaic mosaic_max_cached_images = 40 # Number of cached images in mixup mixup_max_cached_images = 20 # Dataset type, this will be used to define the dataset dataset_type = 'YOLOv5CocoDataset' # Batch size of a single GPU during validation val_batch_size_per_gpu = 32 # Worker to pre-fetch data for each single GPU during validation val_num_workers = 10 # Config of batch shapes. Only on val. batch_shapes_cfg = dict( type='BatchShapePolicy', batch_size=val_batch_size_per_gpu, img_size=img_scale[0], size_divisor=32, extra_pad_ratio=0.5) # -----model related----- # The scaling factor that controls the depth of the network structure deepen_factor = 1.0 # The scaling factor that controls the width of the network structure widen_factor = 1.0 # Strides of multi-scale prior box strides = [8, 16, 32] norm_cfg = dict(type='BN') # Normalization config # -----train val related----- lr_start_factor = 1.0e-5 dsl_topk = 13 # Number of bbox selected in each level loss_cls_weight = 1.0 loss_bbox_weight = 2.0 qfl_beta = 2.0 # beta of QualityFocalLoss weight_decay = 0.05 # Save model checkpoint and validation intervals save_checkpoint_intervals = 10 # validation intervals in stage 2 val_interval_stage2 = 1 # The maximum checkpoints to keep. max_keep_ckpts = 3 # single-scale training is recommended to # be turned on, which can speed up training. 
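# cudnn benchmark auto-tunes convolution kernels and only pays off when
# input shapes stay fixed, which is why it pairs with single-scale training.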
env_cfg = dict(cudnn_benchmark=True) # ===============================Unmodified in most cases==================== model = dict( type='YOLODetector', data_preprocessor=dict( type='YOLOv5DetDataPreprocessor', mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], bgr_to_rgb=False), backbone=dict( type='CSPNeXt', arch='P5', expand_ratio=0.5, deepen_factor=deepen_factor, widen_factor=widen_factor, channel_attention=True, norm_cfg=norm_cfg, act_cfg=dict(type='SiLU', inplace=True)), neck=dict( type='CSPNeXtPAFPN', deepen_factor=deepen_factor, widen_factor=widen_factor, in_channels=[256, 512, 1024], out_channels=256, num_csp_blocks=3, expand_ratio=0.5, norm_cfg=norm_cfg, act_cfg=dict(type='SiLU', inplace=True)), bbox_head=dict( type='RTMDetHead', head_module=dict( type='RTMDetSepBNHeadModule', num_classes=num_classes, in_channels=256, stacked_convs=2, feat_channels=256, norm_cfg=norm_cfg, act_cfg=dict(type='SiLU', inplace=True), share_conv=True, pred_kernel_size=1, featmap_strides=strides), prior_generator=dict( type='mmdet.MlvlPointGenerator', offset=0, strides=strides), bbox_coder=dict(type='DistancePointBBoxCoder'), loss_cls=dict( type='mmdet.QualityFocalLoss', use_sigmoid=True, beta=qfl_beta, loss_weight=loss_cls_weight), loss_bbox=dict(type='mmdet.GIoULoss', loss_weight=loss_bbox_weight)), train_cfg=dict( assigner=dict( type='BatchDynamicSoftLabelAssigner', num_classes=num_classes, topk=dsl_topk, iou_calculator=dict(type='mmdet.BboxOverlaps2D')), allowed_border=-1, pos_weight=-1, debug=False), test_cfg=model_test_cfg, ) train_pipeline = [ dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args), dict(type='LoadAnnotations', with_bbox=True), dict( type='Mosaic', img_scale=img_scale, use_cached=True, max_cached_images=mosaic_max_cached_images, pad_val=114.0), dict( type='mmdet.RandomResize', # img_scale is (width, height) scale=(img_scale[0] * 2, img_scale[1] * 2), ratio_range=random_resize_ratio_range, resize_type='mmdet.Resize', keep_ratio=True), dict(type='mmdet.RandomCrop', crop_size=img_scale), dict(type='mmdet.YOLOXHSVRandomAug'), dict(type='mmdet.RandomFlip', prob=0.5), dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), dict( type='YOLOv5MixUp', use_cached=True, max_cached_images=mixup_max_cached_images), dict(type='mmdet.PackDetInputs') ] train_pipeline_stage2 = [ dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args), dict(type='LoadAnnotations', with_bbox=True), dict( type='mmdet.RandomResize', scale=img_scale, ratio_range=random_resize_ratio_range, resize_type='mmdet.Resize', keep_ratio=True), dict(type='mmdet.RandomCrop', crop_size=img_scale), dict(type='mmdet.YOLOXHSVRandomAug'), dict(type='mmdet.RandomFlip', prob=0.5), dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), dict(type='mmdet.PackDetInputs') ] test_pipeline = [ dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args), dict(type='YOLOv5KeepRatioResize', scale=img_scale), dict( type='LetterResize', scale=img_scale, allow_scale_up=False, pad_val=dict(img=114)), dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), dict( type='mmdet.PackDetInputs', meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor', 'pad_param')) ] train_dataloader = dict( batch_size=train_batch_size_per_gpu, num_workers=train_num_workers, persistent_workers=persistent_workers, pin_memory=True, collate_fn=dict(type='yolov5_collate'), sampler=dict(type='DefaultSampler', shuffle=True), dataset=dict( type=dataset_type, 
data_root=data_root, ann_file=train_ann_file, data_prefix=dict(img=train_data_prefix), filter_cfg=dict(filter_empty_gt=True, min_size=32), pipeline=train_pipeline)) val_dataloader = dict( batch_size=val_batch_size_per_gpu, num_workers=val_num_workers, persistent_workers=persistent_workers, pin_memory=True, drop_last=False, sampler=dict(type='DefaultSampler', shuffle=False), dataset=dict( type=dataset_type, data_root=data_root, ann_file=val_ann_file, data_prefix=dict(img=val_data_prefix), test_mode=True, batch_shapes_cfg=batch_shapes_cfg, pipeline=test_pipeline)) test_dataloader = val_dataloader # Reduce evaluation time val_evaluator = dict( type='mmdet.CocoMetric', proposal_nums=(100, 1, 10), ann_file=data_root + val_ann_file, metric='bbox') test_evaluator = val_evaluator # optimizer optim_wrapper = dict( type='OptimWrapper', optimizer=dict(type='AdamW', lr=base_lr, weight_decay=weight_decay), paramwise_cfg=dict( norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) # learning rate param_scheduler = [ dict( type='LinearLR', start_factor=lr_start_factor, by_epoch=False, begin=0, end=1000), dict( # use cosine lr from 150 to 300 epoch type='CosineAnnealingLR', eta_min=base_lr * 0.05, begin=max_epochs // 2, end=max_epochs, T_max=max_epochs // 2, by_epoch=True, convert_to_iter_based=True), ] # hooks default_hooks = dict( checkpoint=dict( type='CheckpointHook', interval=save_checkpoint_intervals, max_keep_ckpts=max_keep_ckpts # only keep latest 3 checkpoints )) custom_hooks = [ dict( type='EMAHook', ema_type='ExpMomentumEMA', momentum=0.0002, update_buffers=True, strict_load=False, priority=49), dict( type='mmdet.PipelineSwitchHook', switch_epoch=max_epochs - num_epochs_stage2, switch_pipeline=train_pipeline_stage2) ] train_cfg = dict( type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=save_checkpoint_intervals, dynamic_intervals=[(max_epochs - num_epochs_stage2, val_interval_stage2)]) val_cfg = dict(type='ValLoop') test_cfg = dict(type='TestLoop')
9,806
31.154098
78
py
mmyolo
mmyolo-main/configs/rtmdet/rtmdet-ins_s_syncbn_fast_8xb32-300e_coco.py
_base_ = './rtmdet_s_syncbn_fast_8xb32-300e_coco.py' widen_factor = 0.5 model = dict( bbox_head=dict( type='RTMDetInsSepBNHead', head_module=dict( type='RTMDetInsSepBNHeadModule', use_sigmoid_cls=True, widen_factor=widen_factor), loss_mask=dict( type='mmdet.DiceLoss', loss_weight=2.0, eps=5e-6, reduction='mean')), test_cfg=dict( multi_label=True, nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100, mask_thr_binary=0.5)) _base_.test_pipeline[-2] = dict( type='LoadAnnotations', with_bbox=True, with_mask=True, _scope_='mmdet') val_dataloader = dict(dataset=dict(pipeline=_base_.test_pipeline)) test_dataloader = val_dataloader val_evaluator = dict(metric=['bbox', 'segm']) test_evaluator = val_evaluator
916
27.65625
76
py
mmyolo
mmyolo-main/configs/rtmdet/rotated/rtmdet-r_m_syncbn_fast_2xb4-36e_dota.py
_base_ = './rtmdet-r_l_syncbn_fast_2xb4-36e_dota.py' checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth' # noqa # ========================modified parameters====================== deepen_factor = 0.67 widen_factor = 0.75 # Submission dir for result submit submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' # =======================Unmodified in most cases================== model = dict( backbone=dict( deepen_factor=deepen_factor, widen_factor=widen_factor, init_cfg=dict(checkpoint=checkpoint)), neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), bbox_head=dict(head_module=dict(widen_factor=widen_factor))) # Inference on test dataset and format the output results # for submission. Note: the test set has no annotation. # test_dataloader = dict( # dataset=dict( # data_root=_base_.data_root, # ann_file='', # test set has no annotation # data_prefix=dict(img_path=_base_.test_data_prefix), # pipeline=_base_.test_pipeline)) # test_evaluator = dict( # type='mmrotate.DOTAMetric', # format_only=True, # merge_patches=True, # outfile_prefix=submission_dir)
1,274
36.5
145
py
mmyolo
mmyolo-main/configs/rtmdet/rotated/rtmdet-r_tiny_fast_1xb8-36e_dota-ms.py
_base_ = './rtmdet-r_l_syncbn_fast_2xb4-36e_dota-ms.py' checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa # ========================modified parameters====================== deepen_factor = 0.167 widen_factor = 0.375 # Batch size of a single GPU during training train_batch_size_per_gpu = 8 # Submission dir for result submit submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' # =======================Unmodified in most cases================== model = dict( backbone=dict( deepen_factor=deepen_factor, widen_factor=widen_factor, init_cfg=dict(checkpoint=checkpoint)), neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), bbox_head=dict(head_module=dict(widen_factor=widen_factor))) train_dataloader = dict(batch_size=train_batch_size_per_gpu) # Inference on test dataset and format the output results # for submission. Note: the test set has no annotation. # test_dataloader = dict( # dataset=dict( # data_root=_base_.data_root, # ann_file='', # test set has no annotation # data_prefix=dict(img_path=_base_.test_data_prefix), # pipeline=_base_.test_pipeline)) # test_evaluator = dict( # type='mmrotate.DOTAMetric', # format_only=True, # merge_patches=True, # outfile_prefix=submission_dir)
1,400
34.923077
129
py
mmyolo
mmyolo-main/configs/rtmdet/rotated/rtmdet-r_tiny_fast_1xb8-36e_dota.py
_base_ = './rtmdet-r_l_syncbn_fast_2xb4-36e_dota.py' checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth' # noqa # ========================modified parameters====================== deepen_factor = 0.167 widen_factor = 0.375 # Batch size of a single GPU during training train_batch_size_per_gpu = 8 # Submission dir for result submit submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' # =======================Unmodified in most cases================== model = dict( backbone=dict( deepen_factor=deepen_factor, widen_factor=widen_factor, init_cfg=dict(checkpoint=checkpoint)), neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), bbox_head=dict(head_module=dict(widen_factor=widen_factor))) train_dataloader = dict(batch_size=train_batch_size_per_gpu) # Inference on test dataset and format the output results # for submission. Note: the test set has no annotation. # test_dataloader = dict( # dataset=dict( # data_root=_base_.data_root, # ann_file='', # test set has no annotation # data_prefix=dict(img_path=_base_.test_data_prefix), # pipeline=_base_.test_pipeline)) # test_evaluator = dict( # type='mmrotate.DOTAMetric', # format_only=True, # merge_patches=True, # outfile_prefix=submission_dir)
1,397
34.846154
129
py
mmyolo
mmyolo-main/configs/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-36e_dota-ms.py
_base_ = './rtmdet-r_l_syncbn_fast_2xb4-36e_dota.py' # ========================modified parameters====================== data_root = 'data/split_ms_dota/' # Path of test images folder test_data_prefix = 'test/images/' # Submission dir for result submit submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' # =======================Unmodified in most cases================== train_dataloader = dict(dataset=dict(data_root=data_root)) val_dataloader = dict(dataset=dict(data_root=data_root)) # Inference on val dataset test_dataloader = val_dataloader # Inference on test dataset and format the output results # for submission. Note: the test set has no annotation. # test_dataloader = dict( # dataset=dict( # data_root=data_root, # ann_file='', # test set has no annotation # data_prefix=dict(img_path=test_data_prefix), # pipeline=_base_.test_pipeline)) # test_evaluator = dict( # type='mmrotate.DOTAMetric', # format_only=True, # merge_patches=True, # outfile_prefix=submission_dir)
1,058
33.16129
69
py
mmyolo
mmyolo-main/configs/rtmdet/rotated/rtmdet-r_s_fast_1xb8-36e_dota.py
_base_ = './rtmdet-r_l_syncbn_fast_2xb4-36e_dota.py' checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa # ========================modified parameters====================== deepen_factor = 0.33 widen_factor = 0.5 # Batch size of a single GPU during training train_batch_size_per_gpu = 8 # Submission dir for result submit submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' # =======================Unmodified in most cases================== model = dict( backbone=dict( deepen_factor=deepen_factor, widen_factor=widen_factor, init_cfg=dict(checkpoint=checkpoint)), neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), bbox_head=dict(head_module=dict(widen_factor=widen_factor))) train_dataloader = dict(batch_size=train_batch_size_per_gpu) # Inference on test dataset and format the output results # for submission. Note: the test set has no annotation. # test_dataloader = dict( # dataset=dict( # data_root=_base_.data_root, # ann_file='', # test set has no annotation # data_prefix=dict(img_path=_base_.test_data_prefix), # pipeline=_base_.test_pipeline)) # test_evaluator = dict( # type='mmrotate.DOTAMetric', # format_only=True, # merge_patches=True, # outfile_prefix=submission_dir)
1,391
34.692308
126
py
mmyolo
mmyolo-main/configs/rtmdet/rotated/rtmdet-r_l_syncbn_fast_coco-pretrain_2xb4-36e_dota-ms.py
_base_ = './rtmdet-r_l_syncbn_fast_2xb4-36e_dota-ms.py' load_from = 'https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco/rtmdet_l_syncbn_fast_8xb32-300e_coco_20230102_135928-ee3abdc4.pth' # noqa # Submission dir for result submit submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' # Inference on test dataset and format the output results # for submission. Note: the test set has no annotation. # test_dataloader = dict( # dataset=dict( # data_root=_base_.data_root, # ann_file='', # test set has no annotation # data_prefix=dict(img_path=_base_.test_data_prefix), # pipeline=_base_.test_pipeline)) # test_evaluator = dict( # type='mmrotate.DOTAMetric', # format_only=True, # merge_patches=True, # outfile_prefix=submission_dir)
837
38.904762
172
py
mmyolo
mmyolo-main/configs/rtmdet/rotated/rtmdet-r_s_fast_1xb8-36e_dota-ms.py
_base_ = './rtmdet-r_l_syncbn_fast_2xb4-36e_dota-ms.py' checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-s_imagenet_600e.pth' # noqa # ========================modified parameters====================== deepen_factor = 0.33 widen_factor = 0.5 # Batch size of a single GPU during training train_batch_size_per_gpu = 8 # Submission dir for result submit submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' # =======================Unmodified in most cases================== model = dict( backbone=dict( deepen_factor=deepen_factor, widen_factor=widen_factor, init_cfg=dict(checkpoint=checkpoint)), neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), bbox_head=dict(head_module=dict(widen_factor=widen_factor))) train_dataloader = dict(batch_size=train_batch_size_per_gpu) # Inference on test dataset and format the output results # for submission. Note: the test set has no annotation. # test_dataloader = dict( # dataset=dict( # data_root=_base_.data_root, # ann_file='', # test set has no annotation # data_prefix=dict(img_path=_base_.test_data_prefix), # pipeline=_base_.test_pipeline)) # test_evaluator = dict( # type='mmrotate.DOTAMetric', # format_only=True, # merge_patches=True, # outfile_prefix=submission_dir)
1,394
34.769231
126
py
mmyolo
mmyolo-main/configs/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-aug-100e_dota.py
_base_ = './rtmdet-r_l_syncbn_fast_2xb4-36e_dota.py'

# This config uses a longer schedule with MixUp, Mosaic and Random Rotate.

checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth'  # noqa

# ========================modified parameters======================
# Base learning rate for optim_wrapper. Corresponding to 1xb8=8 bs
base_lr = 0.00025  # 0.004 / 16
lr_start_factor = 1.0e-5
max_epochs = 100  # Maximum training epochs
# Change train_pipeline for final 10 epochs (stage 2)
num_epochs_stage2 = 10

img_scale = (1024, 1024)  # width, height
# ratio range for random resize
random_resize_ratio_range = (0.1, 2.0)
# Number of cached images in mosaic
mosaic_max_cached_images = 40
# Number of cached images in mixup
mixup_max_cached_images = 20
# ratio for random rotate
random_rotate_ratio = 0.5
# label ids for rect objs
rotate_rect_obj_labels = [9, 11]

# Save model checkpoint and validation intervals
save_checkpoint_intervals = 1
# validation intervals in stage 2
val_interval_stage2 = 1
# The maximum checkpoints to keep.
max_keep_ckpts = 3

# Submission dir for result submit
submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission'

# =======================Unmodified in most cases==================
train_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
    dict(type='LoadAnnotations', with_bbox=True, box_type='qbox'),
    dict(
        type='mmrotate.ConvertBoxType',
        box_type_mapping=dict(gt_bboxes='rbox')),
    dict(
        type='Mosaic',
        img_scale=img_scale,
        use_cached=True,
        max_cached_images=mosaic_max_cached_images,
        pad_val=114.0),
    dict(
        type='mmdet.RandomResize',
        # img_scale is (width, height)
        scale=(img_scale[0] * 2, img_scale[1] * 2),
        ratio_range=random_resize_ratio_range,
        resize_type='mmdet.Resize',
        keep_ratio=True),
    dict(
        type='mmrotate.RandomRotate',
        prob=random_rotate_ratio,
        angle_range=180,
        rotate_type='mmrotate.Rotate',
        rect_obj_labels=rotate_rect_obj_labels),
    dict(type='mmdet.RandomCrop', crop_size=img_scale),
    dict(type='mmdet.YOLOXHSVRandomAug'),
    dict(
        type='mmdet.RandomFlip',
        prob=0.75,
        direction=['horizontal', 'vertical', 'diagonal']),
    dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))),
    dict(
        type='YOLOv5MixUp',
        use_cached=True,
        max_cached_images=mixup_max_cached_images),
    dict(type='mmdet.PackDetInputs')
]

train_pipeline_stage2 = [
    dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
    dict(type='LoadAnnotations', with_bbox=True, box_type='qbox'),
    dict(
        type='mmrotate.ConvertBoxType',
        box_type_mapping=dict(gt_bboxes='rbox')),
    dict(
        type='mmdet.RandomResize',
        scale=img_scale,
        ratio_range=random_resize_ratio_range,
        resize_type='mmdet.Resize',
        keep_ratio=True),
    dict(
        type='mmrotate.RandomRotate',
        prob=random_rotate_ratio,
        angle_range=180,
        rotate_type='mmrotate.Rotate',
        rect_obj_labels=rotate_rect_obj_labels),
    dict(type='mmdet.RandomCrop', crop_size=img_scale),
    dict(type='mmdet.YOLOXHSVRandomAug'),
    dict(
        type='mmdet.RandomFlip',
        prob=0.75,
        direction=['horizontal', 'vertical', 'diagonal']),
    dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))),
    dict(type='mmdet.PackDetInputs')
]

train_dataloader = dict(dataset=dict(pipeline=train_pipeline))

# learning rate
param_scheduler = [
    dict(
        type='LinearLR',
        start_factor=lr_start_factor,
        by_epoch=False,
        begin=0,
        end=1000),
    dict(
        # use cosine lr from epoch 50 (max_epochs // 2) to epoch 100
        type='CosineAnnealingLR',
        eta_min=base_lr * 0.05,
        begin=max_epochs // 2,
        end=max_epochs,
        T_max=max_epochs // 2,
        by_epoch=True,
convert_to_iter_based=True), ] # hooks default_hooks = dict( checkpoint=dict( type='CheckpointHook', interval=save_checkpoint_intervals, max_keep_ckpts=max_keep_ckpts, # only keep latest 3 checkpoints save_best='auto')) custom_hooks = [ dict( type='EMAHook', ema_type='ExpMomentumEMA', momentum=0.0002, update_buffers=True, strict_load=False, priority=49), dict( type='mmdet.PipelineSwitchHook', switch_epoch=max_epochs - num_epochs_stage2, switch_pipeline=train_pipeline_stage2) ] train_cfg = dict( type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=save_checkpoint_intervals, dynamic_intervals=[(max_epochs - num_epochs_stage2, val_interval_stage2)]) # Inference on test dataset and format the output results # for submission. Note: the test set has no annotation. # test_dataloader = dict( # dataset=dict( # data_root=_base_.data_root, # ann_file='', # test set has no annotation # data_prefix=dict(img_path=_base_.test_data_prefix), # pipeline=_base_.test_pipeline)) # test_evaluator = dict( # type='mmrotate.DOTAMetric', # format_only=True, # merge_patches=True, # outfile_prefix=submission_dir)
5,386
30.87574
145
py
mmyolo
mmyolo-main/configs/rtmdet/rotated/rtmdet-r_m_syncbn_fast_2xb4-36e_dota-ms.py
_base_ = './rtmdet-r_l_syncbn_fast_2xb4-36e_dota-ms.py' checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-m_8xb256-rsb-a1-600e_in1k-ecb3bbd9.pth' # noqa # ========================modified parameters====================== deepen_factor = 0.67 widen_factor = 0.75 # Submission dir for result submit submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' # =======================Unmodified in most cases================== model = dict( backbone=dict( deepen_factor=deepen_factor, widen_factor=widen_factor, init_cfg=dict(checkpoint=checkpoint)), neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor), bbox_head=dict(head_module=dict(widen_factor=widen_factor))) # Inference on test dataset and format the output results # for submission. Note: the test set has no annotation. # test_dataloader = dict( # dataset=dict( # data_root=_base_.data_root, # ann_file='', # test set has no annotation # data_prefix=dict(img_path=_base_.test_data_prefix), # pipeline=_base_.test_pipeline)) # test_evaluator = dict( # type='mmrotate.DOTAMetric', # format_only=True, # merge_patches=True, # outfile_prefix=submission_dir)
1,277
36.588235
145
py
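Only the scaling factors change between the L and M variants; every channel count and block count is derived from them. A small sketch of that arithmetic (the exact rounding used inside CSPNeXt is an assumption here):

deepen_factor = 0.67  # scales the number of blocks per stage
widen_factor = 0.75   # scales the number of channels per stage

def scale_width(channels):
    # integer truncation is an illustrative assumption
    return max(int(channels * widen_factor), 1)

def scale_depth(num_blocks):
    return max(round(num_blocks * deepen_factor), 1)

# The L neck channels [256, 512, 1024] become [192, 384, 768] at M width.
print([scale_width(c) for c in (256, 512, 1024)])  # [192, 384, 768]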
mmyolo
mmyolo-main/configs/rtmdet/rotated/rtmdet-r_l_syncbn_fast_2xb4-36e_dota.py
_base_ = '../../_base_/default_runtime.py' checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-l_8xb256-rsb-a1-600e_in1k-6a760974.pth' # noqa # ========================Frequently modified parameters====================== # -----data related----- data_root = 'data/split_ss_dota/' # Path of train annotation folder train_ann_file = 'trainval/annfiles/' train_data_prefix = 'trainval/images/' # Prefix of train image path # Path of val annotation folder val_ann_file = 'trainval/annfiles/' val_data_prefix = 'trainval/images/' # Prefix of val image path # Path of test images folder test_data_prefix = 'test/images/' # Submission dir for result submit submission_dir = './work_dirs/{{fileBasenameNoExtension}}/submission' num_classes = 15 # Number of classes for classification # Batch size of a single GPU during training train_batch_size_per_gpu = 4 # Worker to pre-fetch data for each single GPU during training train_num_workers = 8 # persistent_workers must be False if num_workers is 0. persistent_workers = True # -----train val related----- # Base learning rate for optim_wrapper. Corresponding to 1xb8=8 bs base_lr = 0.00025 # 0.004 / 16 max_epochs = 36 # Maximum training epochs model_test_cfg = dict( # The config of multi-label for multi-class prediction. multi_label=True, # Decode rbox with angle, For RTMDet-R, Defaults to True. # When set to True, use rbox coder such as DistanceAnglePointCoder # When set to False, use hbox coder such as DistancePointBBoxCoder # different setting lead to different AP. decode_with_angle=True, # The number of boxes before NMS nms_pre=30000, score_thr=0.05, # Threshold to filter out boxes. nms=dict(type='nms_rotated', iou_threshold=0.1), # NMS type and threshold max_per_img=2000) # Max number of detections of each image # ========================Possible modified parameters======================== # -----data related----- img_scale = (1024, 1024) # width, height # ratio for random rotate random_rotate_ratio = 0.5 # label ids for rect objs rotate_rect_obj_labels = [9, 11] # Dataset type, this will be used to define the dataset dataset_type = 'YOLOv5DOTADataset' # Batch size of a single GPU during validation val_batch_size_per_gpu = 8 # Worker to pre-fetch data for each single GPU during validation val_num_workers = 8 # Config of batch shapes. Only on val. Not use in RTMDet-R batch_shapes_cfg = None # -----model related----- # The scaling factor that controls the depth of the network structure deepen_factor = 1.0 # The scaling factor that controls the width of the network structure widen_factor = 1.0 # Strides of multi-scale prior box strides = [8, 16, 32] # The angle definition for model angle_version = 'le90' # le90, le135, oc are available options norm_cfg = dict(type='BN') # Normalization config # -----train val related----- lr_start_factor = 1.0e-5 dsl_topk = 13 # Number of bbox selected in each level loss_cls_weight = 1.0 loss_bbox_weight = 2.0 qfl_beta = 2.0 # beta of QualityFocalLoss weight_decay = 0.05 # Save model checkpoint and validation intervals save_checkpoint_intervals = 1 # The maximum checkpoints to keep. max_keep_ckpts = 3 # single-scale training is recommended to # be turned on, which can speed up training. 
env_cfg = dict(cudnn_benchmark=True) # ===============================Unmodified in most cases==================== model = dict( type='YOLODetector', data_preprocessor=dict( type='YOLOv5DetDataPreprocessor', mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], bgr_to_rgb=False), backbone=dict( type='CSPNeXt', arch='P5', expand_ratio=0.5, deepen_factor=deepen_factor, widen_factor=widen_factor, channel_attention=True, norm_cfg=norm_cfg, act_cfg=dict(type='SiLU', inplace=True), init_cfg=dict( type='Pretrained', prefix='backbone.', checkpoint=checkpoint)), neck=dict( type='CSPNeXtPAFPN', deepen_factor=deepen_factor, widen_factor=widen_factor, in_channels=[256, 512, 1024], out_channels=256, num_csp_blocks=3, expand_ratio=0.5, norm_cfg=norm_cfg, act_cfg=dict(type='SiLU', inplace=True)), bbox_head=dict( type='RTMDetRotatedHead', head_module=dict( type='RTMDetRotatedSepBNHeadModule', num_classes=num_classes, widen_factor=widen_factor, in_channels=256, stacked_convs=2, feat_channels=256, norm_cfg=norm_cfg, act_cfg=dict(type='SiLU', inplace=True), share_conv=True, pred_kernel_size=1, featmap_strides=strides), prior_generator=dict( type='mmdet.MlvlPointGenerator', offset=0, strides=strides), bbox_coder=dict( type='DistanceAnglePointCoder', angle_version=angle_version), loss_cls=dict( type='mmdet.QualityFocalLoss', use_sigmoid=True, beta=qfl_beta, loss_weight=loss_cls_weight), loss_bbox=dict( type='mmrotate.RotatedIoULoss', mode='linear', loss_weight=loss_bbox_weight), angle_version=angle_version, # Used for angle encode and decode, similar to bbox coder angle_coder=dict(type='mmrotate.PseudoAngleCoder'), # If true, it will apply loss_bbox on horizontal box, and angle_loss # needs to be specified. In this case the loss_bbox should use # horizontal box loss e.g. IoULoss. Arg details can be seen in # `docs/zh_cn/tutorials/rotated_detection.md` use_hbbox_loss=False, loss_angle=None), train_cfg=dict( assigner=dict( type='BatchDynamicSoftLabelAssigner', num_classes=num_classes, topk=dsl_topk, iou_calculator=dict(type='mmrotate.RBboxOverlaps2D'), # RBboxOverlaps2D doesn't support batch input, use loop instead. 
batch_iou=False), allowed_border=-1, pos_weight=-1, debug=False), test_cfg=model_test_cfg, ) train_pipeline = [ dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args), dict(type='LoadAnnotations', with_bbox=True, box_type='qbox'), dict( type='mmrotate.ConvertBoxType', box_type_mapping=dict(gt_bboxes='rbox')), dict(type='mmdet.Resize', scale=img_scale, keep_ratio=True), dict( type='mmdet.RandomFlip', prob=0.75, direction=['horizontal', 'vertical', 'diagonal']), dict( type='mmrotate.RandomRotate', prob=random_rotate_ratio, angle_range=180, rotate_type='mmrotate.Rotate', rect_obj_labels=rotate_rect_obj_labels), dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), dict(type='RegularizeRotatedBox', angle_version=angle_version), dict(type='mmdet.PackDetInputs') ] val_pipeline = [ dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args), dict(type='mmdet.Resize', scale=img_scale, keep_ratio=True), dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), dict( type='LoadAnnotations', with_bbox=True, box_type='qbox', _scope_='mmdet'), dict( type='mmrotate.ConvertBoxType', box_type_mapping=dict(gt_bboxes='rbox')), dict( type='mmdet.PackDetInputs', meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor')) ] test_pipeline = [ dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args), dict(type='mmdet.Resize', scale=img_scale, keep_ratio=True), dict(type='mmdet.Pad', size=img_scale, pad_val=dict(img=(114, 114, 114))), dict( type='mmdet.PackDetInputs', meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor')) ] train_dataloader = dict( batch_size=train_batch_size_per_gpu, num_workers=train_num_workers, persistent_workers=persistent_workers, pin_memory=True, collate_fn=dict(type='yolov5_collate'), sampler=dict(type='DefaultSampler', shuffle=True), dataset=dict( type=dataset_type, data_root=data_root, ann_file=train_ann_file, data_prefix=dict(img_path=train_data_prefix), filter_cfg=dict(filter_empty_gt=True), pipeline=train_pipeline)) val_dataloader = dict( batch_size=val_batch_size_per_gpu, num_workers=val_num_workers, persistent_workers=persistent_workers, pin_memory=True, drop_last=False, sampler=dict(type='DefaultSampler', shuffle=False), dataset=dict( type=dataset_type, data_root=data_root, ann_file=val_ann_file, data_prefix=dict(img_path=val_data_prefix), test_mode=True, batch_shapes_cfg=batch_shapes_cfg, pipeline=val_pipeline)) val_evaluator = dict(type='mmrotate.DOTAMetric', metric='mAP') # Inference on val dataset test_dataloader = val_dataloader test_evaluator = val_evaluator # Inference on test dataset and format the output results # for submission. Note: the test set has no annotation. 
# test_dataloader = dict( # batch_size=val_batch_size_per_gpu, # num_workers=val_num_workers, # persistent_workers=True, # drop_last=False, # sampler=dict(type='DefaultSampler', shuffle=False), # dataset=dict( # type=dataset_type, # data_root=data_root, # data_prefix=dict(img_path=test_data_prefix), # test_mode=True, # batch_shapes_cfg=batch_shapes_cfg, # pipeline=test_pipeline)) # test_evaluator = dict( # type='mmrotate.DOTAMetric', # format_only=True, # merge_patches=True, # outfile_prefix=submission_dir) # optimizer optim_wrapper = dict( type='OptimWrapper', optimizer=dict(type='AdamW', lr=base_lr, weight_decay=weight_decay), paramwise_cfg=dict( norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True)) # learning rate param_scheduler = [ dict( type='LinearLR', start_factor=lr_start_factor, by_epoch=False, begin=0, end=1000), dict( # use cosine lr from 150 to 300 epoch type='CosineAnnealingLR', eta_min=base_lr * 0.05, begin=max_epochs // 2, end=max_epochs, T_max=max_epochs // 2, by_epoch=True, convert_to_iter_based=True), ] # hooks default_hooks = dict( checkpoint=dict( type='CheckpointHook', interval=save_checkpoint_intervals, max_keep_ckpts=max_keep_ckpts, # only keep latest 3 checkpoints save_best='auto')) custom_hooks = [ dict( type='EMAHook', ema_type='ExpMomentumEMA', momentum=0.0002, update_buffers=True, strict_load=False, priority=49) ] train_cfg = dict( type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=save_checkpoint_intervals) val_cfg = dict(type='ValLoop') test_cfg = dict(type='TestLoop') visualizer = dict(type='mmrotate.RotLocalVisualizer')
11,268
32.942771
145
py
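model_test_cfg in the config above fixes the post-processing order: score filtering, top-k truncation, rotated NMS, then a per-image cap. A sketch of that order on numpy arrays; `nms_rotated` is assumed to be a callable returning kept indices, not an actual import:

def postprocess(boxes, scores, nms_rotated):
    """boxes: (N, 5) rotated boxes, scores: (N,), both numpy arrays."""
    keep = scores > 0.05                        # score_thr
    boxes, scores = boxes[keep], scores[keep]
    order = scores.argsort()[::-1][:30000]      # nms_pre
    boxes, scores = boxes[order], scores[order]
    keep = nms_rotated(boxes, scores, iou_threshold=0.1)  # nms_rotated
    return boxes[keep][:2000], scores[keep][:2000]        # max_per_img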
mmyolo
mmyolo-main/configs/rtmdet/cspnext_imagenet_pretrain/cspnext-tiny_8xb256-rsb-a1-600e_in1k.py
_base_ = './cspnext-s_8xb256-rsb-a1-600e_in1k.py'

model = dict(
    backbone=dict(deepen_factor=0.167, widen_factor=0.375),
    head=dict(in_channels=384))
157
25.333333
59
py
mmyolo
mmyolo-main/configs/rtmdet/cspnext_imagenet_pretrain/cspnext-s_8xb256-rsb-a1-600e_in1k.py
_base_ = [
    'mmcls::_base_/datasets/imagenet_bs256_rsb_a12.py',
    'mmcls::_base_/schedules/imagenet_bs2048_rsb.py',
    'mmcls::_base_/default_runtime.py'
]

custom_imports = dict(
    imports=['mmdet.models', 'mmyolo.models'], allow_failed_imports=False)

model = dict(
    type='ImageClassifier',
    backbone=dict(
        type='mmyolo.CSPNeXt',
        arch='P5',
        out_indices=(4, ),
        expand_ratio=0.5,
        deepen_factor=0.33,
        widen_factor=0.5,
        channel_attention=True,
        norm_cfg=dict(type='BN'),
        act_cfg=dict(type='mmyolo.SiLU')),
    neck=dict(type='GlobalAveragePooling'),
    head=dict(
        type='LinearClsHead',
        num_classes=1000,
        in_channels=512,
        loss=dict(
            type='LabelSmoothLoss',
            label_smooth_val=0.1,
            mode='original',
            loss_weight=1.0),
        topk=(1, 5)),
    train_cfg=dict(augments=[
        dict(type='Mixup', alpha=0.2, num_classes=1000),
        dict(type='CutMix', alpha=1.0, num_classes=1000)
    ]))

# dataset settings
train_dataloader = dict(sampler=dict(type='RepeatAugSampler', shuffle=True))

# schedule settings
optim_wrapper = dict(
    optimizer=dict(weight_decay=0.01),
    paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.),
)

param_scheduler = [
    # warm up learning rate scheduler
    dict(
        type='LinearLR',
        start_factor=0.0001,
        by_epoch=True,
        begin=0,
        end=5,
        # update by iter
        convert_to_iter_based=True),
    # main learning rate scheduler
    dict(
        type='CosineAnnealingLR',
        T_max=595,
        eta_min=1.0e-6,
        by_epoch=True,
        begin=5,
        end=600)
]

train_cfg = dict(by_epoch=True, max_epochs=600)
1,766
24.985294
76
py
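The head above trains with LabelSmoothLoss (label_smooth_val=0.1, mode='original'), i.e. cross-entropy against targets softened toward a uniform distribution. A minimal PyTorch sketch of that loss, assuming the 'original' mode spreads eps uniformly over all classes:

import torch.nn.functional as F

def label_smooth_ce(logits, target, eps=0.1):
    """Cross-entropy with (1 - eps) on the true class and eps / K elsewhere."""
    num_classes = logits.size(-1)
    log_prob = F.log_softmax(logits, dim=-1)
    smoothed = F.one_hot(target, num_classes).float() * (1 - eps) \
        + eps / num_classes
    return -(smoothed * log_prob).sum(-1).mean()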
mmyolo
mmyolo-main/configs/rtmdet/distillation/kd_s_rtmdet_m_neck_300e_coco.py
_base_ = '../rtmdet_s_syncbn_fast_8xb32-300e_coco.py' teacher_ckpt = 'https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_m_syncbn_fast_8xb32-300e_coco/rtmdet_m_syncbn_fast_8xb32-300e_coco_20230102_135952-40af4fe8.pth' # noqa: E501 norm_cfg = dict(type='BN', affine=False, track_running_stats=False) model = dict( _delete_=True, _scope_='mmrazor', type='FpnTeacherDistill', architecture=dict( cfg_path='mmyolo::rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco.py'), teacher=dict( cfg_path='mmyolo::rtmdet/rtmdet_m_syncbn_fast_8xb32-300e_coco.py'), teacher_ckpt=teacher_ckpt, distiller=dict( type='ConfigurableDistiller', # `recorders` are used to record various intermediate results during # the model forward. student_recorders=dict( fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'), fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'), fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv'), ), teacher_recorders=dict( fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'), fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'), fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv')), # `connectors` are adaptive layers which usually map teacher's and # students features to the same dimension. connectors=dict( fpn0_s=dict( type='ConvModuleConnector', in_channel=128, out_channel=192, bias=False, norm_cfg=norm_cfg, act_cfg=None), fpn0_t=dict( type='NormConnector', in_channels=192, norm_cfg=norm_cfg), fpn1_s=dict( type='ConvModuleConnector', in_channel=128, out_channel=192, bias=False, norm_cfg=norm_cfg, act_cfg=None), fpn1_t=dict( type='NormConnector', in_channels=192, norm_cfg=norm_cfg), fpn2_s=dict( type='ConvModuleConnector', in_channel=128, out_channel=192, bias=False, norm_cfg=norm_cfg, act_cfg=None), fpn2_t=dict( type='NormConnector', in_channels=192, norm_cfg=norm_cfg)), distill_losses=dict( loss_fpn0=dict(type='ChannelWiseDivergence', loss_weight=1), loss_fpn1=dict(type='ChannelWiseDivergence', loss_weight=1), loss_fpn2=dict(type='ChannelWiseDivergence', loss_weight=1)), # `loss_forward_mappings` are mappings between distill loss forward # arguments and records. loss_forward_mappings=dict( loss_fpn0=dict( preds_S=dict( from_student=True, recorder='fpn0', connector='fpn0_s'), preds_T=dict( from_student=False, recorder='fpn0', connector='fpn0_t')), loss_fpn1=dict( preds_S=dict( from_student=True, recorder='fpn1', connector='fpn1_s'), preds_T=dict( from_student=False, recorder='fpn1', connector='fpn1_t')), loss_fpn2=dict( preds_S=dict( from_student=True, recorder='fpn2', connector='fpn2_s'), preds_T=dict( from_student=False, recorder='fpn2', connector='fpn2_t'))))) find_unused_parameters = True custom_hooks = [ dict( type='EMAHook', ema_type='ExpMomentumEMA', momentum=0.0002, update_buffers=True, strict_load=False, priority=49), dict( type='mmdet.PipelineSwitchHook', switch_epoch=_base_.max_epochs - _base_.num_epochs_stage2, switch_pipeline=_base_.train_pipeline_stage2), # stop distillation after the 280th epoch dict(type='mmrazor.StopDistillHook', stop_epoch=280) ]
4,108
40.09
181
py
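ChannelWiseDivergence compares student and teacher FPN maps channel by channel: each channel's spatial map is turned into a distribution with a softmax and matched with a KL divergence. A hedged sketch following the CWD paper; mmrazor's exact temperature and reduction settings are not shown in this config:

import torch.nn.functional as F

def channel_wise_divergence(feat_s, feat_t, tau=1.0, loss_weight=1.0):
    """feat_s, feat_t: (N, C, H, W); the connectors above already map
    both to the same channel count C."""
    n, c, h, w = feat_s.shape
    log_s = F.log_softmax(feat_s.reshape(n * c, h * w) / tau, dim=1)
    p_t = F.softmax(feat_t.reshape(n * c, h * w) / tau, dim=1)
    kl = (p_t * (p_t.clamp(min=1e-12).log() - log_s)).sum(dim=1).mean()
    return loss_weight * tau**2 * kl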
mmyolo
mmyolo-main/configs/rtmdet/distillation/kd_l_rtmdet_x_neck_300e_coco.py
_base_ = '../rtmdet_l_syncbn_fast_8xb32-300e_coco.py' teacher_ckpt = 'https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_x_syncbn_fast_8xb32-300e_coco/rtmdet_x_syncbn_fast_8xb32-300e_coco_20221231_100345-b85cd476.pth' # noqa: E501 norm_cfg = dict(type='BN', affine=False, track_running_stats=False) model = dict( _delete_=True, _scope_='mmrazor', type='FpnTeacherDistill', architecture=dict( cfg_path='mmyolo::rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco.py'), teacher=dict( cfg_path='mmyolo::rtmdet/rtmdet_x_syncbn_fast_8xb32-300e_coco.py'), teacher_ckpt=teacher_ckpt, distiller=dict( type='ConfigurableDistiller', # `recorders` are used to record various intermediate results during # the model forward. student_recorders=dict( fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'), fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'), fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv'), ), teacher_recorders=dict( fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'), fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'), fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv')), # `connectors` are adaptive layers which usually map teacher's and # students features to the same dimension. connectors=dict( fpn0_s=dict( type='ConvModuleConnector', in_channel=256, out_channel=320, bias=False, norm_cfg=norm_cfg, act_cfg=None), fpn0_t=dict( type='NormConnector', in_channels=320, norm_cfg=norm_cfg), fpn1_s=dict( type='ConvModuleConnector', in_channel=256, out_channel=320, bias=False, norm_cfg=norm_cfg, act_cfg=None), fpn1_t=dict( type='NormConnector', in_channels=320, norm_cfg=norm_cfg), fpn2_s=dict( type='ConvModuleConnector', in_channel=256, out_channel=320, bias=False, norm_cfg=norm_cfg, act_cfg=None), fpn2_t=dict( type='NormConnector', in_channels=320, norm_cfg=norm_cfg)), distill_losses=dict( loss_fpn0=dict(type='ChannelWiseDivergence', loss_weight=1), loss_fpn1=dict(type='ChannelWiseDivergence', loss_weight=1), loss_fpn2=dict(type='ChannelWiseDivergence', loss_weight=1)), # `loss_forward_mappings` are mappings between distill loss forward # arguments and records. loss_forward_mappings=dict( loss_fpn0=dict( preds_S=dict( from_student=True, recorder='fpn0', connector='fpn0_s'), preds_T=dict( from_student=False, recorder='fpn0', connector='fpn0_t')), loss_fpn1=dict( preds_S=dict( from_student=True, recorder='fpn1', connector='fpn1_s'), preds_T=dict( from_student=False, recorder='fpn1', connector='fpn1_t')), loss_fpn2=dict( preds_S=dict( from_student=True, recorder='fpn2', connector='fpn2_s'), preds_T=dict( from_student=False, recorder='fpn2', connector='fpn2_t'))))) find_unused_parameters = True custom_hooks = [ dict( type='EMAHook', ema_type='ExpMomentumEMA', momentum=0.0002, update_buffers=True, strict_load=False, priority=49), dict( type='mmdet.PipelineSwitchHook', switch_epoch=_base_.max_epochs - _base_.num_epochs_stage2, switch_pipeline=_base_.train_pipeline_stage2), # stop distillation after the 280th epoch dict(type='mmrazor.StopDistillHook', stop_epoch=280) ]
4,108
40.09
181
py
mmyolo
mmyolo-main/configs/rtmdet/distillation/kd_m_rtmdet_l_neck_300e_coco.py
_base_ = '../rtmdet_m_syncbn_fast_8xb32-300e_coco.py' teacher_ckpt = 'https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco/rtmdet_l_syncbn_fast_8xb32-300e_coco_20230102_135928-ee3abdc4.pth' # noqa: E501 norm_cfg = dict(type='BN', affine=False, track_running_stats=False) model = dict( _delete_=True, _scope_='mmrazor', type='FpnTeacherDistill', architecture=dict( cfg_path='mmyolo::rtmdet/rtmdet_m_syncbn_fast_8xb32-300e_coco.py'), teacher=dict( cfg_path='mmyolo::rtmdet/rtmdet_l_syncbn_fast_8xb32-300e_coco.py'), teacher_ckpt=teacher_ckpt, distiller=dict( type='ConfigurableDistiller', # `recorders` are used to record various intermediate results during # the model forward. student_recorders=dict( fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'), fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'), fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv'), ), teacher_recorders=dict( fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'), fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'), fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv')), # `connectors` are adaptive layers which usually map teacher's and # students features to the same dimension. connectors=dict( fpn0_s=dict( type='ConvModuleConnector', in_channel=192, out_channel=256, bias=False, norm_cfg=norm_cfg, act_cfg=None), fpn0_t=dict( type='NormConnector', in_channels=256, norm_cfg=norm_cfg), fpn1_s=dict( type='ConvModuleConnector', in_channel=192, out_channel=256, bias=False, norm_cfg=norm_cfg, act_cfg=None), fpn1_t=dict( type='NormConnector', in_channels=256, norm_cfg=norm_cfg), fpn2_s=dict( type='ConvModuleConnector', in_channel=192, out_channel=256, bias=False, norm_cfg=norm_cfg, act_cfg=None), fpn2_t=dict( type='NormConnector', in_channels=256, norm_cfg=norm_cfg)), distill_losses=dict( loss_fpn0=dict(type='ChannelWiseDivergence', loss_weight=1), loss_fpn1=dict(type='ChannelWiseDivergence', loss_weight=1), loss_fpn2=dict(type='ChannelWiseDivergence', loss_weight=1)), # `loss_forward_mappings` are mappings between distill loss forward # arguments and records. loss_forward_mappings=dict( loss_fpn0=dict( preds_S=dict( from_student=True, recorder='fpn0', connector='fpn0_s'), preds_T=dict( from_student=False, recorder='fpn0', connector='fpn0_t')), loss_fpn1=dict( preds_S=dict( from_student=True, recorder='fpn1', connector='fpn1_s'), preds_T=dict( from_student=False, recorder='fpn1', connector='fpn1_t')), loss_fpn2=dict( preds_S=dict( from_student=True, recorder='fpn2', connector='fpn2_s'), preds_T=dict( from_student=False, recorder='fpn2', connector='fpn2_t'))))) find_unused_parameters = True custom_hooks = [ dict( type='EMAHook', ema_type='ExpMomentumEMA', momentum=0.0002, update_buffers=True, strict_load=False, priority=49), dict( type='mmdet.PipelineSwitchHook', switch_epoch=_base_.max_epochs - _base_.num_epochs_stage2, switch_pipeline=_base_.train_pipeline_stage2), # stop distillation after the 280th epoch dict(type='mmrazor.StopDistillHook', stop_epoch=280) ]
4,108
40.09
181
py
mmyolo
mmyolo-main/configs/rtmdet/distillation/kd_tiny_rtmdet_s_neck_300e_coco.py
_base_ = '../rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py' teacher_ckpt = 'https://download.openmmlab.com/mmyolo/v0/rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco/rtmdet_s_syncbn_fast_8xb32-300e_coco_20221230_182329-0a8c901a.pth' # noqa: E501 norm_cfg = dict(type='BN', affine=False, track_running_stats=False) model = dict( _delete_=True, _scope_='mmrazor', type='FpnTeacherDistill', architecture=dict( cfg_path='mmyolo::rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py'), teacher=dict( cfg_path='mmyolo::rtmdet/rtmdet_s_syncbn_fast_8xb32-300e_coco.py'), teacher_ckpt=teacher_ckpt, distiller=dict( type='ConfigurableDistiller', # `recorders` are used to record various intermediate results during # the model forward. student_recorders=dict( fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'), fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'), fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv'), ), teacher_recorders=dict( fpn0=dict(type='ModuleOutputs', source='neck.out_layers.0.conv'), fpn1=dict(type='ModuleOutputs', source='neck.out_layers.1.conv'), fpn2=dict(type='ModuleOutputs', source='neck.out_layers.2.conv')), # `connectors` are adaptive layers which usually map teacher's and # students features to the same dimension. connectors=dict( fpn0_s=dict( type='ConvModuleConnector', in_channel=96, out_channel=128, bias=False, norm_cfg=norm_cfg, act_cfg=None), fpn0_t=dict( type='NormConnector', in_channels=128, norm_cfg=norm_cfg), fpn1_s=dict( type='ConvModuleConnector', in_channel=96, out_channel=128, bias=False, norm_cfg=norm_cfg, act_cfg=None), fpn1_t=dict( type='NormConnector', in_channels=128, norm_cfg=norm_cfg), fpn2_s=dict( type='ConvModuleConnector', in_channel=96, out_channel=128, bias=False, norm_cfg=norm_cfg, act_cfg=None), fpn2_t=dict( type='NormConnector', in_channels=128, norm_cfg=norm_cfg)), distill_losses=dict( loss_fpn0=dict(type='ChannelWiseDivergence', loss_weight=1), loss_fpn1=dict(type='ChannelWiseDivergence', loss_weight=1), loss_fpn2=dict(type='ChannelWiseDivergence', loss_weight=1)), # `loss_forward_mappings` are mappings between distill loss forward # arguments and records. loss_forward_mappings=dict( loss_fpn0=dict( preds_S=dict( from_student=True, recorder='fpn0', connector='fpn0_s'), preds_T=dict( from_student=False, recorder='fpn0', connector='fpn0_t')), loss_fpn1=dict( preds_S=dict( from_student=True, recorder='fpn1', connector='fpn1_s'), preds_T=dict( from_student=False, recorder='fpn1', connector='fpn1_t')), loss_fpn2=dict( preds_S=dict( from_student=True, recorder='fpn2', connector='fpn2_s'), preds_T=dict( from_student=False, recorder='fpn2', connector='fpn2_t'))))) find_unused_parameters = True custom_hooks = [ dict( type='EMAHook', ema_type='ExpMomentumEMA', momentum=0.0002, update_buffers=True, strict_load=False, priority=49), dict( type='mmdet.PipelineSwitchHook', switch_epoch=_base_.max_epochs - _base_.num_epochs_stage2, switch_pipeline=_base_.train_pipeline_stage2), # stop distillation after the 280th epoch dict(type='mmrazor.StopDistillHook', stop_epoch=280) ]
4,111
40.12
181
py
mmyolo
mmyolo-main/configs/ppyoloe/ppyoloe_plus_l_fast_8xb8-80e_coco.py
_base_ = './ppyoloe_plus_s_fast_8xb8-80e_coco.py'

# The pretrained model is obtained and converted from official PPYOLOE.
# https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/configs/ppyoloe/README.md
load_from = 'https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_pretrain/ppyoloe_plus_l_obj365_pretrained-3dd89562.pth'  # noqa

deepen_factor = 1.0
widen_factor = 1.0

model = dict(
    backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
636
36.470588
133
py
mmyolo
mmyolo-main/configs/ppyoloe/ppyoloe_x_fast_8xb16-300e_coco.py
_base_ = './ppyoloe_s_fast_8xb32-300e_coco.py'

# The pretrained model is obtained and converted from official PPYOLOE.
# https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/configs/ppyoloe/README.md
checkpoint = 'https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_pretrain/cspresnet_x_imagenet1k_pretrained-81c33ccb.pth'  # noqa

deepen_factor = 1.33
widen_factor = 1.25

train_batch_size_per_gpu = 16

model = dict(
    backbone=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
        init_cfg=dict(checkpoint=checkpoint)),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))

train_dataloader = dict(batch_size=train_batch_size_per_gpu)
793
32.083333
135
py
mmyolo
mmyolo-main/configs/ppyoloe/ppyoloe_s_fast_8xb32-300e_coco.py
_base_ = './ppyoloe_plus_s_fast_8xb8-80e_coco.py'

# The pretrained model is obtained and converted from official PPYOLOE.
# https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/configs/ppyoloe/README.md
checkpoint = 'https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_pretrain/cspresnet_s_imagenet1k_pretrained-2be81763.pth'  # noqa

train_batch_size_per_gpu = 32
max_epochs = 300

# Base learning rate for optim_wrapper
base_lr = 0.01

model = dict(
    data_preprocessor=dict(
        mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
        std=[0.229 * 255., 0.224 * 255., 0.225 * 255.]),
    backbone=dict(
        block_cfg=dict(use_alpha=False),
        init_cfg=dict(
            type='Pretrained',
            prefix='backbone.',
            checkpoint=checkpoint,
            map_location='cpu')),
    train_cfg=dict(initial_epoch=100))

train_dataloader = dict(batch_size=train_batch_size_per_gpu)

optim_wrapper = dict(optimizer=dict(lr=base_lr))

default_hooks = dict(param_scheduler=dict(total_epochs=int(max_epochs * 1.2)))

train_cfg = dict(max_epochs=max_epochs)

# PPYOLOE plus uses an obj365 pretrained model, but PPYOLOE does not,
# so `load_from` needs to be set to None.
load_from = None
1,207
31.648649
135
py
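Unlike the plus variants, this config normalizes inputs with ImageNet statistics (the mean/std above are the familiar 0-1 values scaled to 0-255). The per-pixel transform it implies, as a short sketch:

import numpy as np

mean = np.array([0.485, 0.456, 0.406]) * 255
std = np.array([0.229, 0.224, 0.225]) * 255

def normalize(img_rgb):
    """img_rgb: (H, W, 3) float array in 0..255, already BGR->RGB converted."""
    return (img_rgb - mean) / std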
mmyolo
mmyolo-main/configs/ppyoloe/ppyoloe_plus_s_fast_8xb8-80e_coco.py
_base_ = ['../_base_/default_runtime.py', '../_base_/det_p5_tta.py'] # dataset settings data_root = 'data/coco/' dataset_type = 'YOLOv5CocoDataset' # parameters that often need to be modified img_scale = (640, 640) # width, height deepen_factor = 0.33 widen_factor = 0.5 max_epochs = 80 num_classes = 80 save_epoch_intervals = 5 train_batch_size_per_gpu = 8 train_num_workers = 8 val_batch_size_per_gpu = 1 val_num_workers = 2 # The pretrained model is geted and converted from official PPYOLOE. # https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/configs/ppyoloe/README.md load_from = 'https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_pretrain/ppyoloe_plus_s_obj365_pretrained-bcfe8478.pth' # noqa # persistent_workers must be False if num_workers is 0. persistent_workers = True # Base learning rate for optim_wrapper base_lr = 0.001 strides = [8, 16, 32] model = dict( type='YOLODetector', data_preprocessor=dict( # use this to support multi_scale training type='PPYOLOEDetDataPreprocessor', pad_size_divisor=32, batch_augments=[ dict( type='PPYOLOEBatchRandomResize', random_size_range=(320, 800), interval=1, size_divisor=32, random_interp=True, keep_ratio=False) ], mean=[0., 0., 0.], std=[255., 255., 255.], bgr_to_rgb=True), backbone=dict( type='PPYOLOECSPResNet', deepen_factor=deepen_factor, widen_factor=widen_factor, block_cfg=dict( type='PPYOLOEBasicBlock', shortcut=True, use_alpha=True), norm_cfg=dict(type='BN', momentum=0.1, eps=1e-5), act_cfg=dict(type='SiLU', inplace=True), attention_cfg=dict( type='EffectiveSELayer', act_cfg=dict(type='HSigmoid')), use_large_stem=True), neck=dict( type='PPYOLOECSPPAFPN', in_channels=[256, 512, 1024], out_channels=[192, 384, 768], deepen_factor=deepen_factor, widen_factor=widen_factor, num_csplayer=1, num_blocks_per_layer=3, block_cfg=dict( type='PPYOLOEBasicBlock', shortcut=False, use_alpha=False), norm_cfg=dict(type='BN', momentum=0.1, eps=1e-5), act_cfg=dict(type='SiLU', inplace=True), drop_block_cfg=None, use_spp=True), bbox_head=dict( type='PPYOLOEHead', head_module=dict( type='PPYOLOEHeadModule', num_classes=num_classes, in_channels=[192, 384, 768], widen_factor=widen_factor, featmap_strides=strides, reg_max=16, norm_cfg=dict(type='BN', momentum=0.1, eps=1e-5), act_cfg=dict(type='SiLU', inplace=True), num_base_priors=1), prior_generator=dict( type='mmdet.MlvlPointGenerator', offset=0.5, strides=strides), bbox_coder=dict(type='DistancePointBBoxCoder'), loss_cls=dict( type='mmdet.VarifocalLoss', use_sigmoid=True, alpha=0.75, gamma=2.0, iou_weighted=True, reduction='sum', loss_weight=1.0), loss_bbox=dict( type='IoULoss', iou_mode='giou', bbox_format='xyxy', reduction='mean', loss_weight=2.5, return_iou=False), # Since the dflloss is implemented differently in the official # and mmdet, we're going to divide loss_weight by 4. 
loss_dfl=dict( type='mmdet.DistributionFocalLoss', reduction='mean', loss_weight=0.5 / 4)), train_cfg=dict( initial_epoch=30, initial_assigner=dict( type='BatchATSSAssigner', num_classes=num_classes, topk=9, iou_calculator=dict(type='mmdet.BboxOverlaps2D')), assigner=dict( type='BatchTaskAlignedAssigner', num_classes=num_classes, topk=13, alpha=1, beta=6, eps=1e-9)), test_cfg=dict( multi_label=True, nms_pre=1000, score_thr=0.01, nms=dict(type='nms', iou_threshold=0.7), max_per_img=300)) train_pipeline = [ dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args), dict(type='LoadAnnotations', with_bbox=True), dict(type='PPYOLOERandomDistort'), dict(type='mmdet.Expand', mean=(103.53, 116.28, 123.675)), dict(type='PPYOLOERandomCrop'), dict(type='mmdet.RandomFlip', prob=0.5), dict( type='mmdet.PackDetInputs', meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', 'flip_direction')) ] train_dataloader = dict( batch_size=train_batch_size_per_gpu, num_workers=train_num_workers, persistent_workers=persistent_workers, pin_memory=True, sampler=dict(type='DefaultSampler', shuffle=True), collate_fn=dict(type='yolov5_collate', use_ms_training=True), dataset=dict( type=dataset_type, data_root=data_root, ann_file='annotations/instances_train2017.json', data_prefix=dict(img='train2017/'), filter_cfg=dict(filter_empty_gt=True, min_size=0), pipeline=train_pipeline)) test_pipeline = [ dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args), dict( type='mmdet.FixShapeResize', width=img_scale[0], height=img_scale[1], keep_ratio=False, interpolation='bicubic'), dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), dict( type='mmdet.PackDetInputs', meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor')) ] val_dataloader = dict( batch_size=val_batch_size_per_gpu, num_workers=val_num_workers, persistent_workers=persistent_workers, pin_memory=True, drop_last=False, sampler=dict(type='DefaultSampler', shuffle=False), dataset=dict( type=dataset_type, data_root=data_root, test_mode=True, data_prefix=dict(img='val2017/'), filter_cfg=dict(filter_empty_gt=True, min_size=0), ann_file='annotations/instances_val2017.json', pipeline=test_pipeline)) test_dataloader = val_dataloader param_scheduler = None optim_wrapper = dict( type='OptimWrapper', optimizer=dict( type='SGD', lr=base_lr, momentum=0.9, weight_decay=5e-4, nesterov=False), paramwise_cfg=dict(norm_decay_mult=0.)) default_hooks = dict( param_scheduler=dict( type='PPYOLOEParamSchedulerHook', warmup_min_iter=1000, start_factor=0., warmup_epochs=5, min_lr_ratio=0.0, total_epochs=int(max_epochs * 1.2)), checkpoint=dict( type='CheckpointHook', interval=save_epoch_intervals, save_best='auto', max_keep_ckpts=3)) custom_hooks = [ dict( type='EMAHook', ema_type='ExpMomentumEMA', momentum=0.0002, update_buffers=True, strict_load=False, priority=49) ] val_evaluator = dict( type='mmdet.CocoMetric', proposal_nums=(100, 1, 10), ann_file=data_root + 'annotations/instances_val2017.json', metric='bbox') test_evaluator = val_evaluator train_cfg = dict( type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=save_epoch_intervals) val_cfg = dict(type='ValLoop') test_cfg = dict(type='TestLoop')
7,582
30.595833
133
py
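The head above predicts each box side as a distribution over reg_max=16 bins and supervises it with DistributionFocalLoss (hence the loss-weight note in the config). Decoding takes the softmax expectation over the bins; a sketch, assuming the GFL convention of reg_max + 1 bin centers 0..reg_max:

import torch
import torch.nn.functional as F

def dfl_decode(reg_logits, reg_max=16):
    """reg_logits: (..., 4, reg_max + 1) per-side bin logits.
    Returns the expected distance per side, (..., 4), in stride units."""
    bins = torch.arange(reg_max + 1, dtype=reg_logits.dtype)
    return (F.softmax(reg_logits, dim=-1) * bins).sum(-1)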
mmyolo
mmyolo-main/configs/ppyoloe/ppyoloe_l_fast_8xb20-300e_coco.py
_base_ = './ppyoloe_s_fast_8xb32-300e_coco.py'

# The pretrained model is obtained and converted from official PPYOLOE.
# https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/configs/ppyoloe/README.md
checkpoint = 'https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_pretrain/cspresnet_l_imagenet1k_pretrained-c0010e6c.pth'  # noqa

deepen_factor = 1.0
widen_factor = 1.0

train_batch_size_per_gpu = 20

model = dict(
    backbone=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
        init_cfg=dict(checkpoint=checkpoint)),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))

train_dataloader = dict(batch_size=train_batch_size_per_gpu)
791
32
135
py
mmyolo
mmyolo-main/configs/ppyoloe/ppyoloe_plus_x_fast_8xb8-80e_coco.py
_base_ = './ppyoloe_plus_s_fast_8xb8-80e_coco.py'

# The pretrained model is obtained and converted from official PPYOLOE.
# https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/configs/ppyoloe/README.md
load_from = 'https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_pretrain/ppyoloe_plus_x_obj365_pretrained-43a8000d.pth'  # noqa

deepen_factor = 1.33
widen_factor = 1.25

model = dict(
    backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
638
36.588235
133
py
mmyolo
mmyolo-main/configs/ppyoloe/ppyoloe_m_fast_8xb28-300e_coco.py
_base_ = './ppyoloe_s_fast_8xb32-300e_coco.py'

# The pretrained model is obtained and converted from official PPYOLOE.
# https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/configs/ppyoloe/README.md
checkpoint = 'https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_pretrain/cspresnet_m_imagenet1k_pretrained-09f1eba2.pth'  # noqa

deepen_factor = 0.67
widen_factor = 0.75

train_batch_size_per_gpu = 28

model = dict(
    backbone=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
        init_cfg=dict(checkpoint=checkpoint)),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))

train_dataloader = dict(batch_size=train_batch_size_per_gpu)
793
32.083333
135
py
mmyolo
mmyolo-main/configs/ppyoloe/ppyoloe_plus_m_fast_8xb8-80e_coco.py
_base_ = './ppyoloe_plus_s_fast_8xb8-80e_coco.py'

# The pretrained model is obtained and converted from official PPYOLOE.
# https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.5/configs/ppyoloe/README.md
load_from = 'https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_pretrain/ppyoloe_plus_m_ojb365_pretrained-03206892.pth'  # noqa

deepen_factor = 0.67
widen_factor = 0.75

model = dict(
    backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
638
36.588235
133
py
mmyolo
mmyolo-main/configs/ppyoloe/ppyoloe_s_fast_8xb32-400e_coco.py
_base_ = './ppyoloe_s_fast_8xb32-300e_coco.py'

max_epochs = 400

model = dict(train_cfg=dict(initial_epoch=133))

default_hooks = dict(param_scheduler=dict(total_epochs=int(max_epochs * 1.2)))

train_cfg = dict(max_epochs=max_epochs)
235
22.6
78
py
mmyolo
mmyolo-main/configs/ppyoloe/ppyoloe_plus_s_fast_1xb12-40e_cat.py
# Compared to other same scale models, this configuration consumes too much
# GPU memory and is not validated for now
_base_ = 'ppyoloe_plus_s_fast_8xb8-80e_coco.py'

data_root = './data/cat/'
class_name = ('cat', )
num_classes = len(class_name)
metainfo = dict(classes=class_name, palette=[(20, 220, 60)])

num_last_epochs = 5

max_epochs = 40
train_batch_size_per_gpu = 12
train_num_workers = 2

load_from = 'https://download.openmmlab.com/mmyolo/v0/ppyoloe/ppyoloe_plus_s_fast_8xb8-80e_coco/ppyoloe_plus_s_fast_8xb8-80e_coco_20230101_154052-9fee7619.pth'  # noqa

model = dict(
    backbone=dict(frozen_stages=4),
    bbox_head=dict(head_module=dict(num_classes=num_classes)),
    train_cfg=dict(
        initial_assigner=dict(num_classes=num_classes),
        assigner=dict(num_classes=num_classes)))

train_dataloader = dict(
    batch_size=train_batch_size_per_gpu,
    num_workers=train_num_workers,
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        ann_file='annotations/trainval.json',
        data_prefix=dict(img='images/')))

val_dataloader = dict(
    dataset=dict(
        metainfo=metainfo,
        data_root=data_root,
        ann_file='annotations/test.json',
        data_prefix=dict(img='images/')))

test_dataloader = val_dataloader

val_evaluator = dict(ann_file=data_root + 'annotations/test.json')
test_evaluator = val_evaluator

default_hooks = dict(
    param_scheduler=dict(
        warmup_min_iter=10,
        warmup_epochs=3,
        total_epochs=int(max_epochs * 1.2)),
    checkpoint=dict(interval=10, max_keep_ckpts=2, save_best='auto'),
    logger=dict(type='LoggerHook', interval=5))

train_cfg = dict(max_epochs=max_epochs, val_interval=10)
# visualizer = dict(vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')])  # noqa
1,833
31.175439
167
py
mmyolo
mmyolo-main/configs/yolov5/yolov5_l-v61_syncbn_fast_8xb16-300e_coco.py
_base_ = './yolov5_m-v61_syncbn_fast_8xb16-300e_coco.py'
deepen_factor = 1.0
widen_factor = 1.0

model = dict(
    backbone=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
369
22.125
64
py
mmyolo
mmyolo-main/configs/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py
_base_ = 'yolov5_s-v61_syncbn_8xb16-300e_coco.py'

# fast means faster training speed,
# but less flexibility for multitasking
model = dict(
    data_preprocessor=dict(
        type='YOLOv5DetDataPreprocessor',
        mean=[0., 0., 0.],
        std=[255., 255., 255.],
        bgr_to_rgb=True))

train_dataloader = dict(collate_fn=dict(type='yolov5_collate'))
361
26.846154
63
py
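The 'fast' variant moves normalization onto the device (YOLOv5DetDataPreprocessor) and switches to yolov5_collate, which stacks images and concatenates all instances into one tensor keyed by image index. A rough sketch of that collation idea; the field names are simplified stand-ins for mmyolo's actual data structures, not its real API:

import torch

def yolov5_style_collate(batch):
    """batch: list of dicts with 'img' (C, H, W) and 'instances' (num, 5)."""
    imgs = torch.stack([sample['img'] for sample in batch])
    targets = []
    for i, sample in enumerate(batch):
        inst = sample['instances']
        batch_idx = inst.new_full((inst.size(0), 1), i)  # image index column
        targets.append(torch.cat([batch_idx, inst], dim=1))
    return imgs, torch.cat(targets, dim=0)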
mmyolo
mmyolo-main/configs/yolov5/yolov5_x-p6-v62_syncbn_fast_8xb16-300e_coco.py
_base_ = './yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco.py'
deepen_factor = 1.33
widen_factor = 1.25

model = dict(
    backbone=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
373
23.933333
64
py
mmyolo
mmyolo-main/configs/yolov5/yolov5_s-v61_syncbn_fast_1xb4-300e_balloon.py
_base_ = './yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'

# ========================modified parameters======================
data_root = 'data/balloon/'
# Path of train annotation file
train_ann_file = 'train.json'
train_data_prefix = 'train/'  # Prefix of train image path
# Path of val annotation file
val_ann_file = 'val.json'
val_data_prefix = 'val/'  # Prefix of val image path
metainfo = {
    'classes': ('balloon', ),
    'palette': [
        (220, 20, 60),
    ]
}
num_classes = 1

train_batch_size_per_gpu = 4
train_num_workers = 2
log_interval = 1

# =======================Unmodified in most cases==================
train_dataloader = dict(
    batch_size=train_batch_size_per_gpu,
    num_workers=train_num_workers,
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        data_prefix=dict(img=train_data_prefix),
        ann_file=train_ann_file))
val_dataloader = dict(
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        data_prefix=dict(img=val_data_prefix),
        ann_file=val_ann_file))
test_dataloader = val_dataloader
val_evaluator = dict(ann_file=data_root + val_ann_file)
test_evaluator = val_evaluator
model = dict(bbox_head=dict(head_module=dict(num_classes=num_classes)))
default_hooks = dict(logger=dict(interval=log_interval))
1,312
29.534884
71
py
mmyolo
mmyolo-main/configs/yolov5/yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco.py
_base_ = './yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco.py'

# ========================modified parameters======================
deepen_factor = 0.67
widen_factor = 0.75
lr_factor = 0.1
affine_scale = 0.9
loss_cls_weight = 0.3
loss_obj_weight = 0.7
mixup_prob = 0.1

# =======================Unmodified in most cases==================
num_classes = _base_.num_classes
num_det_layers = _base_.num_det_layers
img_scale = _base_.img_scale

model = dict(
    backbone=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    bbox_head=dict(
        head_module=dict(widen_factor=widen_factor),
        loss_cls=dict(loss_weight=loss_cls_weight *
                      (num_classes / 80 * 3 / num_det_layers)),
        loss_obj=dict(loss_weight=loss_obj_weight *
                      ((img_scale[0] / 640)**2 * 3 / num_det_layers))))

pre_transform = _base_.pre_transform
albu_train_transforms = _base_.albu_train_transforms

mosaic_affine_pipeline = [
    dict(
        type='Mosaic',
        img_scale=img_scale,
        pad_val=114.0,
        pre_transform=pre_transform),
    dict(
        type='YOLOv5RandomAffine',
        max_rotate_degree=0.0,
        max_shear_degree=0.0,
        scaling_ratio_range=(1 - affine_scale, 1 + affine_scale),
        # img_scale is (width, height)
        border=(-img_scale[0] // 2, -img_scale[1] // 2),
        border_val=(114, 114, 114))
]

# enable mixup
train_pipeline = [
    *pre_transform, *mosaic_affine_pipeline,
    dict(
        type='YOLOv5MixUp',
        prob=mixup_prob,
        pre_transform=[*pre_transform, *mosaic_affine_pipeline]),
    dict(
        type='mmdet.Albu',
        transforms=albu_train_transforms,
        bbox_params=dict(
            type='BboxParams',
            format='pascal_voc',
            label_fields=['gt_bboxes_labels', 'gt_ignore_flags']),
        keymap={
            'img': 'image',
            'gt_bboxes': 'bboxes'
        }),
    dict(type='YOLOv5HSVRandomAug'),
    dict(type='mmdet.RandomFlip', prob=0.5),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
                   'flip_direction'))
]

train_dataloader = dict(dataset=dict(pipeline=train_pipeline))

default_hooks = dict(param_scheduler=dict(lr_factor=lr_factor))
2,411
29.15
74
py
mmyolo
mmyolo-main/configs/yolov5/yolov5_s-v61_fast_1xb12-40e_608x352_cat.py
_base_ = 'yolov5_s-v61_fast_1xb12-40e_cat.py'

# This configuration is used to provide non-square training examples
# Must be a multiple of 32
img_scale = (608, 352)  # w h

anchors = [
    [(65, 35), (159, 45), (119, 80)],  # P3/8
    [(215, 77), (224, 116), (170, 166)],  # P4/16
    [(376, 108), (339, 176), (483, 190)]  # P5/32
]

# ===============================Unmodified in most cases====================
_base_.model.bbox_head.loss_obj.loss_weight = 1.0 * ((img_scale[1] / 640)**2)
_base_.model.bbox_head.prior_generator.base_sizes = anchors

train_pipeline = [
    *_base_.pre_transform,
    dict(
        type='Mosaic',
        img_scale=img_scale,
        pad_val=114.0,
        pre_transform=_base_.pre_transform),
    dict(
        type='YOLOv5RandomAffine',
        max_rotate_degree=0.0,
        max_shear_degree=0.0,
        scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale),
        # img_scale is (width, height)
        border=(-img_scale[0] // 2, -img_scale[1] // 2),
        border_val=(114, 114, 114)),
    dict(
        type='mmdet.Albu',
        transforms=_base_.albu_train_transforms,
        bbox_params=dict(
            type='BboxParams',
            format='pascal_voc',
            label_fields=['gt_bboxes_labels', 'gt_ignore_flags']),
        keymap={
            'img': 'image',
            'gt_bboxes': 'bboxes'
        }),
    dict(type='YOLOv5HSVRandomAug'),
    dict(type='mmdet.RandomFlip', prob=0.5),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
                   'flip_direction'))
]

_base_.train_dataloader.dataset.pipeline = train_pipeline

test_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
    dict(type='YOLOv5KeepRatioResize', scale=img_scale),
    dict(
        type='LetterResize',
        scale=img_scale,
        allow_scale_up=False,
        pad_val=dict(img=114)),
    dict(type='mmdet.LoadAnnotations', with_bbox=True),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor', 'pad_param'))
]

val_dataloader = dict(
    dataset=dict(pipeline=test_pipeline, batch_shapes_cfg=None))
test_dataloader = val_dataloader
2,305
31.478873
79
py
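Non-square training only requires that both sides of img_scale divide by the largest stride (32) and that the objectness weight tracks the new height, exactly as the override above does. A quick check:

img_scale = (608, 352)  # (width, height)
assert all(side % 32 == 0 for side in img_scale), 'must be multiples of 32'
loss_obj_weight = 1.0 * (img_scale[1] / 640)**2
print(loss_obj_weight)  # 0.3025, the value the override computes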
mmyolo
mmyolo-main/configs/yolov5/yolov5_l-p6-v62_syncbn_fast_8xb16-300e_coco.py
_base_ = './yolov5_m-p6-v62_syncbn_fast_8xb16-300e_coco.py'
deepen_factor = 1.0
widen_factor = 1.0

model = dict(
    backbone=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
372
22.3125
64
py
mmyolo
mmyolo-main/configs/yolov5/yolov5_x-v61_syncbn_fast_8xb16-300e_coco.py
_base_ = './yolov5_m-v61_syncbn_fast_8xb16-300e_coco.py'
deepen_factor = 1.33
widen_factor = 1.25

model = dict(
    backbone=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
370
23.733333
64
py
mmyolo
mmyolo-main/configs/yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py
_base_ = ['../_base_/default_runtime.py', '../_base_/det_p5_tta.py'] # ========================Frequently modified parameters====================== # -----data related----- data_root = 'data/coco/' # Root path of data # Path of train annotation file train_ann_file = 'annotations/instances_train2017.json' train_data_prefix = 'train2017/' # Prefix of train image path # Path of val annotation file val_ann_file = 'annotations/instances_val2017.json' val_data_prefix = 'val2017/' # Prefix of val image path num_classes = 80 # Number of classes for classification # Batch size of a single GPU during training train_batch_size_per_gpu = 16 # Worker to pre-fetch data for each single GPU during training train_num_workers = 8 # persistent_workers must be False if num_workers is 0 persistent_workers = True # -----model related----- # Basic size of multi-scale prior box anchors = [ [(10, 13), (16, 30), (33, 23)], # P3/8 [(30, 61), (62, 45), (59, 119)], # P4/16 [(116, 90), (156, 198), (373, 326)] # P5/32 ] # -----train val related----- # Base learning rate for optim_wrapper. Corresponding to 8xb16=128 bs base_lr = 0.01 max_epochs = 300 # Maximum training epochs model_test_cfg = dict( # The config of multi-label for multi-class prediction. multi_label=True, # The number of boxes before NMS nms_pre=30000, score_thr=0.001, # Threshold to filter out boxes. nms=dict(type='nms', iou_threshold=0.65), # NMS type and threshold max_per_img=300) # Max number of detections of each image # ========================Possible modified parameters======================== # -----data related----- img_scale = (640, 640) # width, height # Dataset type, this will be used to define the dataset dataset_type = 'YOLOv5CocoDataset' # Batch size of a single GPU during validation val_batch_size_per_gpu = 1 # Worker to pre-fetch data for each single GPU during validation val_num_workers = 2 # Config of batch shapes. Only on val. # It means not used if batch_shapes_cfg is None. batch_shapes_cfg = dict( type='BatchShapePolicy', batch_size=val_batch_size_per_gpu, img_size=img_scale[0], # The image scale of padding should be divided by pad_size_divisor size_divisor=32, # Additional paddings for pixel scale extra_pad_ratio=0.5) # -----model related----- # The scaling factor that controls the depth of the network structure deepen_factor = 0.33 # The scaling factor that controls the width of the network structure widen_factor = 0.5 # Strides of multi-scale prior box strides = [8, 16, 32] num_det_layers = 3 # The number of model output scales norm_cfg = dict(type='BN', momentum=0.03, eps=0.001) # Normalization config # -----train val related----- affine_scale = 0.5 # YOLOv5RandomAffine scaling ratio loss_cls_weight = 0.5 loss_bbox_weight = 0.05 loss_obj_weight = 1.0 prior_match_thr = 4. # Priori box matching threshold # The obj loss weights of the three output layers obj_level_weights = [4., 1., 0.4] lr_factor = 0.01 # Learning rate scaling factor weight_decay = 0.0005 # Save model checkpoint and validation intervals save_checkpoint_intervals = 10 # The maximum checkpoints to keep. max_keep_ckpts = 3 # Single-scale training is recommended to # be turned on, which can speed up training. 
env_cfg = dict(cudnn_benchmark=True) # ===============================Unmodified in most cases==================== model = dict( type='YOLODetector', data_preprocessor=dict( type='mmdet.DetDataPreprocessor', mean=[0., 0., 0.], std=[255., 255., 255.], bgr_to_rgb=True), backbone=dict( type='YOLOv5CSPDarknet', deepen_factor=deepen_factor, widen_factor=widen_factor, norm_cfg=norm_cfg, act_cfg=dict(type='SiLU', inplace=True)), neck=dict( type='YOLOv5PAFPN', deepen_factor=deepen_factor, widen_factor=widen_factor, in_channels=[256, 512, 1024], out_channels=[256, 512, 1024], num_csp_blocks=3, norm_cfg=norm_cfg, act_cfg=dict(type='SiLU', inplace=True)), bbox_head=dict( type='YOLOv5Head', head_module=dict( type='YOLOv5HeadModule', num_classes=num_classes, in_channels=[256, 512, 1024], widen_factor=widen_factor, featmap_strides=strides, num_base_priors=3), prior_generator=dict( type='mmdet.YOLOAnchorGenerator', base_sizes=anchors, strides=strides), # scaled based on number of detection layers loss_cls=dict( type='mmdet.CrossEntropyLoss', use_sigmoid=True, reduction='mean', loss_weight=loss_cls_weight * (num_classes / 80 * 3 / num_det_layers)), loss_bbox=dict( type='IoULoss', iou_mode='ciou', bbox_format='xywh', eps=1e-7, reduction='mean', loss_weight=loss_bbox_weight * (3 / num_det_layers), return_iou=True), loss_obj=dict( type='mmdet.CrossEntropyLoss', use_sigmoid=True, reduction='mean', loss_weight=loss_obj_weight * ((img_scale[0] / 640)**2 * 3 / num_det_layers)), prior_match_thr=prior_match_thr, obj_level_weights=obj_level_weights), test_cfg=model_test_cfg) albu_train_transforms = [ dict(type='Blur', p=0.01), dict(type='MedianBlur', p=0.01), dict(type='ToGray', p=0.01), dict(type='CLAHE', p=0.01) ] pre_transform = [ dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args), dict(type='LoadAnnotations', with_bbox=True) ] train_pipeline = [ *pre_transform, dict( type='Mosaic', img_scale=img_scale, pad_val=114.0, pre_transform=pre_transform), dict( type='YOLOv5RandomAffine', max_rotate_degree=0.0, max_shear_degree=0.0, scaling_ratio_range=(1 - affine_scale, 1 + affine_scale), # img_scale is (width, height) border=(-img_scale[0] // 2, -img_scale[1] // 2), border_val=(114, 114, 114)), dict( type='mmdet.Albu', transforms=albu_train_transforms, bbox_params=dict( type='BboxParams', format='pascal_voc', label_fields=['gt_bboxes_labels', 'gt_ignore_flags']), keymap={ 'img': 'image', 'gt_bboxes': 'bboxes' }), dict(type='YOLOv5HSVRandomAug'), dict(type='mmdet.RandomFlip', prob=0.5), dict( type='mmdet.PackDetInputs', meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip', 'flip_direction')) ] train_dataloader = dict( batch_size=train_batch_size_per_gpu, num_workers=train_num_workers, persistent_workers=persistent_workers, pin_memory=True, sampler=dict(type='DefaultSampler', shuffle=True), dataset=dict( type=dataset_type, data_root=data_root, ann_file=train_ann_file, data_prefix=dict(img=train_data_prefix), filter_cfg=dict(filter_empty_gt=False, min_size=32), pipeline=train_pipeline)) test_pipeline = [ dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args), dict(type='YOLOv5KeepRatioResize', scale=img_scale), dict( type='LetterResize', scale=img_scale, allow_scale_up=False, pad_val=dict(img=114)), dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'), dict( type='mmdet.PackDetInputs', meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor', 'pad_param')) ] val_dataloader = dict( batch_size=val_batch_size_per_gpu, num_workers=val_num_workers, 
persistent_workers=persistent_workers, pin_memory=True, drop_last=False, sampler=dict(type='DefaultSampler', shuffle=False), dataset=dict( type=dataset_type, data_root=data_root, test_mode=True, data_prefix=dict(img=val_data_prefix), ann_file=val_ann_file, pipeline=test_pipeline, batch_shapes_cfg=batch_shapes_cfg)) test_dataloader = val_dataloader param_scheduler = None optim_wrapper = dict( type='OptimWrapper', optimizer=dict( type='SGD', lr=base_lr, momentum=0.937, weight_decay=weight_decay, nesterov=True, batch_size_per_gpu=train_batch_size_per_gpu), constructor='YOLOv5OptimizerConstructor') default_hooks = dict( param_scheduler=dict( type='YOLOv5ParamSchedulerHook', scheduler_type='linear', lr_factor=lr_factor, max_epochs=max_epochs), checkpoint=dict( type='CheckpointHook', interval=save_checkpoint_intervals, save_best='auto', max_keep_ckpts=max_keep_ckpts)) custom_hooks = [ dict( type='EMAHook', ema_type='ExpMomentumEMA', momentum=0.0001, update_buffers=True, strict_load=False, priority=49) ] val_evaluator = dict( type='mmdet.CocoMetric', proposal_nums=(100, 1, 10), ann_file=data_root + val_ann_file, metric='bbox') test_evaluator = val_evaluator train_cfg = dict( type='EpochBasedTrainLoop', max_epochs=max_epochs, val_interval=save_checkpoint_intervals) val_cfg = dict(type='ValLoop') test_cfg = dict(type='TestLoop')
9,466
31.31058
78
py
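The cls/bbox/obj loss weights in the config above are rescaled from YOLOv5's 80-class, 3-level, 640-input defaults so the loss totals stay comparable across variants. The three formulas from the config, evaluated at those defaults:

num_classes, num_det_layers, img_w = 80, 3, 640

loss_cls = 0.5 * (num_classes / 80 * 3 / num_det_layers)   # 0.5
loss_bbox = 0.05 * (3 / num_det_layers)                    # 0.05
loss_obj = 1.0 * ((img_w / 640)**2 * 3 / num_det_layers)   # 1.0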
mmyolo
mmyolo-main/configs/yolov5/yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco.py
_base_ = 'yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'

# ========================modified parameters======================
img_scale = (1280, 1280)  # width, height
num_classes = 80  # Number of classes for classification
# Config of batch shapes. Only on val.
# It means not used if batch_shapes_cfg is None.
batch_shapes_cfg = dict(
    img_size=img_scale[0],
    # The image scale of padding should be divided by pad_size_divisor
    size_divisor=64)
# Basic size of multi-scale prior box
anchors = [
    [(19, 27), (44, 40), (38, 94)],  # P3/8
    [(96, 68), (86, 152), (180, 137)],  # P4/16
    [(140, 301), (303, 264), (238, 542)],  # P5/32
    [(436, 615), (739, 380), (925, 792)]  # P6/64
]
# Strides of multi-scale prior box
strides = [8, 16, 32, 64]
num_det_layers = 4  # The number of model output scales
loss_cls_weight = 0.5
loss_bbox_weight = 0.05
loss_obj_weight = 1.0
# The obj loss weights of the four output layers
obj_level_weights = [4.0, 1.0, 0.25, 0.06]
affine_scale = 0.5  # YOLOv5RandomAffine scaling ratio

tta_img_scales = [(1280, 1280), (1024, 1024), (1536, 1536)]
# =======================Unmodified in most cases==================
model = dict(
    backbone=dict(arch='P6', out_indices=(2, 3, 4, 5)),
    neck=dict(
        in_channels=[256, 512, 768, 1024],
        out_channels=[256, 512, 768, 1024]),
    bbox_head=dict(
        head_module=dict(
            in_channels=[256, 512, 768, 1024], featmap_strides=strides),
        prior_generator=dict(base_sizes=anchors, strides=strides),
        # scaled based on number of detection layers
        loss_cls=dict(loss_weight=loss_cls_weight *
                      (num_classes / 80 * 3 / num_det_layers)),
        loss_bbox=dict(loss_weight=loss_bbox_weight * (3 / num_det_layers)),
        loss_obj=dict(loss_weight=loss_obj_weight *
                      ((img_scale[0] / 640)**2 * 3 / num_det_layers)),
        obj_level_weights=obj_level_weights))

pre_transform = _base_.pre_transform
albu_train_transforms = _base_.albu_train_transforms

train_pipeline = [
    *pre_transform,
    dict(
        type='Mosaic',
        img_scale=img_scale,
        pad_val=114.0,
        pre_transform=pre_transform),
    dict(
        type='YOLOv5RandomAffine',
        max_rotate_degree=0.0,
        max_shear_degree=0.0,
        scaling_ratio_range=(1 - affine_scale, 1 + affine_scale),
        # img_scale is (width, height)
        border=(-img_scale[0] // 2, -img_scale[1] // 2),
        border_val=(114, 114, 114)),
    dict(
        type='mmdet.Albu',
        transforms=albu_train_transforms,
        bbox_params=dict(
            type='BboxParams',
            format='pascal_voc',
            label_fields=['gt_bboxes_labels', 'gt_ignore_flags']),
        keymap={
            'img': 'image',
            'gt_bboxes': 'bboxes'
        }),
    dict(type='YOLOv5HSVRandomAug'),
    dict(type='mmdet.RandomFlip', prob=0.5),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
                   'flip_direction'))
]

train_dataloader = dict(dataset=dict(pipeline=train_pipeline))

test_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
    dict(type='YOLOv5KeepRatioResize', scale=img_scale),
    dict(
        type='LetterResize',
        scale=img_scale,
        allow_scale_up=False,
        pad_val=dict(img=114)),
    dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor', 'pad_param'))
]

val_dataloader = dict(
    dataset=dict(pipeline=test_pipeline, batch_shapes_cfg=batch_shapes_cfg))
test_dataloader = val_dataloader

# Config for Test Time Augmentation. (TTA)
_multiscale_resize_transforms = [
    dict(
        type='Compose',
        transforms=[
            dict(type='YOLOv5KeepRatioResize', scale=s),
            dict(
                type='LetterResize',
                scale=s,
                allow_scale_up=False,
                pad_val=dict(img=114))
        ]) for s in tta_img_scales
]

tta_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
    dict(
        type='TestTimeAug',
        transforms=[
            _multiscale_resize_transforms,
            [
                dict(type='mmdet.RandomFlip', prob=1.),
                dict(type='mmdet.RandomFlip', prob=0.)
            ], [dict(type='mmdet.LoadAnnotations', with_bbox=True)],
            [
                dict(
                    type='mmdet.PackDetInputs',
                    meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                               'scale_factor', 'pad_param', 'flip',
                               'flip_direction'))
            ]
        ])
]
4,851
33.906475
79
py
mmyolo
mmyolo-main/configs/yolov5/yolov5_n-p6-v62_syncbn_fast_8xb16-300e_coco.py
_base_ = 'yolov5_s-p6-v62_syncbn_fast_8xb16-300e_coco.py'

deepen_factor = 0.33
widen_factor = 0.25

model = dict(
    backbone=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
372
22.3125
64
py
mmyolo
mmyolo-main/configs/yolov5/yolov5_m-v61_syncbn_fast_8xb16-300e_coco.py
_base_ = './yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'

# ========================modified parameters======================
deepen_factor = 0.67
widen_factor = 0.75
lr_factor = 0.1
affine_scale = 0.9
loss_cls_weight = 0.3
loss_obj_weight = 0.7
mixup_prob = 0.1

# =======================Unmodified in most cases==================
num_classes = _base_.num_classes
num_det_layers = _base_.num_det_layers
img_scale = _base_.img_scale

model = dict(
    backbone=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    bbox_head=dict(
        head_module=dict(widen_factor=widen_factor),
        loss_cls=dict(loss_weight=loss_cls_weight *
                      (num_classes / 80 * 3 / num_det_layers)),
        loss_obj=dict(loss_weight=loss_obj_weight *
                      ((img_scale[0] / 640)**2 * 3 / num_det_layers))))

pre_transform = _base_.pre_transform
albu_train_transforms = _base_.albu_train_transforms

mosaic_affine_pipeline = [
    dict(
        type='Mosaic',
        img_scale=img_scale,
        pad_val=114.0,
        pre_transform=pre_transform),
    dict(
        type='YOLOv5RandomAffine',
        max_rotate_degree=0.0,
        max_shear_degree=0.0,
        scaling_ratio_range=(1 - affine_scale, 1 + affine_scale),
        # img_scale is (width, height)
        border=(-img_scale[0] // 2, -img_scale[1] // 2),
        border_val=(114, 114, 114))
]

# enable mixup
train_pipeline = [
    *pre_transform, *mosaic_affine_pipeline,
    dict(
        type='YOLOv5MixUp',
        prob=mixup_prob,
        pre_transform=[*pre_transform, *mosaic_affine_pipeline]),
    dict(
        type='mmdet.Albu',
        transforms=albu_train_transforms,
        bbox_params=dict(
            type='BboxParams',
            format='pascal_voc',
            label_fields=['gt_bboxes_labels', 'gt_ignore_flags']),
        keymap={
            'img': 'image',
            'gt_bboxes': 'bboxes'
        }),
    dict(type='YOLOv5HSVRandomAug'),
    dict(type='mmdet.RandomFlip', prob=0.5),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
                   'flip_direction'))
]

train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
default_hooks = dict(param_scheduler=dict(lr_factor=lr_factor))
2,408
29.1125
74
py
mmyolo
mmyolo-main/configs/yolov5/yolov5_s-v61_fast_1xb12-40e_cat.py
_base_ = 'yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'

data_root = './data/cat/'
class_name = ('cat', )
num_classes = len(class_name)
metainfo = dict(classes=class_name, palette=[(20, 220, 60)])

anchors = [
    [(68, 69), (154, 91), (143, 162)],  # P3/8
    [(242, 160), (189, 287), (391, 207)],  # P4/16
    [(353, 337), (539, 341), (443, 432)]  # P5/32
]

max_epochs = 40
train_batch_size_per_gpu = 12
train_num_workers = 4

load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth'  # noqa

model = dict(
    backbone=dict(frozen_stages=4),
    bbox_head=dict(
        head_module=dict(num_classes=num_classes),
        prior_generator=dict(base_sizes=anchors)))

train_dataloader = dict(
    batch_size=train_batch_size_per_gpu,
    num_workers=train_num_workers,
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        ann_file='annotations/trainval.json',
        data_prefix=dict(img='images/')))

val_dataloader = dict(
    dataset=dict(
        metainfo=metainfo,
        data_root=data_root,
        ann_file='annotations/test.json',
        data_prefix=dict(img='images/')))

test_dataloader = val_dataloader

_base_.optim_wrapper.optimizer.batch_size_per_gpu = train_batch_size_per_gpu

val_evaluator = dict(ann_file=data_root + 'annotations/test.json')
test_evaluator = val_evaluator

default_hooks = dict(
    checkpoint=dict(interval=10, max_keep_ckpts=2, save_best='auto'),
    # The warmup_mim_iter parameter is critical.
    # The default value is 1000 which is not suitable for cat datasets.
    param_scheduler=dict(max_epochs=max_epochs, warmup_mim_iter=10),
    logger=dict(type='LoggerHook', interval=5))
train_cfg = dict(max_epochs=max_epochs, val_interval=10)
# visualizer = dict(vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')])  # noqa
1,932
32.912281
180
py
mmyolo
mmyolo-main/configs/yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco.py
_base_ = './yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'

deepen_factor = 0.33
widen_factor = 0.25

model = dict(
    backbone=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
371
22.25
64
py
mmyolo
mmyolo-main/configs/yolov5/yolov5_s-v61_syncbn-detect_8xb16-300e_coco.py
_base_ = 'yolov5_s-v61_syncbn_8xb16-300e_coco.py'

test_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
    dict(
        type='LetterResize',
        scale=_base_.img_scale,
        allow_scale_up=True,
        use_mini_pad=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor', 'pad_param'))
]

val_dataloader = dict(
    dataset=dict(pipeline=test_pipeline, batch_shapes_cfg=None))
test_dataloader = val_dataloader

model = dict(
    test_cfg=dict(
        multi_label=False, score_thr=0.25, nms=dict(iou_threshold=0.45)))
719
29
77
py
mmyolo
mmyolo-main/configs/yolov5/crowdhuman/yolov5_s-v61_8xb16-300e_ignore_crowdhuman.py
_base_ = 'yolov5_s-v61_fast_8xb16-300e_crowdhuman.py'

model = dict(
    data_preprocessor=dict(
        _delete_=True,
        type='mmdet.DetDataPreprocessor',
        mean=[0., 0., 0.],
        std=[255., 255., 255.],
        bgr_to_rgb=True),
    bbox_head=dict(ignore_iof_thr=0.5))

img_scale = _base_.img_scale

albu_train_transforms = [
    dict(type='Blur', p=0.01),
    dict(type='MedianBlur', p=0.01),
    dict(type='ToGray', p=0.01),
    dict(type='CLAHE', p=0.01)
]

pre_transform = [
    dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
    # only change this
    dict(type='mmdet.LoadAnnotations', with_bbox=True)
]

train_pipeline = [
    *pre_transform,
    dict(
        type='Mosaic',
        img_scale=img_scale,
        pad_val=114.0,
        pre_transform=pre_transform),
    dict(
        type='YOLOv5RandomAffine',
        max_rotate_degree=0.0,
        max_shear_degree=0.0,
        scaling_ratio_range=(0.5, 1.5),
        # img_scale is (width, height)
        border=(-img_scale[0] // 2, -img_scale[1] // 2),
        border_val=(114, 114, 114)),
    dict(
        type='mmdet.Albu',
        transforms=albu_train_transforms,
        bbox_params=dict(
            type='BboxParams',
            format='pascal_voc',
            label_fields=['gt_bboxes_labels', 'gt_ignore_flags']),
        keymap={
            'img': 'image',
            'gt_bboxes': 'bboxes'
        }),
    dict(type='YOLOv5HSVRandomAug'),
    dict(type='mmdet.RandomFlip', prob=0.5),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
                   'flip_direction'))
]

train_dataloader = dict(
    collate_fn=dict(type='pseudo_collate'),
    dataset=dict(pipeline=train_pipeline))
1,780
26.828125
77
py
mmyolo
mmyolo-main/configs/yolov5/crowdhuman/yolov5_s-v61_fast_8xb16-300e_crowdhuman.py
_base_ = '../yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'

# Use the model trained on COCO as the pretrained model
load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth'  # noqa

# dataset settings
data_root = 'data/CrowdHuman/'
dataset_type = 'YOLOv5CrowdHumanDataset'

# parameters that often need to be modified
num_classes = 1

anchors = [
    [(6, 14), (12, 28), (19, 48)],  # P3/8
    [(29, 79), (46, 124), (142, 54)],  # P4/16
    [(73, 198), (124, 330), (255, 504)]  # P5/32
]

model = dict(
    bbox_head=dict(
        head_module=dict(num_classes=num_classes),
        prior_generator=dict(base_sizes=anchors)))

train_dataloader = dict(
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotation_train.odgt',
        data_prefix=dict(img='Images/')))

val_dataloader = dict(
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='annotation_val.odgt',
        data_prefix=dict(img='Images/'),
        # CrowdHumanMetric does not support out-of-order output images
        # for the time being, so batch_shapes_cfg is disabled.
        batch_shapes_cfg=None))
test_dataloader = val_dataloader

val_evaluator = dict(
    _delete_=True,
    type='mmdet.CrowdHumanMetric',
    ann_file=data_root + 'annotation_val.odgt',
    metric=['AP', 'MR', 'JI'])
test_evaluator = val_evaluator
1,495
30.166667
180
py
mmyolo
mmyolo-main/configs/yolov5/voc/yolov5_l-v61_fast_1xb32-50e_voc.py
_base_ = './yolov5_s-v61_fast_1xb64-50e_voc.py'

deepen_factor = 1.0
widen_factor = 1.0
train_batch_size_per_gpu = 32
train_num_workers = 8

load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_l-v61_syncbn_fast_8xb16-300e_coco/yolov5_l-v61_syncbn_fast_8xb16-300e_coco_20220917_031007-096ef0eb.pth'  # noqa

model = dict(
    backbone=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))

train_dataloader = dict(
    batch_size=train_batch_size_per_gpu, num_workers=train_num_workers)

optim_wrapper = dict(
    optimizer=dict(batch_size_per_gpu=train_batch_size_per_gpu))
780
29.038462
180
py
mmyolo
mmyolo-main/configs/yolov5/voc/yolov5_m-v61_fast_1xb64-50e_voc.py
_base_ = './yolov5_s-v61_fast_1xb64-50e_voc.py'

deepen_factor = 0.67
widen_factor = 0.75

load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_m-v61_syncbn_fast_8xb16-300e_coco/yolov5_m-v61_syncbn_fast_8xb16-300e_coco_20220917_204944-516a710f.pth'  # noqa

model = dict(
    backbone=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
544
29.277778
180
py
mmyolo
mmyolo-main/configs/yolov5/voc/yolov5_n-v61_fast_1xb64-50e_voc.py
_base_ = './yolov5_s-v61_fast_1xb64-50e_voc.py'

deepen_factor = 0.33
widen_factor = 0.25

load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco/yolov5_n-v61_syncbn_fast_8xb16-300e_coco_20220919_090739-b804c1ad.pth'  # noqa

model = dict(
    backbone=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
544
29.277778
180
py
mmyolo
mmyolo-main/configs/yolov5/voc/yolov5_s-v61_fast_1xb64-50e_voc.py
_base_ = '../yolov5_s-v61_syncbn_fast_8xb16-300e_coco.py'

# dataset settings
data_root = 'data/VOCdevkit/'
dataset_type = 'YOLOv5VOCDataset'

# parameters that often need to be modified
num_classes = 20
img_scale = (512, 512)  # width, height
max_epochs = 50
train_batch_size_per_gpu = 64
train_num_workers = 8
val_batch_size_per_gpu = 1
val_num_workers = 2

# persistent_workers must be False if num_workers is 0.
persistent_workers = True

lr_factor = 0.15135
affine_scale = 0.75544

# only on Val
batch_shapes_cfg = dict(img_size=img_scale[0])

anchors = [[(26, 44), (67, 57), (61, 130)],
           [(121, 118), (120, 239), (206, 182)],
           [(376, 161), (234, 324), (428, 322)]]
num_det_layers = 3

load_from = 'https://download.openmmlab.com/mmyolo/v0/yolov5/yolov5_s-v61_syncbn_fast_8xb16-300e_coco/yolov5_s-v61_syncbn_fast_8xb16-300e_coco_20220918_084700-86e02187.pth'  # noqa

tta_img_scales = [img_scale, (416, 416), (640, 640)]

# Hyperparameter reference from:
# https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.VOC.yaml
model = dict(
    bbox_head=dict(
        head_module=dict(num_classes=num_classes),
        prior_generator=dict(base_sizes=anchors),
        loss_cls=dict(
            loss_weight=0.21638 * (num_classes / 80 * 3 / num_det_layers),
            class_weight=0.5),
        loss_bbox=dict(loss_weight=0.02 * (3 / num_det_layers)),
        loss_obj=dict(
            loss_weight=0.51728 *
            ((img_scale[0] / 640)**2 * 3 / num_det_layers),
            class_weight=0.67198),
        # Different from COCO
        prior_match_thr=3.3744),
    test_cfg=dict(nms=dict(iou_threshold=0.6)))

albu_train_transforms = _base_.albu_train_transforms
pre_transform = _base_.pre_transform

with_mosaic_pipeline = [
    dict(
        type='Mosaic',
        img_scale=img_scale,
        pad_val=114.0,
        pre_transform=pre_transform),
    dict(
        type='YOLOv5RandomAffine',
        max_rotate_degree=0.0,
        max_translate_ratio=0.04591,
        max_shear_degree=0.0,
        scaling_ratio_range=(1 - affine_scale, 1 + affine_scale),
        # img_scale is (width, height)
        border=(-img_scale[0] // 2, -img_scale[1] // 2),
        border_val=(114, 114, 114)),
    dict(
        type='YOLOv5MixUp',
        prob=0.04266,
        pre_transform=[
            *pre_transform,
            dict(
                type='Mosaic',
                img_scale=img_scale,
                pad_val=114.0,
                pre_transform=pre_transform),
            dict(
                type='YOLOv5RandomAffine',
                max_rotate_degree=0.0,
                max_translate_ratio=0.04591,
                max_shear_degree=0.0,
                scaling_ratio_range=(1 - affine_scale, 1 + affine_scale),
                # img_scale is (width, height)
                border=(-img_scale[0] // 2, -img_scale[1] // 2),
                border_val=(114, 114, 114))
        ])
]

without_mosaic_pipeline = [
    dict(
        type='YOLOv5RandomAffine',
        max_rotate_degree=0.0,
        max_translate_ratio=0.04591,
        max_shear_degree=0.0,
        scaling_ratio_range=(1 - affine_scale, 1 + affine_scale),
        border=(0, 0),
        border_val=(114, 114, 114)),
    dict(
        type='LetterResize',
        scale=img_scale,
        allow_scale_up=True,
        pad_val=dict(img=114))
]

# Because the border parameter is inconsistent when
# using mosaic or not, `RandomChoice` is used here.
randchoice_mosaic_pipeline = dict(
    type='RandomChoice',
    transforms=[with_mosaic_pipeline, without_mosaic_pipeline],
    prob=[0.85834, 0.14166])

train_pipeline = [
    *pre_transform, randchoice_mosaic_pipeline,
    dict(
        type='mmdet.Albu',
        transforms=albu_train_transforms,
        bbox_params=dict(
            type='BboxParams',
            format='pascal_voc',
            label_fields=['gt_bboxes_labels', 'gt_ignore_flags']),
        keymap={
            'img': 'image',
            'gt_bboxes': 'bboxes'
        }),
    dict(
        type='YOLOv5HSVRandomAug',
        hue_delta=0.01041,
        saturation_delta=0.54703,
        value_delta=0.27739),
    dict(type='mmdet.RandomFlip', prob=0.5),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
                   'flip_direction'))
]

train_dataloader = dict(
    _delete_=True,
    batch_size=train_batch_size_per_gpu,
    num_workers=train_num_workers,
    persistent_workers=persistent_workers,
    pin_memory=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type='ConcatDataset',
        datasets=[
            dict(
                type=dataset_type,
                data_root=data_root,
                ann_file='VOC2007/ImageSets/Main/trainval.txt',
                data_prefix=dict(sub_data_root='VOC2007/'),
                filter_cfg=dict(filter_empty_gt=False, min_size=32),
                pipeline=train_pipeline),
            dict(
                type=dataset_type,
                data_root=data_root,
                ann_file='VOC2012/ImageSets/Main/trainval.txt',
                data_prefix=dict(sub_data_root='VOC2012/'),
                filter_cfg=dict(filter_empty_gt=False, min_size=32),
                pipeline=train_pipeline)
        ],
        # Use ignore_keys to avoid judging metainfo is
        # not equal in `ConcatDataset`.
        ignore_keys='dataset_type'),
    collate_fn=dict(type='yolov5_collate'))

test_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
    dict(type='YOLOv5KeepRatioResize', scale=img_scale),
    dict(
        type='LetterResize',
        scale=img_scale,
        allow_scale_up=False,
        pad_val=dict(img=114)),
    dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor', 'pad_param'))
]

val_dataloader = dict(
    batch_size=val_batch_size_per_gpu,
    num_workers=val_num_workers,
    persistent_workers=persistent_workers,
    pin_memory=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file='VOC2007/ImageSets/Main/test.txt',
        data_prefix=dict(sub_data_root='VOC2007/'),
        test_mode=True,
        pipeline=test_pipeline,
        batch_shapes_cfg=batch_shapes_cfg))
test_dataloader = val_dataloader

param_scheduler = None

optim_wrapper = dict(
    optimizer=dict(
        lr=0.00334,
        momentum=0.74832,
        weight_decay=0.00025,
        batch_size_per_gpu=train_batch_size_per_gpu))

default_hooks = dict(
    param_scheduler=dict(
        lr_factor=lr_factor,
        max_epochs=max_epochs,
        warmup_epochs=3.3835,
        warmup_momentum=0.59462,
        warmup_bias_lr=0.18657))

custom_hooks = [
    dict(
        type='EMAHook',
        ema_type='ExpMomentumEMA',
        momentum=0.0001,
        update_buffers=True,
        # To load COCO pretrained model, need to set `strict_load=False`
        strict_load=False,
        priority=49)
]

# TODO: Support using coco metric in voc dataset
val_evaluator = dict(
    _delete_=True, type='mmdet.VOCMetric', metric='mAP', eval_mode='area')
test_evaluator = val_evaluator

train_cfg = dict(max_epochs=max_epochs)

# Config for Test Time Augmentation. (TTA)
_multiscale_resize_transforms = [
    dict(
        type='Compose',
        transforms=[
            dict(type='YOLOv5KeepRatioResize', scale=s),
            dict(
                type='LetterResize',
                scale=s,
                allow_scale_up=False,
                pad_val=dict(img=114))
        ]) for s in tta_img_scales
]

tta_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
    dict(
        type='TestTimeAug',
        transforms=[
            _multiscale_resize_transforms,
            [
                dict(type='mmdet.RandomFlip', prob=1.),
                dict(type='mmdet.RandomFlip', prob=0.)
            ], [dict(type='mmdet.LoadAnnotations', with_bbox=True)],
            [
                dict(
                    type='mmdet.PackDetInputs',
                    meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                               'scale_factor', 'pad_param', 'flip',
                               'flip_direction'))
            ]
        ])
]
8,555
30.571956
180
py
mmyolo
mmyolo-main/configs/yolov5/voc/yolov5_x-v61_fast_1xb32-50e_voc.py
_base_ = './yolov5_s-v61_fast_1xb64-50e_voc.py'

deepen_factor = 1.33
widen_factor = 1.25
train_batch_size_per_gpu = 32
train_num_workers = 8

# TODO: need to add pretrained_model
load_from = None

model = dict(
    backbone=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
    ),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))

train_dataloader = dict(
    batch_size=train_batch_size_per_gpu, num_workers=train_num_workers)

optim_wrapper = dict(
    optimizer=dict(batch_size_per_gpu=train_batch_size_per_gpu))
655
23.296296
71
py
mmyolo
mmyolo-main/configs/yolox/yolox_p5_tta.py
# TODO: Need to solve the problem of multiple file_client_args parameters
# _file_client_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
_file_client_args = dict(backend='disk')

tta_model = dict(
    type='mmdet.DetTTAModel',
    tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.65), max_per_img=300))

img_scales = [(640, 640), (320, 320), (960, 960)]

#                       LoadImageFromFile
#            /                 |                  \
#       Resize              Resize              Resize  # noqa
#       /    \              /    \              /    \
# RandomFlip RandomFlip RandomFlip RandomFlip RandomFlip RandomFlip  # noqa
#     |          |          |          |          |          |
#  LoadAnn    LoadAnn    LoadAnn    LoadAnn    LoadAnn    LoadAnn
#     |          |          |          |          |          |
#  PackDetIn  PackDetIn  PackDetIn  PackDetIn  PackDetIn  PackDetIn  # noqa

tta_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=_file_client_args),
    dict(
        type='TestTimeAug',
        transforms=[
            [
                dict(type='mmdet.Resize', scale=s, keep_ratio=True)
                for s in img_scales
            ],
            [
                # ``RandomFlip`` must be placed before ``Pad``, otherwise
                # bounding box coordinates after flipping cannot be
                # recovered correctly.
                dict(type='mmdet.RandomFlip', prob=1.),
                dict(type='mmdet.RandomFlip', prob=0.)
            ],
            [
                dict(
                    type='mmdet.Pad',
                    pad_to_square=True,
                    pad_val=dict(img=(114.0, 114.0, 114.0))),
            ],
            [
                dict(
                    type='mmdet.PackDetInputs',
                    meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                               'scale_factor', 'flip', 'flip_direction'))
            ]
        ])
]
2,240
39.017857
87
py
mmyolo
mmyolo-main/configs/yolox/yolox_s_fast_8xb8-300e_coco.py
_base_ = ['../_base_/default_runtime.py', 'yolox_p5_tta.py']

# ========================Frequently modified parameters======================
# -----data related-----
data_root = 'data/coco/'  # Root path of data
# path of train annotation file
train_ann_file = 'annotations/instances_train2017.json'
train_data_prefix = 'train2017/'  # Prefix of train image path
# path of val annotation file
val_ann_file = 'annotations/instances_val2017.json'
val_data_prefix = 'val2017/'  # Prefix of val image path

num_classes = 80  # Number of classes for classification
# Batch size of a single GPU during training
train_batch_size_per_gpu = 8
# Worker to pre-fetch data for each single GPU during training
train_num_workers = 8
# persistent_workers must be False if num_workers is 0
persistent_workers = True

# -----train val related-----
# Base learning rate for optim_wrapper. Corresponding to 8xb8=64 bs
base_lr = 0.01
max_epochs = 300  # Maximum training epochs

model_test_cfg = dict(
    yolox_style=True,  # better
    # The config of multi-label for multi-class prediction
    multi_label=True,  # 40.5 -> 40.7
    score_thr=0.001,  # Threshold to filter out boxes
    max_per_img=300,  # Max number of detections of each image
    nms=dict(type='nms', iou_threshold=0.65))  # NMS type and threshold

# ========================Possible modified parameters========================
# -----data related-----
img_scale = (640, 640)  # width, height
# Dataset type, this will be used to define the dataset
dataset_type = 'YOLOv5CocoDataset'
# Batch size of a single GPU during validation
val_batch_size_per_gpu = 1
# Worker to pre-fetch data for each single GPU during validation
val_num_workers = 2

# -----model related-----
# The scaling factor that controls the depth of the network structure
deepen_factor = 0.33
# The scaling factor that controls the width of the network structure
widen_factor = 0.5
norm_cfg = dict(type='BN', momentum=0.03, eps=0.001)
# generate new random resize shape interval
batch_augments_interval = 10

# -----train val related-----
weight_decay = 0.0005
loss_cls_weight = 1.0
loss_bbox_weight = 5.0
loss_obj_weight = 1.0
loss_bbox_aux_weight = 1.0
center_radius = 2.5  # SimOTAAssigner
num_last_epochs = 15
random_affine_scaling_ratio_range = (0.1, 2)
mixup_ratio_range = (0.8, 1.6)
# Save model checkpoint and validation intervals
save_epoch_intervals = 10
# The maximum checkpoints to keep.
max_keep_ckpts = 3
ema_momentum = 0.0001

# ===============================Unmodified in most cases====================
# model settings
model = dict(
    type='YOLODetector',
    init_cfg=dict(
        type='Kaiming',
        layer='Conv2d',
        a=2.23606797749979,  # math.sqrt(5)
        distribution='uniform',
        mode='fan_in',
        nonlinearity='leaky_relu'),
    # TODO: Waiting for mmengine support
    use_syncbn=False,
    data_preprocessor=dict(
        type='YOLOv5DetDataPreprocessor',
        pad_size_divisor=32,
        batch_augments=[
            dict(
                type='YOLOXBatchSyncRandomResize',
                random_size_range=(480, 800),
                size_divisor=32,
                interval=batch_augments_interval)
        ]),
    backbone=dict(
        type='YOLOXCSPDarknet',
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
        out_indices=(2, 3, 4),
        spp_kernal_sizes=(5, 9, 13),
        norm_cfg=norm_cfg,
        act_cfg=dict(type='SiLU', inplace=True),
    ),
    neck=dict(
        type='YOLOXPAFPN',
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
        in_channels=[256, 512, 1024],
        out_channels=256,
        norm_cfg=norm_cfg,
        act_cfg=dict(type='SiLU', inplace=True)),
    bbox_head=dict(
        type='YOLOXHead',
        head_module=dict(
            type='YOLOXHeadModule',
            num_classes=num_classes,
            in_channels=256,
            feat_channels=256,
            widen_factor=widen_factor,
            stacked_convs=2,
            featmap_strides=(8, 16, 32),
            use_depthwise=False,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='SiLU', inplace=True),
        ),
        loss_cls=dict(
            type='mmdet.CrossEntropyLoss',
            use_sigmoid=True,
            reduction='sum',
            loss_weight=loss_cls_weight),
        loss_bbox=dict(
            type='mmdet.IoULoss',
            mode='square',
            eps=1e-16,
            reduction='sum',
            loss_weight=loss_bbox_weight),
        loss_obj=dict(
            type='mmdet.CrossEntropyLoss',
            use_sigmoid=True,
            reduction='sum',
            loss_weight=loss_obj_weight),
        loss_bbox_aux=dict(
            type='mmdet.L1Loss',
            reduction='sum',
            loss_weight=loss_bbox_aux_weight)),
    train_cfg=dict(
        assigner=dict(
            type='mmdet.SimOTAAssigner',
            center_radius=center_radius,
            iou_calculator=dict(type='mmdet.BboxOverlaps2D'))),
    test_cfg=model_test_cfg)

pre_transform = [
    dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
    dict(type='LoadAnnotations', with_bbox=True)
]

train_pipeline_stage1 = [
    *pre_transform,
    dict(
        type='Mosaic',
        img_scale=img_scale,
        pad_val=114.0,
        pre_transform=pre_transform),
    dict(
        type='mmdet.RandomAffine',
        scaling_ratio_range=random_affine_scaling_ratio_range,
        # img_scale is (width, height)
        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
    dict(
        type='YOLOXMixUp',
        img_scale=img_scale,
        ratio_range=mixup_ratio_range,
        pad_val=114.0,
        pre_transform=pre_transform),
    dict(type='mmdet.YOLOXHSVRandomAug'),
    dict(type='mmdet.RandomFlip', prob=0.5),
    dict(
        type='mmdet.FilterAnnotations',
        min_gt_bbox_wh=(1, 1),
        keep_empty=False),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
                   'flip_direction'))
]

train_pipeline_stage2 = [
    *pre_transform,
    dict(type='mmdet.Resize', scale=img_scale, keep_ratio=True),
    dict(
        type='mmdet.Pad',
        pad_to_square=True,
        # If the image is three-channel, the pad value needs
        # to be set separately for each channel.
        pad_val=dict(img=(114.0, 114.0, 114.0))),
    dict(type='mmdet.YOLOXHSVRandomAug'),
    dict(type='mmdet.RandomFlip', prob=0.5),
    dict(
        type='mmdet.FilterAnnotations',
        min_gt_bbox_wh=(1, 1),
        keep_empty=False),
    dict(type='mmdet.PackDetInputs')
]

train_dataloader = dict(
    batch_size=train_batch_size_per_gpu,
    num_workers=train_num_workers,
    persistent_workers=persistent_workers,
    pin_memory=True,
    collate_fn=dict(type='yolov5_collate'),
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file=train_ann_file,
        data_prefix=dict(img=train_data_prefix),
        filter_cfg=dict(filter_empty_gt=False, min_size=32),
        pipeline=train_pipeline_stage1))

test_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
    dict(type='mmdet.Resize', scale=img_scale, keep_ratio=True),
    dict(
        type='mmdet.Pad',
        pad_to_square=True,
        pad_val=dict(img=(114.0, 114.0, 114.0))),
    dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

val_dataloader = dict(
    batch_size=val_batch_size_per_gpu,
    num_workers=val_num_workers,
    persistent_workers=persistent_workers,
    pin_memory=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file=val_ann_file,
        data_prefix=dict(img=val_data_prefix),
        test_mode=True,
        pipeline=test_pipeline))
test_dataloader = val_dataloader

# Reduce evaluation time
val_evaluator = dict(
    type='mmdet.CocoMetric',
    proposal_nums=(100, 1, 10),
    ann_file=data_root + val_ann_file,
    metric='bbox')
test_evaluator = val_evaluator

# optimizer
# default 8 gpu
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(
        type='SGD',
        lr=base_lr,
        momentum=0.9,
        weight_decay=weight_decay,
        nesterov=True),
    paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.))

# learning rate
param_scheduler = [
    dict(
        # use quadratic formula to warm up 5 epochs
        # and lr is updated by iteration
        # TODO: fix default scope in get function
        type='mmdet.QuadraticWarmupLR',
        by_epoch=True,
        begin=0,
        end=5,
        convert_to_iter_based=True),
    dict(
        # use cosine lr from 5 to 285 epoch
        type='CosineAnnealingLR',
        eta_min=base_lr * 0.05,
        begin=5,
        T_max=max_epochs - num_last_epochs,
        end=max_epochs - num_last_epochs,
        by_epoch=True,
        convert_to_iter_based=True),
    dict(
        # use fixed lr during last 15 epochs
        type='ConstantLR',
        by_epoch=True,
        factor=1,
        begin=max_epochs - num_last_epochs,
        end=max_epochs,
    )
]

default_hooks = dict(
    checkpoint=dict(
        type='CheckpointHook',
        interval=save_epoch_intervals,
        max_keep_ckpts=max_keep_ckpts,
        save_best='auto'))

custom_hooks = [
    dict(
        type='YOLOXModeSwitchHook',
        num_last_epochs=num_last_epochs,
        new_train_pipeline=train_pipeline_stage2,
        priority=48),
    dict(type='mmdet.SyncNormHook', priority=48),
    dict(
        type='EMAHook',
        ema_type='ExpMomentumEMA',
        momentum=ema_momentum,
        update_buffers=True,
        strict_load=False,
        priority=49)
]

train_cfg = dict(
    type='EpochBasedTrainLoop',
    max_epochs=max_epochs,
    val_interval=save_epoch_intervals,
    dynamic_intervals=[(max_epochs - num_last_epochs, 1)])

auto_scale_lr = dict(base_batch_size=8 * train_batch_size_per_gpu)

val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
10,329
30.114458
78
py
mmyolo
mmyolo-main/configs/yolox/yolox_s_fast_1xb12-40e-rtmdet-hyp_cat.py
_base_ = './yolox_s_fast_8xb32-300e-rtmdet-hyp_coco.py'

data_root = './data/cat/'
class_name = ('cat', )
num_classes = len(class_name)
metainfo = dict(classes=class_name, palette=[(20, 220, 60)])

num_last_epochs = 5
max_epochs = 40
train_batch_size_per_gpu = 12
train_num_workers = 4

load_from = 'https://download.openmmlab.com/mmyolo/v0/yolox/yolox_s_fast_8xb32-300e-rtmdet-hyp_coco/yolox_s_fast_8xb32-300e-rtmdet-hyp_coco_20230210_134645-3a8dfbd7.pth'  # noqa

model = dict(
    backbone=dict(frozen_stages=4),
    bbox_head=dict(head_module=dict(num_classes=num_classes)))

train_dataloader = dict(
    batch_size=train_batch_size_per_gpu,
    num_workers=train_num_workers,
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        ann_file='annotations/trainval.json',
        data_prefix=dict(img='images/')))

val_dataloader = dict(
    dataset=dict(
        metainfo=metainfo,
        data_root=data_root,
        ann_file='annotations/test.json',
        data_prefix=dict(img='images/')))

test_dataloader = val_dataloader

param_scheduler = [
    dict(
        # use quadratic formula to warm up 3 epochs
        # and lr is updated by iteration
        # TODO: fix default scope in get function
        type='mmdet.QuadraticWarmupLR',
        by_epoch=True,
        begin=0,
        end=3,
        convert_to_iter_based=True),
    dict(
        # use cosine lr from 5 to 35 epoch
        type='CosineAnnealingLR',
        eta_min=_base_.base_lr * 0.05,
        begin=5,
        T_max=max_epochs - num_last_epochs,
        end=max_epochs - num_last_epochs,
        by_epoch=True,
        convert_to_iter_based=True),
    dict(
        # use fixed lr during last num_last_epochs epochs
        type='ConstantLR',
        by_epoch=True,
        factor=1,
        begin=max_epochs - num_last_epochs,
        end=max_epochs,
    )
]

_base_.custom_hooks[0].num_last_epochs = num_last_epochs

val_evaluator = dict(ann_file=data_root + 'annotations/test.json')
test_evaluator = val_evaluator

default_hooks = dict(
    checkpoint=dict(interval=10, max_keep_ckpts=2, save_best='auto'),
    logger=dict(type='LoggerHook', interval=5))
train_cfg = dict(max_epochs=max_epochs, val_interval=10)
# visualizer = dict(vis_backends = [dict(type='LocalVisBackend'), dict(type='WandbVisBackend')])  # noqa
2,326
29.220779
177
py
mmyolo
mmyolo-main/configs/yolox/yolox_nano_fast_8xb32-300e-rtmdet-hyp_coco.py
_base_ = './yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco.py'

# ========================modified parameters======================
deepen_factor = 0.33
widen_factor = 0.25
use_depthwise = True

# =======================Unmodified in most cases==================
# model settings
model = dict(
    backbone=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
        use_depthwise=use_depthwise),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
        use_depthwise=use_depthwise),
    bbox_head=dict(
        head_module=dict(
            widen_factor=widen_factor, use_depthwise=use_depthwise)))
660
29.045455
69
py
mmyolo
mmyolo-main/configs/yolox/yolox_nano_fast_8xb8-300e_coco.py
_base_ = './yolox_tiny_fast_8xb8-300e_coco.py'

# ========================modified parameters======================
deepen_factor = 0.33
widen_factor = 0.25
use_depthwise = True

# =======================Unmodified in most cases==================
# model settings
model = dict(
    backbone=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
        use_depthwise=use_depthwise),
    neck=dict(
        deepen_factor=deepen_factor,
        widen_factor=widen_factor,
        use_depthwise=use_depthwise),
    bbox_head=dict(
        head_module=dict(
            widen_factor=widen_factor, use_depthwise=use_depthwise)))
648
28.5
69
py
mmyolo
mmyolo-main/configs/yolox/yolox_tiny_fast_8xb8-300e_coco.py
_base_ = './yolox_s_fast_8xb8-300e_coco.py'

# ========================modified parameters======================
deepen_factor = 0.33
widen_factor = 0.375
scaling_ratio_range = (0.5, 1.5)

# =======================Unmodified in most cases==================
img_scale = _base_.img_scale
pre_transform = _base_.pre_transform

test_img_scale = (416, 416)
tta_img_scales = [test_img_scale, (320, 320), (640, 640)]

# model settings
model = dict(
    data_preprocessor=dict(batch_augments=[
        dict(
            type='YOLOXBatchSyncRandomResize',
            random_size_range=(320, 640),
            size_divisor=32,
            interval=10)
    ]),
    backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
    neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))

train_pipeline_stage1 = [
    *pre_transform,
    dict(
        type='Mosaic',
        img_scale=img_scale,
        pad_val=114.0,
        pre_transform=pre_transform),
    dict(
        type='mmdet.RandomAffine',
        scaling_ratio_range=scaling_ratio_range,  # note
        # img_scale is (width, height)
        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
    dict(type='mmdet.YOLOXHSVRandomAug'),
    dict(type='mmdet.RandomFlip', prob=0.5),
    dict(
        type='mmdet.FilterAnnotations',
        min_gt_bbox_wh=(1, 1),
        keep_empty=False),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
                   'flip_direction'))
]

test_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
    dict(type='mmdet.Resize', scale=test_img_scale, keep_ratio=True),  # note
    dict(
        type='mmdet.Pad',
        pad_to_square=True,
        pad_val=dict(img=(114.0, 114.0, 114.0))),
    dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

train_dataloader = dict(dataset=dict(pipeline=train_pipeline_stage1))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader

# Config for Test Time Augmentation. (TTA)
tta_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
    dict(
        type='TestTimeAug',
        transforms=[
            [
                dict(type='mmdet.Resize', scale=s, keep_ratio=True)
                for s in tta_img_scales
            ],
            [
                # ``RandomFlip`` must be placed before ``Pad``, otherwise
                # bounding box coordinates after flipping cannot be
                # recovered correctly.
                dict(type='mmdet.RandomFlip', prob=1.),
                dict(type='mmdet.RandomFlip', prob=0.)
            ],
            [
                dict(
                    type='mmdet.Pad',
                    pad_to_square=True,
                    pad_val=dict(img=(114.0, 114.0, 114.0))),
            ],
            [
                dict(
                    type='mmdet.PackDetInputs',
                    meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                               'scale_factor', 'flip', 'flip_direction'))
            ]
        ])
]
3,369
32.366337
78
py
mmyolo
mmyolo-main/configs/yolox/yolox_s_fast_8xb32-300e-rtmdet-hyp_coco.py
_base_ = './yolox_s_fast_8xb8-300e_coco.py'

# ========================modified parameters======================
# Batch size of a single GPU during training
# 8 -> 32
train_batch_size_per_gpu = 32
# Multi-scale training intervals
# 10 -> 1
batch_augments_interval = 1
# Last epoch number to switch training pipeline
# 15 -> 20
num_last_epochs = 20

# Base learning rate for optim_wrapper. Corresponding to 8xb32=256 bs
base_lr = 0.004

# SGD -> AdamW
optim_wrapper = dict(
    _delete_=True,
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05),
    paramwise_cfg=dict(
        norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))

# 0.0001 -> 0.0002
ema_momentum = 0.0002

# ============================== Unmodified in most cases ===================
model = dict(
    data_preprocessor=dict(batch_augments=[
        dict(
            type='YOLOXBatchSyncRandomResize',
            random_size_range=(480, 800),
            size_divisor=32,
            interval=batch_augments_interval)
    ]))

param_scheduler = [
    dict(
        # use quadratic formula to warm up 5 epochs
        # and lr is updated by iteration
        # TODO: fix default scope in get function
        type='mmdet.QuadraticWarmupLR',
        by_epoch=True,
        begin=0,
        end=5,
        convert_to_iter_based=True),
    dict(
        # use cosine lr from 5 to 285 epoch
        type='CosineAnnealingLR',
        eta_min=base_lr * 0.05,
        begin=5,
        T_max=_base_.max_epochs - num_last_epochs,
        end=_base_.max_epochs - num_last_epochs,
        by_epoch=True,
        convert_to_iter_based=True),
    dict(
        # use fixed lr during last num_last_epochs epochs
        type='ConstantLR',
        by_epoch=True,
        factor=1,
        begin=_base_.max_epochs - num_last_epochs,
        end=_base_.max_epochs,
    )
]

custom_hooks = [
    dict(
        type='YOLOXModeSwitchHook',
        num_last_epochs=num_last_epochs,
        new_train_pipeline=_base_.train_pipeline_stage2,
        priority=48),
    dict(type='mmdet.SyncNormHook', priority=48),
    dict(
        type='EMAHook',
        ema_type='ExpMomentumEMA',
        momentum=ema_momentum,
        update_buffers=True,
        strict_load=False,
        priority=49)
]

train_dataloader = dict(batch_size=train_batch_size_per_gpu)
train_cfg = dict(dynamic_intervals=[(_base_.max_epochs - num_last_epochs, 1)])
auto_scale_lr = dict(base_batch_size=8 * train_batch_size_per_gpu)
2,494
27.352273
78
py
mmyolo
mmyolo-main/configs/yolox/yolox_m_fast_8xb8-300e_coco.py
_base_ = './yolox_s_fast_8xb8-300e_coco.py'

# ========================modified parameters======================
deepen_factor = 0.67
widen_factor = 0.75

# =======================Unmodified in most cases==================
# model settings
model = dict(
    backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
    neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
465
34.846154
74
py
mmyolo
mmyolo-main/configs/yolox/yolox_tiny_fast_8xb32-300e-rtmdet-hyp_coco.py
_base_ = './yolox_s_fast_8xb32-300e-rtmdet-hyp_coco.py'

# ========================modified parameters======================
deepen_factor = 0.33
widen_factor = 0.375
# Multi-scale training intervals
# 10 -> 1
batch_augments_interval = 1
scaling_ratio_range = (0.5, 1.5)

# =======================Unmodified in most cases==================
img_scale = _base_.img_scale
pre_transform = _base_.pre_transform

# model settings
model = dict(
    data_preprocessor=dict(batch_augments=[
        dict(
            type='YOLOXBatchSyncRandomResize',
            random_size_range=(320, 640),
            size_divisor=32,
            interval=batch_augments_interval)
    ]),
    backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
    neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))

train_pipeline_stage1 = [
    *pre_transform,
    dict(
        type='Mosaic',
        img_scale=img_scale,
        pad_val=114.0,
        pre_transform=pre_transform),
    dict(
        type='mmdet.RandomAffine',
        scaling_ratio_range=scaling_ratio_range,  # note
        # img_scale is (width, height)
        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
    dict(type='mmdet.YOLOXHSVRandomAug'),
    dict(type='mmdet.RandomFlip', prob=0.5),
    dict(
        type='mmdet.FilterAnnotations',
        min_gt_bbox_wh=(1, 1),
        keep_empty=False),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
                   'flip_direction'))
]

test_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
    dict(type='mmdet.Resize', scale=(416, 416), keep_ratio=True),  # note
    dict(
        type='mmdet.Pad',
        pad_to_square=True,
        pad_val=dict(img=(114.0, 114.0, 114.0))),
    dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),
    dict(
        type='mmdet.PackDetInputs',
        meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                   'scale_factor'))
]

train_dataloader = dict(dataset=dict(pipeline=train_pipeline_stage1))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
2,273
31.028169
77
py
mmyolo
mmyolo-main/configs/yolox/yolox_m_fast_8xb32-300e-rtmdet-hyp_coco.py
_base_ = './yolox_s_fast_8xb32-300e-rtmdet-hyp_coco.py'

# ========================modified parameters======================
deepen_factor = 0.67
widen_factor = 0.75

# =======================Unmodified in most cases==================
# model settings
model = dict(
    backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
    neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
477
35.769231
74
py
mmyolo
mmyolo-main/configs/yolox/yolox_l_fast_8xb8-300e_coco.py
_base_ = './yolox_s_fast_8xb8-300e_coco.py'

# ========================modified parameters======================
deepen_factor = 1.0
widen_factor = 1.0

# =======================Unmodified in most cases==================
# model settings
model = dict(
    backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
    neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
463
34.692308
74
py
mmyolo
mmyolo-main/configs/yolox/yolox_x_fast_8xb8-300e_coco.py
_base_ = './yolox_s_fast_8xb8-300e_coco.py'

# ========================modified parameters======================
deepen_factor = 1.33
widen_factor = 1.25

# =======================Unmodified in most cases==================
# model settings
model = dict(
    backbone=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
    neck=dict(deepen_factor=deepen_factor, widen_factor=widen_factor),
    bbox_head=dict(head_module=dict(widen_factor=widen_factor)))
465
34.846154
74
py
mmyolo
mmyolo-main/configs/_base_/default_runtime.py
default_scope = 'mmyolo'

default_hooks = dict(
    timer=dict(type='IterTimerHook'),
    logger=dict(type='LoggerHook', interval=50),
    param_scheduler=dict(type='ParamSchedulerHook'),
    checkpoint=dict(type='CheckpointHook', interval=1),
    sampler_seed=dict(type='DistSamplerSeedHook'),
    visualization=dict(type='mmdet.DetVisualizationHook'))

env_cfg = dict(
    cudnn_benchmark=False,
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    dist_cfg=dict(backend='nccl'),
)

vis_backends = [dict(type='LocalVisBackend')]
visualizer = dict(
    type='mmdet.DetLocalVisualizer',
    vis_backends=vis_backends,
    name='visualizer')
log_processor = dict(type='LogProcessor', window_size=50, by_epoch=True)

log_level = 'INFO'
load_from = None
resume = False

# file_client_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
file_client_args = dict(backend='disk')
1,043
28.828571
72
py
mmyolo
mmyolo-main/configs/_base_/det_p5_tta.py
# TODO: Need to solve the problem of multiple file_client_args parameters
# _file_client_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         './data/': 's3://openmmlab/datasets/detection/',
#         'data/': 's3://openmmlab/datasets/detection/'
#     }))
_file_client_args = dict(backend='disk')

tta_model = dict(
    type='mmdet.DetTTAModel',
    tta_cfg=dict(nms=dict(type='nms', iou_threshold=0.65), max_per_img=300))

img_scales = [(640, 640), (320, 320), (960, 960)]

#                                    LoadImageFromFile
#                 /                          |                          \
# (RatioResize,LetterResize) (RatioResize,LetterResize) (RatioResize,LetterResize)  # noqa
#       /    \                     /    \                     /    \
# RandomFlip RandomFlip     RandomFlip RandomFlip      RandomFlip RandomFlip  # noqa
#     |          |              |          |               |          |
#  LoadAnn    LoadAnn        LoadAnn    LoadAnn         LoadAnn    LoadAnn
#     |          |              |          |               |          |
#  PackDetIn  PackDetIn      PackDetIn  PackDetIn       PackDetIn  PackDetIn  # noqa

_multiscale_resize_transforms = [
    dict(
        type='Compose',
        transforms=[
            dict(type='YOLOv5KeepRatioResize', scale=s),
            dict(
                type='LetterResize',
                scale=s,
                allow_scale_up=False,
                pad_val=dict(img=114))
        ]) for s in img_scales
]

tta_pipeline = [
    dict(type='LoadImageFromFile', file_client_args=_file_client_args),
    dict(
        type='TestTimeAug',
        transforms=[
            _multiscale_resize_transforms,
            [
                dict(type='mmdet.RandomFlip', prob=1.),
                dict(type='mmdet.RandomFlip', prob=0.)
            ], [dict(type='mmdet.LoadAnnotations', with_bbox=True)],
            [
                dict(
                    type='mmdet.PackDetInputs',
                    meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
                               'scale_factor', 'pad_param', 'flip',
                               'flip_direction'))
            ]
        ])
]
2,216
37.224138
89
py
mmyolo
mmyolo-main/configs/deploy/base_dynamic.py
_base_ = ['./base_static.py']
onnx_config = dict(
    dynamic_axes={
        'input': {
            0: 'batch',
            2: 'height',
            3: 'width'
        },
        'dets': {
            0: 'batch',
            1: 'num_dets'
        },
        'labels': {
            0: 'batch',
            1: 'num_dets'
        }
    })
337
17.777778
29
py
mmyolo
mmyolo-main/configs/deploy/detection_tensorrt_static-640x640.py
_base_ = ['./base_static.py']
onnx_config = dict(input_shape=(640, 640))
backend_config = dict(
    type='tensorrt',
    common_config=dict(fp16_mode=False, max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    min_shape=[1, 3, 640, 640],
                    opt_shape=[1, 3, 640, 640],
                    max_shape=[1, 3, 640, 640])))
    ])
use_efficientnms = False  # whether to replace TRTBatchedNMS plugin with EfficientNMS plugin  # noqa E501
536
34.8
104
py
mmyolo
mmyolo-main/configs/deploy/detection_tensorrt-fp16_dynamic-64x64-1344x1344.py
_base_ = ['./base_dynamic.py']
backend_config = dict(
    type='tensorrt',
    common_config=dict(fp16_mode=True, max_workspace_size=1 << 32),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    min_shape=[1, 3, 64, 64],
                    opt_shape=[1, 3, 640, 640],
                    max_shape=[1, 3, 1344, 1344])))
    ])
use_efficientnms = False  # whether to replace TRTBatchedNMS plugin with EfficientNMS plugin  # noqa E501
493
34.285714
104
py
mmyolo
mmyolo-main/configs/deploy/detection_tensorrt_dynamic-192x192-960x960.py
_base_ = ['./base_dynamic.py']
backend_config = dict(
    type='tensorrt',
    common_config=dict(fp16_mode=False, max_workspace_size=1 << 30),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    min_shape=[1, 3, 192, 192],
                    opt_shape=[1, 3, 640, 640],
                    max_shape=[1, 3, 960, 960])))
    ])
use_efficientnms = False  # whether to replace TRTBatchedNMS plugin with EfficientNMS plugin  # noqa E501
494
34.357143
104
py
mmyolo
mmyolo-main/configs/deploy/detection_onnxruntime_static.py
_base_ = ['./base_static.py']
codebase_config = dict(
    type='mmyolo',
    task='ObjectDetection',
    model_type='end2end',
    post_processing=dict(
        score_threshold=0.05,
        confidence_threshold=0.005,
        iou_threshold=0.5,
        max_output_boxes_per_class=200,
        pre_top_k=5000,
        keep_top_k=100,
        background_label_id=-1),
    module=['mmyolo.deploy'])
backend_config = dict(type='onnxruntime')
439
26.5
41
py
mmyolo
mmyolo-main/configs/deploy/detection_tensorrt-int8_static-640x640.py
_base_ = ['./base_static.py']
onnx_config = dict(input_shape=(640, 640))
backend_config = dict(
    type='tensorrt',
    common_config=dict(
        fp16_mode=True, max_workspace_size=1 << 30, int8_mode=True),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    min_shape=[1, 3, 640, 640],
                    opt_shape=[1, 3, 640, 640],
                    max_shape=[1, 3, 640, 640])))
    ])
calib_config = dict(create_calib=True, calib_file='calib_data.h5')
use_efficientnms = False  # whether to replace TRTBatchedNMS plugin with EfficientNMS plugin  # noqa E501
627
35.941176
104
py
mmyolo
mmyolo-main/configs/deploy/detection_rknn-fp16_static-320x320.py
_base_ = ['./base_static.py']
onnx_config = dict(
    input_shape=[320, 320], output_names=['feat0', 'feat1', 'feat2'])
codebase_config = dict(model_type='rknn')
backend_config = dict(
    type='rknn',
    common_config=dict(target_platform='rv1126', optimization_level=1),
    quantization_config=dict(do_quantization=False, dataset=None),
    input_size_list=[[3, 320, 320]])
378
36.9
71
py
mmyolo
mmyolo-main/configs/deploy/base_static.py
onnx_config = dict(
    type='onnx',
    export_params=True,
    keep_initializers_as_inputs=False,
    opset_version=11,
    save_file='end2end.onnx',
    input_names=['input'],
    output_names=['dets', 'labels'],
    input_shape=None,
    optimize=True)
codebase_config = dict(
    type='mmyolo',
    task='ObjectDetection',
    model_type='end2end',
    post_processing=dict(
        score_threshold=0.05,
        confidence_threshold=0.005,
        iou_threshold=0.5,
        max_output_boxes_per_class=200,
        pre_top_k=5000,
        keep_top_k=100,
        background_label_id=-1),
    module=['mmyolo.deploy'])
624
25.041667
39
py
mmyolo
mmyolo-main/configs/deploy/detection_tensorrt-int8_dynamic-192x192-960x960.py
_base_ = ['./base_dynamic.py']
backend_config = dict(
    type='tensorrt',
    common_config=dict(
        fp16_mode=True, max_workspace_size=1 << 30, int8_mode=True),
    model_inputs=[
        dict(
            input_shapes=dict(
                input=dict(
                    min_shape=[1, 3, 192, 192],
                    opt_shape=[1, 3, 640, 640],
                    max_shape=[1, 3, 960, 960])))
    ])
calib_config = dict(create_calib=True, calib_file='calib_data.h5')
use_efficientnms = False  # whether to replace TRTBatchedNMS plugin with EfficientNMS plugin  # noqa E501
585
35.625
104
py
mmyolo
mmyolo-main/configs/deploy/detection_rknn-int8_static-320x320.py
_base_ = ['./base_static.py']
onnx_config = dict(
    input_shape=[320, 320], output_names=['feat0', 'feat1', 'feat2'])
codebase_config = dict(model_type='rknn')
backend_config = dict(
    type='rknn',
    common_config=dict(target_platform='rv1126', optimization_level=1),
    quantization_config=dict(do_quantization=True, dataset=None),
    input_size_list=[[3, 320, 320]])
377
36.8
71
py