repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
mmyolo | mmyolo-main/tests/test_models/test_dense_heads/test_ppyoloe_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import ConfigDict, MessageHub
from mmengine.config import Config
from mmengine.model import bias_init_with_prob
from mmengine.testing import assert_allclose
from mmyolo.models import PPYOLOEHead
from mmyolo.utils import register_all_modules
register_all_modules()
class TestPPYOLOEHead(TestCase):
    """Tests for PPYOLOEHead weight init, inference and loss computation."""

    def setUp(self):
        # Minimal head-module config shared by every test. ``num_classes``
        # is mutated in place by the single-class test below.
        self.head_module = dict(
            type='PPYOLOEHeadModule',
            num_classes=4,
            in_channels=[32, 64, 128],
            featmap_strides=(8, 16, 32))

    def _build_train_head(self, num_classes):
        """Build a PPYOLOEHead in train mode with the standard assigners.

        Args:
            num_classes (int): Number of classes for both assigners.

        Returns:
            PPYOLOEHead: Head configured with the ATSS initial assigner and
            the task-aligned assigner used after ``initial_epoch``.
        """
        head = PPYOLOEHead(
            head_module=self.head_module,
            train_cfg=ConfigDict(
                initial_epoch=31,
                initial_assigner=dict(
                    type='BatchATSSAssigner',
                    num_classes=num_classes,
                    topk=9,
                    iou_calculator=dict(type='mmdet.BboxOverlaps2D')),
                assigner=dict(
                    type='BatchTaskAlignedAssigner',
                    num_classes=num_classes,
                    topk=13,
                    alpha=1,
                    beta=6)))
        head.train()
        return head

    def _make_feats(self, s):
        """Return one random feature map per level for an ``s`` x ``s`` input."""
        return [
            torch.rand(1, in_channels, s // stride, s // stride)
            for in_channels, stride in zip(self.head_module['in_channels'],
                                           self.head_module['featmap_strides'])
        ]

    def test_init_weights(self):
        """Cls convs start at zero weight with a 0.01-prior bias; reg convs
        start at zero weight with unit bias."""
        head = PPYOLOEHead(head_module=self.head_module)
        head.head_module.init_weights()
        bias_init = bias_init_with_prob(0.01)
        for conv_cls, conv_reg in zip(head.head_module.cls_preds,
                                      head.head_module.reg_preds):
            assert_allclose(conv_cls.weight.data,
                            torch.zeros_like(conv_cls.weight.data))
            assert_allclose(conv_reg.weight.data,
                            torch.zeros_like(conv_reg.weight.data))
            # Classification bias encodes a 0.01 prior probability.
            assert_allclose(conv_cls.bias.data,
                            torch.ones_like(conv_cls.bias.data) * bias_init)
            assert_allclose(conv_reg.bias.data,
                            torch.ones_like(conv_reg.bias.data))

    def test_predict_by_feat(self):
        """Smoke-test inference with and without rescaling/NMS."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'ori_shape': (s, s, 3),
            'scale_factor': (1.0, 1.0),
        }]
        test_cfg = Config(
            dict(
                multi_label=True,
                nms_pre=1000,
                score_thr=0.01,
                nms=dict(type='nms', iou_threshold=0.7),
                max_per_img=300))
        head = PPYOLOEHead(head_module=self.head_module, test_cfg=test_cfg)
        head.eval()
        feat = self._make_feats(s)
        cls_scores, bbox_preds = head.forward(feat)
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            None,
            img_metas,
            cfg=test_cfg,
            rescale=True,
            with_nms=True)
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            None,
            img_metas,
            cfg=test_cfg,
            rescale=False,
            with_nms=False)

    def test_loss_by_feat(self):
        """Check cls/box/dfl loss terms for empty and non-empty GT."""
        # PPYOLOE reads the current epoch from the message hub to choose
        # between the initial and the task-aligned assigner.
        message_hub = MessageHub.get_instance('test_ppyoloe_loss_by_feat')
        message_hub.update_info('epoch', 1)

        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'batch_input_shape': (s, s),
            'scale_factor': 1,
        }]
        head = self._build_train_head(num_classes=4)
        feat = self._make_feats(s)
        cls_scores, bbox_preds, bbox_dist_preds = head.forward(feat)

        # Test that empty ground truth encourages the network to predict
        # background: only the classification loss may be non-zero.
        gt_instances = torch.empty((0, 6), dtype=torch.float32)
        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                            bbox_dist_preds, gt_instances,
                                            img_metas)
        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
        empty_dfl_loss = empty_gt_losses['loss_dfl'].sum()
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_dfl_loss.item(), 0,
            'there should be no dfl loss when there are no true boxes')

        # When truth is non-empty, every loss term should be non-zero for
        # random inputs.
        head = self._build_train_head(num_classes=4)
        gt_instances = torch.Tensor(
            [[0., 0., 23.6667, 23.8757, 238.6326, 151.8874]])
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                          bbox_dist_preds, gt_instances,
                                          img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        onegt_loss_dfl = one_gt_losses['loss_dfl'].sum()
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_loss_dfl.item(), 0,
                           'dfl loss should be non-zero')

        # The same should hold with num_classes = 1.
        self.head_module['num_classes'] = 1
        head = self._build_train_head(num_classes=1)
        gt_instances = torch.Tensor(
            [[0., 0., 23.6667, 23.8757, 238.6326, 151.8874]])
        cls_scores, bbox_preds, bbox_dist_preds = head.forward(feat)
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                          bbox_dist_preds, gt_instances,
                                          img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        onegt_loss_dfl = one_gt_losses['loss_dfl'].sum()
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_loss_dfl.item(), 0,
                           'dfl loss should be non-zero')
| 7,851 | 37.116505 | 78 | py |
mmyolo | mmyolo-main/tests/test_models/test_dense_heads/test_yolov7_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import Config
from mmengine.structures import InstanceData
from mmyolo.models.dense_heads import YOLOv7Head
from mmyolo.utils import register_all_modules
register_all_modules()
# TODO: Test YOLOv7p6HeadModule
class TestYOLOv7Head(TestCase):
    """Tests for YOLOv7Head inference and loss computation."""

    def setUp(self):
        # Head-module config shared by every test. ``num_classes`` is
        # mutated in place by the single-class test below.
        self.head_module = dict(
            type='YOLOv7HeadModule',
            num_classes=2,
            in_channels=[32, 64, 128],
            featmap_strides=[8, 16, 32],
            num_base_priors=3)

    def _make_feats(self, s):
        """Return one random feature map per level for an ``s`` x ``s`` input."""
        return [
            torch.rand(1, in_channels, s // stride, s // stride)
            for in_channels, stride in zip(self.head_module['in_channels'],
                                           self.head_module['featmap_strides'])
        ]

    def test_predict_by_feat(self):
        """Smoke-test inference with and without rescaling/NMS."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'ori_shape': (s, s, 3),
            'scale_factor': (1.0, 1.0),
        }]
        test_cfg = Config(
            dict(
                multi_label=True,
                max_per_img=300,
                score_thr=0.01,
                nms=dict(type='nms', iou_threshold=0.65)))
        head = YOLOv7Head(head_module=self.head_module, test_cfg=test_cfg)
        feat = self._make_feats(s)
        cls_scores, bbox_preds, objectnesses = head.forward(feat)
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            objectnesses,
            img_metas,
            cfg=test_cfg,
            rescale=True,
            with_nms=True)
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            objectnesses,
            img_metas,
            cfg=test_cfg,
            rescale=False,
            with_nms=False)

    def test_loss_by_feat(self):
        """Check cls/box/obj loss terms for empty and non-empty GT."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'batch_input_shape': (s, s),
            'scale_factor': 1,
        }]
        head = YOLOv7Head(head_module=self.head_module)
        feat = self._make_feats(s)
        cls_scores, bbox_preds, objectnesses = head.forward(feat)

        # Test that empty ground truth encourages the network to predict
        # background: only the objectness loss should be non-zero.
        gt_instances = InstanceData(
            bboxes=torch.empty((0, 4)), labels=torch.LongTensor([]))
        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                            objectnesses, [gt_instances],
                                            img_metas)
        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
        empty_obj_loss = empty_gt_losses['loss_obj'].sum()
        self.assertEqual(
            empty_cls_loss.item(), 0,
            'there should be no cls loss when there are no true boxes')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertGreater(empty_obj_loss.item(), 0,
                           'objectness loss should be non-zero')

        # When truth is non-empty then cls, box and obj loss should all be
        # non-zero for random inputs.
        head = YOLOv7Head(head_module=self.head_module)
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
            labels=torch.LongTensor([1]))
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses,
                                          [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        onegt_obj_loss = one_gt_losses['loss_obj'].sum()
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_obj_loss.item(), 0,
                           'obj loss should be non-zero')

        # With a single class the classification branch is degenerate, so
        # its loss must be exactly zero.
        self.head_module['num_classes'] = 1
        head = YOLOv7Head(head_module=self.head_module)
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
            labels=torch.LongTensor([0]))
        cls_scores, bbox_preds, objectnesses = head.forward(feat)
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses,
                                          [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        onegt_obj_loss = one_gt_losses['loss_obj'].sum()
        self.assertEqual(
            onegt_cls_loss.item(), 0,
            'there should be no cls loss when num_classes equals 1')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_obj_loss.item(), 0,
                           'obj loss should be non-zero')
| 5,521 | 36.821918 | 79 | py |
mmyolo | mmyolo-main/tests/test_models/test_dense_heads/test_yolov5_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import Config
from mmengine.structures import InstanceData
from mmyolo.models.dense_heads import YOLOv5Head
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOv5Head(TestCase):
    """Tests for YOLOv5Head inference and loss computation."""

    def setUp(self):
        # Head-module config shared by every test. ``num_classes`` is
        # mutated in place by the single-class tests below.
        self.head_module = dict(
            type='YOLOv5HeadModule',
            num_classes=2,
            in_channels=[32, 64, 128],
            featmap_strides=[8, 16, 32],
            num_base_priors=3)

    def _make_feats(self, s):
        """Return one random feature map per level for an ``s`` x ``s`` input."""
        return [
            torch.rand(1, in_channels, s // stride, s // stride)
            for in_channels, stride in zip(self.head_module['in_channels'],
                                           self.head_module['featmap_strides'])
        ]

    def test_predict_by_feat(self):
        """Smoke-test inference with and without rescaling/NMS."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'ori_shape': (s, s, 3),
            'scale_factor': (1.0, 1.0),
        }]
        test_cfg = Config(
            dict(
                multi_label=True,
                max_per_img=300,
                score_thr=0.01,
                nms=dict(type='nms', iou_threshold=0.65)))
        head = YOLOv5Head(head_module=self.head_module, test_cfg=test_cfg)
        feat = self._make_feats(s)
        cls_scores, bbox_preds, objectnesses = head.forward(feat)
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            objectnesses,
            img_metas,
            cfg=test_cfg,
            rescale=True,
            with_nms=True)
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            objectnesses,
            img_metas,
            cfg=test_cfg,
            rescale=False,
            with_nms=False)

    def test_loss_by_feat(self):
        """Check cls/box/obj loss terms for empty and non-empty GT."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'batch_input_shape': (s, s),
            'scale_factor': 1,
        }]
        head = YOLOv5Head(head_module=self.head_module)
        feat = self._make_feats(s)
        cls_scores, bbox_preds, objectnesses = head.forward(feat)

        # Test that empty ground truth encourages the network to predict
        # background: only the objectness loss should be non-zero.
        gt_instances = InstanceData(
            bboxes=torch.empty((0, 4)), labels=torch.LongTensor([]))
        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                            objectnesses, [gt_instances],
                                            img_metas)
        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
        empty_obj_loss = empty_gt_losses['loss_obj'].sum()
        self.assertEqual(
            empty_cls_loss.item(), 0,
            'there should be no cls loss when there are no true boxes')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertGreater(empty_obj_loss.item(), 0,
                           'objectness loss should be non-zero')

        # When truth is non-empty then cls, box and obj loss should all be
        # non-zero for random inputs.
        head = YOLOv5Head(head_module=self.head_module)
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
            labels=torch.LongTensor([1]))
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses,
                                          [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        onegt_obj_loss = one_gt_losses['loss_obj'].sum()
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_obj_loss.item(), 0,
                           'obj loss should be non-zero')

        # With a single class the classification branch is degenerate, so
        # its loss must be exactly zero.
        self.head_module['num_classes'] = 1
        head = YOLOv5Head(head_module=self.head_module)
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
            labels=torch.LongTensor([0]))
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses,
                                          [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        onegt_obj_loss = one_gt_losses['loss_obj'].sum()
        self.assertEqual(
            onegt_cls_loss.item(), 0,
            'there should be no cls loss when num_classes equals 1')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_obj_loss.item(), 0,
                           'obj loss should be non-zero')

    def test_loss_by_feat_with_ignore(self):
        """Same as ``test_loss_by_feat`` but with ignored GT regions
        (``ignore_iof_thr``) passed to ``_loss_by_feat_with_ignore``."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'batch_input_shape': (s, s),
            'scale_factor': 1,
        }]
        head = YOLOv5Head(head_module=self.head_module, ignore_iof_thr=0.8)
        feat = self._make_feats(s)
        cls_scores, bbox_preds, objectnesses = head.forward(feat)

        # Test that empty ground truth encourages the network to predict
        # background: only the objectness loss should be non-zero.
        gt_instances = InstanceData(
            bboxes=torch.empty((0, 4)), labels=torch.LongTensor([]))
        # Region to be ignored during target assignment.
        gt_instances_ignore = torch.tensor(
            [[0, 0, 69.7688, 0, 619.3611, 62.2711]], dtype=torch.float32)
        empty_gt_losses = head._loss_by_feat_with_ignore(
            cls_scores, bbox_preds, objectnesses, [gt_instances], img_metas,
            gt_instances_ignore)
        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
        empty_obj_loss = empty_gt_losses['loss_obj'].sum()
        self.assertEqual(
            empty_cls_loss.item(), 0,
            'there should be no cls loss when there are no true boxes')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertGreater(empty_obj_loss.item(), 0,
                           'objectness loss should be non-zero')

        # When truth is non-empty then cls, box and obj loss should all be
        # non-zero for random inputs.
        head = YOLOv5Head(head_module=self.head_module, ignore_iof_thr=0.8)
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
            labels=torch.LongTensor([1]))
        gt_instances_ignore = torch.tensor(
            [[0, 0, 69.7688, 0, 619.3611, 62.2711]], dtype=torch.float32)
        one_gt_losses = head._loss_by_feat_with_ignore(cls_scores, bbox_preds,
                                                       objectnesses,
                                                       [gt_instances],
                                                       img_metas,
                                                       gt_instances_ignore)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        onegt_obj_loss = one_gt_losses['loss_obj'].sum()
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_obj_loss.item(), 0,
                           'obj loss should be non-zero')

        # With a single class the classification branch is degenerate, so
        # its loss must be exactly zero.
        self.head_module['num_classes'] = 1
        head = YOLOv5Head(head_module=self.head_module, ignore_iof_thr=0.8)
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
            labels=torch.LongTensor([0]))
        gt_instances_ignore = torch.tensor(
            [[0, 0, 69.7688, 0, 619.3611, 62.2711]], dtype=torch.float32)
        one_gt_losses = head._loss_by_feat_with_ignore(cls_scores, bbox_preds,
                                                       objectnesses,
                                                       [gt_instances],
                                                       img_metas,
                                                       gt_instances_ignore)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        onegt_obj_loss = one_gt_losses['loss_obj'].sum()
        self.assertEqual(
            onegt_cls_loss.item(), 0,
            'there should be no cls loss when num_classes equals 1')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_obj_loss.item(), 0,
                           'obj loss should be non-zero')
| 9,871 | 40.654008 | 79 | py |
mmyolo | mmyolo-main/tests/test_models/test_dense_heads/test_yolox_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import Config
from mmengine.model import bias_init_with_prob
from mmengine.testing import assert_allclose
from mmyolo.models.dense_heads import YOLOXHead
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOXHead(TestCase):
    """Tests for YOLOXHead weight init, inference and loss computation."""

    def setUp(self):
        # Single-channel, single-conv head config to keep the tests light.
        self.head_module = dict(
            type='YOLOXHeadModule',
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
        )

    def test_init_weights(self):
        # Both cls and objectness conv biases are initialised from a 0.01
        # prior probability (focal-loss style initialisation).
        head = YOLOXHead(head_module=self.head_module)
        head.head_module.init_weights()
        bias_init = bias_init_with_prob(0.01)
        for conv_cls, conv_obj in zip(head.head_module.multi_level_conv_cls,
                                      head.head_module.multi_level_conv_obj):
            assert_allclose(conv_cls.bias.data,
                            torch.ones_like(conv_cls.bias.data) * bias_init)
            assert_allclose(conv_obj.bias.data,
                            torch.ones_like(conv_obj.bias.data) * bias_init)

    def test_predict_by_feat(self):
        # Smoke-test inference with and without rescaling/NMS.
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'ori_shape': (s, s, 3),
            'scale_factor': (1.0, 1.0),
        }]
        test_cfg = Config(
            dict(
                multi_label=True,
                max_per_img=300,
                score_thr=0.01,
                nms=dict(type='nms', iou_threshold=0.65)))
        head = YOLOXHead(head_module=self.head_module, test_cfg=test_cfg)
        # One random single-channel feature map per pyramid level.
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16]
        ]
        cls_scores, bbox_preds, objectnesses = head.forward(feat)
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            objectnesses,
            img_metas,
            cfg=test_cfg,
            rescale=True,
            with_nms=True)
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            objectnesses,
            img_metas,
            cfg=test_cfg,
            rescale=False,
            with_nms=False)

    def test_loss_by_feat(self):
        # Check loss terms under SimOTA assignment for empty, non-empty and
        # out-of-bound ground truth.
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'scale_factor': 1,
        }]
        train_cfg = Config(
            dict(
                assigner=dict(
                    type='mmdet.SimOTAAssigner',
                    iou_calculator=dict(type='mmdet.BboxOverlaps2D'),
                    center_radius=2.5,
                    candidate_topk=10,
                    iou_weight=3.0,
                    cls_weight=1.0)))
        head = YOLOXHead(head_module=self.head_module, train_cfg=train_cfg)
        assert not head.use_bbox_aux
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16]
        ]
        cls_scores, bbox_preds, objectnesses = head.forward(feat)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = torch.empty((0, 6))
        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                            objectnesses, gt_instances,
                                            img_metas)
        # When there is no truth, only the objectness loss is non-zero;
        # the cls and box losses should both be exactly zero.
        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
        empty_obj_loss = empty_gt_losses['loss_obj'].sum()
        self.assertEqual(
            empty_cls_loss.item(), 0,
            'there should be no cls loss when there are no true boxes')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertGreater(empty_obj_loss.item(), 0,
                           'objectness loss should be non-zero')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        head = YOLOXHead(head_module=self.head_module, train_cfg=train_cfg)
        # Enable the auxiliary L1 box loss so 'loss_bbox_aux' is produced.
        head.use_bbox_aux = True
        gt_instances = torch.Tensor(
            [[0, 2, 23.6667, 23.8757, 238.6326, 151.8874]])
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, objectnesses,
                                          gt_instances, img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        onegt_obj_loss = one_gt_losses['loss_obj'].sum()
        onegt_l1_loss = one_gt_losses['loss_bbox_aux'].sum()
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_obj_loss.item(), 0,
                           'obj loss should be non-zero')
        self.assertGreater(onegt_l1_loss.item(), 0,
                           'l1 loss should be non-zero')
        # Test ground truth out of bound
        gt_instances = torch.Tensor(
            [[0, 2, s * 4, s * 4, s * 4 + 10, s * 4 + 10]])
        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                            objectnesses, gt_instances,
                                            img_metas)
        # When gt_bboxes out of bound, the assign results should be empty,
        # so the cls and bbox loss should be zero.
        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
        empty_obj_loss = empty_gt_losses['loss_obj'].sum()
        self.assertEqual(
            empty_cls_loss.item(), 0,
            'there should be no cls loss when gt_bboxes out of bound')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when gt_bboxes out of bound')
        self.assertGreater(empty_obj_loss.item(), 0,
                           'objectness loss should be non-zero')
| 6,200 | 37.75625 | 79 | py |
mmyolo | mmyolo-main/tests/test_models/test_dense_heads/test_yolov6_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.config import Config
from mmyolo.models.dense_heads import YOLOv6Head
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOv6Head(TestCase):
    """Smoke tests for YOLOv6Head inference."""

    def setUp(self):
        # Three-level head config matching strides 8/16/32.
        self.head_module = dict(
            type='YOLOv6HeadModule',
            num_classes=2,
            in_channels=[32, 64, 128],
            featmap_strides=[8, 16, 32])

    def test_predict_by_feat(self):
        """Run ``predict_by_feat`` with and without rescaling/NMS."""
        img_size = 256
        batch_img_metas = [{
            'img_shape': (img_size, img_size, 3),
            'ori_shape': (img_size, img_size, 3),
            'scale_factor': (1.0, 1.0),
        }]
        test_cfg = Config(
            dict(
                multi_label=True,
                max_per_img=300,
                score_thr=0.01,
                nms=dict(type='nms', iou_threshold=0.65)))
        head = YOLOv6Head(head_module=self.head_module, test_cfg=test_cfg)
        # One random feature map per pyramid level.
        feats = [
            torch.rand(1, channels, img_size // stride, img_size // stride)
            for channels, stride in zip(self.head_module['in_channels'],
                                        self.head_module['featmap_strides'])
        ]
        cls_scores, bbox_preds = head.forward(feats)
        # Exercise both post-processing configurations.
        for rescale, with_nms in ((True, True), (False, False)):
            head.predict_by_feat(
                cls_scores,
                bbox_preds,
                None,
                batch_img_metas,
                cfg=test_cfg,
                rescale=rescale,
                with_nms=with_nms)
| 1,713 | 26.645161 | 74 | py |
mmyolo | mmyolo-main/tests/test_models/test_dense_heads/test_rotated_rtmdet_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmengine.config import Config
from mmengine.structures import InstanceData
from mmyolo.models.dense_heads import RTMDetRotatedHead
from mmyolo.utils import register_all_modules
register_all_modules()
class TestRTMDetRotatedHead(TestCase):
    """Tests for RTMDetRotatedHead init, inference and (rotated/hbb) loss."""

    def setUp(self):
        # Lightweight rotated-RTMDet head config with strides 4/8/16.
        self.head_module = dict(
            type='RTMDetRotatedSepBNHeadModule',
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            feat_channels=64,
            featmap_strides=[4, 8, 16])

    def test_init_weights(self):
        # Only checks that weight initialisation runs without raising.
        head = RTMDetRotatedHead(head_module=self.head_module)
        head.head_module.init_weights()

    def test_predict_by_feat(self):
        # Smoke-test rotated-box inference with and without rescaling/NMS.
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'ori_shape': (s, s, 3),
            'scale_factor': (1.0, 1.0),
        }]
        test_cfg = dict(
            multi_label=True,
            decode_with_angle=True,
            nms_pre=2000,
            score_thr=0.01,
            nms=dict(type='nms_rotated', iou_threshold=0.1),
            max_per_img=300)
        test_cfg = Config(test_cfg)
        head = RTMDetRotatedHead(
            head_module=self.head_module, test_cfg=test_cfg)
        # One random single-channel feature map per pyramid level.
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16]
        ]
        cls_scores, bbox_preds, angle_preds = head.forward(feat)
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            angle_preds,
            batch_img_metas=img_metas,
            cfg=test_cfg,
            rescale=True,
            with_nms=True)
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            angle_preds,
            batch_img_metas=img_metas,
            cfg=test_cfg,
            rescale=False,
            with_nms=False)

    def test_loss_by_feat(self):
        # The rotated-box loss path runs on GPU only in this test, so skip
        # on CPU-only hosts.
        if not torch.cuda.is_available():
            pytest.skip('test requires GPU and torch+cuda')
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'batch_input_shape': (s, s),
            'scale_factor': 1,
        }]
        train_cfg = dict(
            assigner=dict(
                type='BatchDynamicSoftLabelAssigner',
                num_classes=80,
                topk=13,
                iou_calculator=dict(type='mmrotate.RBboxOverlaps2D'),
                batch_iou=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False)
        train_cfg = Config(train_cfg)
        head = RTMDetRotatedHead(
            head_module=self.head_module, train_cfg=train_cfg).cuda()
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size).cuda()
            for feat_size in [4, 8, 16]
        ]
        cls_scores, bbox_preds, angle_preds = head.forward(feat)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData(
            bboxes=torch.empty((0, 5)).cuda(),
            labels=torch.LongTensor([]).cuda())
        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                            angle_preds, [gt_instances],
                                            img_metas)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
        self.assertGreater(empty_cls_loss.item(), 0,
                           'classification loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        head = RTMDetRotatedHead(
            head_module=self.head_module, train_cfg=train_cfg).cuda()
        # GT uses (cx, cy, w, h, angle) rotated-box format.
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[130.6667, 86.8757, 100.6326, 70.8874,
                                  0.2]]).cuda(),
            labels=torch.LongTensor([1]).cuda())
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, angle_preds,
                                          [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        # test num_class = 1
        self.head_module['num_classes'] = 1
        head = RTMDetRotatedHead(
            head_module=self.head_module, train_cfg=train_cfg).cuda()
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[130.6667, 86.8757, 100.6326, 70.8874,
                                  0.2]]).cuda(),
            labels=torch.LongTensor([0]).cuda())
        cls_scores, bbox_preds, angle_preds = head.forward(feat)
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, angle_preds,
                                          [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')

    def test_hbb_loss_by_feat(self):
        # Same loss checks but with the horizontal-bbox loss path
        # (use_hbbox_loss=True) plus a separate CSL angle loss; this path
        # runs on CPU.
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'batch_input_shape': (s, s),
            'scale_factor': 1,
        }]
        train_cfg = dict(
            assigner=dict(
                type='BatchDynamicSoftLabelAssigner',
                num_classes=80,
                topk=13,
                iou_calculator=dict(type='mmrotate.RBboxOverlaps2D'),
                batch_iou=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False)
        train_cfg = Config(train_cfg)
        # Horizontal-bbox loss configuration: GIoU box loss decoded without
        # the angle, plus a CSL-coded angle branch with its own loss.
        hbb_cfg = dict(
            bbox_coder=dict(
                type='DistanceAnglePointCoder', angle_version='le90'),
            loss_bbox=dict(type='mmdet.GIoULoss', loss_weight=2.0),
            angle_coder=dict(
                type='mmrotate.CSLCoder',
                angle_version='le90',
                omega=1,
                window='gaussian',
                radius=1),
            loss_angle=dict(
                type='mmrotate.SmoothFocalLoss',
                gamma=2.0,
                alpha=0.25,
                loss_weight=0.2),
            use_hbbox_loss=True,
        )
        head = RTMDetRotatedHead(
            head_module=self.head_module, **hbb_cfg, train_cfg=train_cfg)
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16]
        ]
        cls_scores, bbox_preds, angle_preds = head.forward(feat)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData(
            bboxes=torch.empty((0, 5)), labels=torch.LongTensor([]))
        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                            angle_preds, [gt_instances],
                                            img_metas)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
        empty_angle_loss = empty_gt_losses['loss_angle'].sum()
        self.assertGreater(empty_cls_loss.item(), 0,
                           'classification loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertEqual(
            empty_angle_loss.item(), 0,
            'there should be no angle loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        head = RTMDetRotatedHead(
            head_module=self.head_module, **hbb_cfg, train_cfg=train_cfg)
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[130.6667, 86.8757, 100.6326, 70.8874, 0.2]]),
            labels=torch.LongTensor([1]))
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, angle_preds,
                                          [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        onegt_angle_loss = one_gt_losses['loss_angle'].sum()
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_angle_loss.item(), 0,
                           'angle loss should be non-zero')
        # test num_class = 1
        self.head_module['num_classes'] = 1
        head = RTMDetRotatedHead(
            head_module=self.head_module, **hbb_cfg, train_cfg=train_cfg)
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[130.6667, 86.8757, 100.6326, 70.8874, 0.2]]),
            labels=torch.LongTensor([0]))
        cls_scores, bbox_preds, angle_preds = head.forward(feat)
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds, angle_preds,
                                          [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        onegt_angle_loss = one_gt_losses['loss_angle'].sum()
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_angle_loss.item(), 0,
                           'angle loss should be non-zero')
| 10,370 | 38.135849 | 79 | py |
mmyolo | mmyolo-main/tests/test_models/test_dense_heads/test_yolov8_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine import ConfigDict
from mmengine.config import Config
from mmyolo.models import YOLOv8Head
from mmyolo.utils import register_all_modules
register_all_modules()
class TestYOLOv8Head(TestCase):
    """Unit tests for ``YOLOv8Head`` prediction and loss computation."""

    def setUp(self):
        """Shared YOLOv8 head-module config used by every test."""
        self.head_module = dict(
            type='YOLOv8HeadModule',
            num_classes=4,
            in_channels=[32, 64, 128],
            featmap_strides=[8, 16, 32])

    def test_predict_by_feat(self):
        """``predict_by_feat`` should run with and without rescale / NMS."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'ori_shape': (s, s, 3),
            'scale_factor': (1.0, 1.0),
        }]
        test_cfg = Config(
            dict(
                multi_label=True,
                max_per_img=300,
                score_thr=0.01,
                nms=dict(type='nms', iou_threshold=0.65)))
        head = YOLOv8Head(head_module=self.head_module, test_cfg=test_cfg)
        head.eval()
        # One random feature map per FPN level, sized by its stride.
        feat = []
        for i in range(len(self.head_module['in_channels'])):
            in_channel = self.head_module['in_channels'][i]
            feat_size = self.head_module['featmap_strides'][i]
            feat.append(
                torch.rand(1, in_channel, s // feat_size, s // feat_size))
        cls_scores, bbox_preds = head.forward(feat)
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            None,
            img_metas,
            cfg=test_cfg,
            rescale=True,
            with_nms=True)
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            None,
            img_metas,
            cfg=test_cfg,
            rescale=False,
            with_nms=False)

    def test_loss_by_feat(self):
        """Empty GT must give zero box/dfl loss; real GT must give non-zero
        cls/box/dfl losses (also checked with ``num_classes=1``)."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'batch_input_shape': (s, s),
            'scale_factor': 1,
        }]
        head = YOLOv8Head(
            head_module=self.head_module,
            train_cfg=ConfigDict(
                assigner=dict(
                    type='BatchTaskAlignedAssigner',
                    num_classes=4,
                    topk=10,
                    alpha=0.5,
                    beta=6)))
        head.train()
        feat = []
        for i in range(len(self.head_module['in_channels'])):
            in_channel = self.head_module['in_channels'][i]
            feat_size = self.head_module['featmap_strides'][i]
            feat.append(
                torch.rand(1, in_channel, s // feat_size, s // feat_size))
        cls_scores, bbox_preds, bbox_dist_preds = head.forward(feat)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = torch.empty((0, 6), dtype=torch.float32)
        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                            bbox_dist_preds, gt_instances,
                                            img_metas)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
        empty_dfl_loss = empty_gt_losses['loss_dfl'].sum()
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        # Fixed message: the assertion checks that the dfl loss is zero.
        self.assertEqual(
            empty_dfl_loss.item(), 0,
            'there should be no dfl loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = torch.Tensor(
            [[0., 0., 23.6667, 23.8757, 238.6326, 151.8874]])
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                          bbox_dist_preds, gt_instances,
                                          img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        onegt_loss_dfl = one_gt_losses['loss_dfl'].sum()
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        # Fixed message: this is the dfl loss, not an objectness loss.
        self.assertGreater(onegt_loss_dfl.item(), 0,
                           'dfl loss should be non-zero')
        # test num_class = 1
        self.head_module['num_classes'] = 1
        head = YOLOv8Head(
            head_module=self.head_module,
            train_cfg=ConfigDict(
                assigner=dict(
                    type='BatchTaskAlignedAssigner',
                    num_classes=1,
                    topk=10,
                    alpha=0.5,
                    beta=6)))
        head.train()
        gt_instances = torch.Tensor(
            [[0., 0., 23.6667, 23.8757, 238.6326, 151.8874],
             [1., 0., 24.6667, 27.8757, 28.6326, 51.8874]])
        cls_scores, bbox_preds, bbox_dist_preds = head.forward(feat)
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                          bbox_dist_preds, gt_instances,
                                          img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        onegt_loss_dfl = one_gt_losses['loss_dfl'].sum()
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        self.assertGreater(onegt_loss_dfl.item(), 0,
                           'dfl loss should be non-zero')
| 5,914 | 35.512346 | 78 | py |
mmyolo | mmyolo-main/tests/test_models/test_dense_heads/test_rtmdet_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import numpy as np
import torch
from mmengine.config import Config
from mmengine.structures import InstanceData
from mmyolo.models import RTMDetInsSepBNHead
from mmyolo.models.dense_heads import RTMDetHead
from mmyolo.utils import register_all_modules
register_all_modules()
class TestRTMDetHead(TestCase):
    """Unit tests for the box-only RTMDet head (init, predict, loss)."""

    def setUp(self):
        # Minimal separated-BN head-module config shared by all tests.
        self.head_module = dict(
            type='RTMDetSepBNHeadModule',
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            feat_channels=64,
            featmap_strides=[4, 8, 16])
    def test_init_weights(self):
        """Weight initialization of the head module should run cleanly."""
        head = RTMDetHead(head_module=self.head_module)
        head.head_module.init_weights()
    def test_predict_by_feat(self):
        """``predict_by_feat`` should run with and without rescale / NMS."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'ori_shape': (s, s, 3),
            'scale_factor': (1.0, 1.0),
        }]
        test_cfg = dict(
            multi_label=True,
            nms_pre=30000,
            score_thr=0.001,
            nms=dict(type='nms', iou_threshold=0.65),
            max_per_img=300)
        test_cfg = Config(test_cfg)
        head = RTMDetHead(head_module=self.head_module, test_cfg=test_cfg)
        # One random single-channel feature map per stride level.
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16]
        ]
        cls_scores, bbox_preds = head.forward(feat)
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            batch_img_metas=img_metas,
            cfg=test_cfg,
            rescale=True,
            with_nms=True)
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            batch_img_metas=img_metas,
            cfg=test_cfg,
            rescale=False,
            with_nms=False)
    def test_loss_by_feat(self):
        """Empty GT must give zero box loss; real GT must give non-zero
        cls and box losses (also checked with ``num_classes=1``)."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'batch_input_shape': (s, s),
            'scale_factor': 1,
        }]
        train_cfg = dict(
            assigner=dict(
                num_classes=80,
                type='BatchDynamicSoftLabelAssigner',
                topk=13,
                iou_calculator=dict(type='mmdet.BboxOverlaps2D')),
            allowed_border=-1,
            pos_weight=-1,
            debug=False)
        train_cfg = Config(train_cfg)
        head = RTMDetHead(head_module=self.head_module, train_cfg=train_cfg)
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16]
        ]
        cls_scores, bbox_preds = head.forward(feat)
        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData(
            bboxes=torch.empty((0, 4)), labels=torch.LongTensor([]))
        empty_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                            [gt_instances], img_metas)
        # When there is no truth, the cls loss should be nonzero but there
        # should be no box loss.
        empty_cls_loss = empty_gt_losses['loss_cls'].sum()
        empty_box_loss = empty_gt_losses['loss_bbox'].sum()
        self.assertGreater(empty_cls_loss.item(), 0,
                           'classification loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        head = RTMDetHead(head_module=self.head_module, train_cfg=train_cfg)
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
            labels=torch.LongTensor([1]))
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                          [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
        # test num_class = 1
        self.head_module['num_classes'] = 1
        head = RTMDetHead(head_module=self.head_module, train_cfg=train_cfg)
        gt_instances = InstanceData(
            bboxes=torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
            labels=torch.LongTensor([0]))
        cls_scores, bbox_preds = head.forward(feat)
        one_gt_losses = head.loss_by_feat(cls_scores, bbox_preds,
                                          [gt_instances], img_metas)
        onegt_cls_loss = one_gt_losses['loss_cls'].sum()
        onegt_box_loss = one_gt_losses['loss_bbox'].sum()
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
class TestRTMDetInsHead(TestCase):
    """Unit tests for the RTMDet instance-segmentation head."""

    def setUp(self):
        # Instance-seg head-module config (with mask prototypes and
        # dynamic-conv settings) shared by all tests.
        self.head_module = dict(
            type='RTMDetInsSepBNHeadModule',
            num_classes=4,
            in_channels=1,
            stacked_convs=1,
            feat_channels=64,
            featmap_strides=[4, 8, 16],
            num_prototypes=8,
            dyconv_channels=8,
            num_dyconvs=3,
            share_conv=True,
            use_sigmoid_cls=True)
    def test_init_weights(self):
        """Weight initialization of the head module should run cleanly."""
        head = RTMDetInsSepBNHead(head_module=self.head_module)
        head.head_module.init_weights()
    def test_predict_by_feat(self):
        """``predict_by_feat`` should work with and without ``pad_param``
        in the image meta, and reject the rescale=False/with_nms=False
        combination with an AssertionError."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'ori_shape': (s, s, 3),
            'scale_factor': (1.0, 1.0),
            'pad_param': np.array([0., 0., 0., 0.])
        }]
        test_cfg = dict(
            multi_label=False,
            nms_pre=1000,
            min_bbox_size=0,
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.6),
            max_per_img=100,
            mask_thr_binary=0.5)
        test_cfg = Config(test_cfg)
        head = RTMDetInsSepBNHead(
            head_module=self.head_module, test_cfg=test_cfg)
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16]
        ]
        cls_scores, bbox_preds, kernel_preds, mask_feat = head.forward(feat)
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            kernel_preds,
            mask_feat,
            batch_img_metas=img_metas,
            cfg=test_cfg,
            rescale=True,
            with_nms=True)
        # 'pad_param' is optional in the image meta information.
        img_metas_without_pad_param = [{
            'img_shape': (s, s, 3),
            'ori_shape': (s, s, 3),
            'scale_factor': (1.0, 1.0)
        }]
        head.predict_by_feat(
            cls_scores,
            bbox_preds,
            kernel_preds,
            mask_feat,
            batch_img_metas=img_metas_without_pad_param,
            cfg=test_cfg,
            rescale=True,
            with_nms=True)
        # The head is expected to reject this rescale=False / with_nms=False
        # call with an AssertionError.
        with self.assertRaises(AssertionError):
            head.predict_by_feat(
                cls_scores,
                bbox_preds,
                kernel_preds,
                mask_feat,
                batch_img_metas=img_metas,
                cfg=test_cfg,
                rescale=False,
                with_nms=False)
| 7,546 | 32.691964 | 78 | py |
mmyolo | mmyolo-main/tests/test_models/test_detectors/test_yolo_detector.py | # Copyright (c) OpenMMLab. All rights reserved.
import time
import unittest
from unittest import TestCase
import torch
from mmdet.structures import DetDataSample
from mmdet.testing import demo_mm_inputs
from mmengine.logging import MessageHub
from parameterized import parameterized
from mmyolo.testing import get_detector_cfg
from mmyolo.utils import register_all_modules
class TestSingleStageDetector(TestCase):
    """Smoke tests that build YOLO-family detectors from their configs and
    run the three forward modes (loss / predict / tensor) per device."""

    def setUp(self):
        register_all_modules()

    @parameterized.expand([
        'yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco.py',
        'yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py',
        'yolox/yolox_tiny_fast_8xb8-300e_coco.py',
        'rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py',
        'yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py',
        'yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco.py'
    ])
    def test_init(self, cfg_file):
        """Detectors built from config should expose backbone/neck/head."""
        model = get_detector_cfg(cfg_file)
        model.backbone.init_cfg = None
        from mmyolo.registry import MODELS
        detector = MODELS.build(model)
        self.assertTrue(detector.backbone)
        self.assertTrue(detector.neck)
        self.assertTrue(detector.bbox_head)

    @parameterized.expand([
        ('yolov5/yolov5_s-v61_syncbn_8xb16-300e_coco.py', ('cuda', 'cpu')),
        ('yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py', ('cuda', 'cpu')),
        ('rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py', ('cuda', 'cpu')),
        ('yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco.py', ('cuda', 'cpu'))
    ])
    def test_forward_loss_mode(self, cfg_file, devices):
        """forward(mode='loss') should return a dict of losses."""
        message_hub = MessageHub.get_instance(
            f'test_single_stage_forward_loss_mode-{time.time()}')
        message_hub.update_info('iter', 0)
        message_hub.update_info('epoch', 0)
        model = get_detector_cfg(cfg_file)
        model.backbone.init_cfg = None
        # 'fast' configs normalize inside the model, so use a plain
        # DetDataPreprocessor for the test.
        if 'fast' in cfg_file:
            model.data_preprocessor = dict(
                type='mmdet.DetDataPreprocessor',
                mean=[0., 0., 0.],
                std=[255., 255., 255.],
                bgr_to_rgb=True)
        from mmyolo.registry import MODELS
        assert all([device in ['cpu', 'cuda'] for device in devices])
        for device in devices:
            detector = MODELS.build(model)
            detector.init_weights()
            if device == 'cuda':
                if not torch.cuda.is_available():
                    # Skip only the CUDA run. The previous
                    # `return unittest.skip(...)` returned a decorator
                    # (silently passing) and aborted before the remaining
                    # devices were exercised.
                    continue
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 320, 128], [3, 125, 320]])
            data = detector.data_preprocessor(packed_inputs, True)
            losses = detector.forward(**data, mode='loss')
            self.assertIsInstance(losses, dict)

    @parameterized.expand([
        ('yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco.py', ('cuda',
                                                                'cpu')),
        ('yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py', ('cuda', 'cpu')),
        ('yolox/yolox_tiny_fast_8xb8-300e_coco.py', ('cuda', 'cpu')),
        ('yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py', ('cuda', 'cpu')),
        ('rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py', ('cuda', 'cpu')),
        ('yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco.py', ('cuda', 'cpu'))
    ])
    def test_forward_predict_mode(self, cfg_file, devices):
        """forward(mode='predict') should return one DetDataSample per
        input image."""
        model = get_detector_cfg(cfg_file)
        model.backbone.init_cfg = None
        from mmyolo.registry import MODELS
        assert all([device in ['cpu', 'cuda'] for device in devices])
        for device in devices:
            detector = MODELS.build(model)
            if device == 'cuda':
                if not torch.cuda.is_available():
                    # Skip only the CUDA run; still exercise the CPU path.
                    continue
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 320, 128], [3, 125, 320]])
            data = detector.data_preprocessor(packed_inputs, False)
            # Test forward test
            detector.eval()
            with torch.no_grad():
                batch_results = detector.forward(**data, mode='predict')
                self.assertEqual(len(batch_results), 2)
                self.assertIsInstance(batch_results[0], DetDataSample)

    @parameterized.expand([
        ('yolov5/yolov5_n-v61_syncbn_fast_8xb16-300e_coco.py', ('cuda',
                                                                'cpu')),
        ('yolov6/yolov6_s_syncbn_fast_8xb32-400e_coco.py', ('cuda', 'cpu')),
        ('yolox/yolox_tiny_fast_8xb8-300e_coco.py', ('cuda', 'cpu')),
        ('yolov7/yolov7_tiny_syncbn_fast_8x16b-300e_coco.py', ('cuda', 'cpu')),
        ('rtmdet/rtmdet_tiny_syncbn_fast_8xb32-300e_coco.py', ('cuda', 'cpu')),
        ('yolov8/yolov8_n_syncbn_fast_8xb16-500e_coco.py', ('cuda', 'cpu'))
    ])
    def test_forward_tensor_mode(self, cfg_file, devices):
        """forward(mode='tensor') should return a tuple of raw outputs."""
        model = get_detector_cfg(cfg_file)
        model.backbone.init_cfg = None
        from mmyolo.registry import MODELS
        assert all([device in ['cpu', 'cuda'] for device in devices])
        for device in devices:
            detector = MODELS.build(model)
            if device == 'cuda':
                if not torch.cuda.is_available():
                    # Skip only the CUDA run; still exercise the CPU path.
                    continue
                detector = detector.cuda()
            packed_inputs = demo_mm_inputs(2, [[3, 320, 128], [3, 125, 320]])
            data = detector.data_preprocessor(packed_inputs, False)
            batch_results = detector.forward(**data, mode='tensor')
            self.assertIsInstance(batch_results, tuple)
| 5,695 | 40.275362 | 79 | py |
mmyolo | mmyolo-main/tests/test_models/test_task_modules/test_coders/test_distance_point_bbox_coder.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.task_modules.coders import DistancePointBBoxCoder
class TestDistancePointBBoxCoder(TestCase):

    def test_decoder(self):
        """Decoding distance predictions must reproduce the reference boxes
        and agree between batched and unbatched inputs."""
        coder = DistancePointBBoxCoder()
        centers = torch.Tensor([[74., 61.], [-29., 106.], [138., 61.],
                                [29., 170.]])
        distances = torch.Tensor([[0, -1, 3, 3], [-1, -7, -4.8, 9],
                                  [-23, -1, 12, 1], [14.5, -13, 10, 18.3]])
        stride_per_point = torch.Tensor([2, 4, 6, 6])
        expected = torch.Tensor([[74, 63, 80, 67],
                                 [-25, 134, -48.2, 142],
                                 [276, 67, 210, 67],
                                 [-58, 248, 89, 279.8]])
        decoded = coder.decode(centers, distances, stride_per_point)
        self.assertTrue(torch.allclose(expected, decoded))
        # A batched call must agree with the unbatched result.
        decoded_batched = coder.decode(
            centers.unsqueeze(0).repeat(2, 1, 1),
            distances.unsqueeze(0).repeat(2, 1, 1), stride_per_point)[0]
        self.assertTrue(torch.allclose(decoded, decoded_batched))
| 1,218 | 39.633333 | 77 | py |
mmyolo | mmyolo-main/tests/test_models/test_task_modules/test_coders/test_yolov5_bbox_coder.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.task_modules.coders import YOLOv5BBoxCoder
class TestYOLOv5Coder(TestCase):

    def test_decoder(self):
        """YOLOv5 box decoding must reproduce known reference values and be
        consistent between batched and unbatched inputs."""
        coder = YOLOv5BBoxCoder()
        anchor_boxes = torch.Tensor([[10., 10., 20., 20.], [10., 8., 10., 10.],
                                     [15., 8., 20., 3.], [2., 5., 5., 8.]])
        raw_preds = torch.Tensor([[0.0000, 0.0000, 1.0000, 1.0000],
                                  [0.1409, 0.1409, 2.8591, 2.8591],
                                  [0.0000, 0.3161, 4.1945, 0.6839],
                                  [1.0000, 5.0000, 9.0000, 5.0000]])
        stride_tensor = torch.Tensor([2, 4, 8, 8])
        expected = torch.Tensor(
            [[4.3111, 4.3111, 25.6889, 25.6889],
             [10.2813, 5.7033, 10.2813, 12.8594],
             [7.7949, 11.1710, 27.2051, 2.3369],
             [1.1984, 8.4730, 13.1955, 20.3129]])
        decoded = coder.decode(anchor_boxes, raw_preds, stride_tensor)
        self.assertTrue(torch.allclose(expected, decoded, atol=1e-04))
        # A batched call must agree with the unbatched result.
        decoded_batched = coder.decode(
            anchor_boxes.unsqueeze(0).repeat(2, 1, 1),
            raw_preds.unsqueeze(0).repeat(2, 1, 1), stride_tensor)[0]
        self.assertTrue(torch.allclose(decoded, decoded_batched))
| 1,336 | 39.515152 | 77 | py |
mmyolo | mmyolo-main/tests/test_models/test_task_modules/test_coders/test_yolox_bbox_coder.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.task_modules.coders import YOLOXBBoxCoder
class TestYOLOXCoder(TestCase):
    """Tests for :class:`YOLOXBBoxCoder` decoding.

    Renamed from ``TestYOLOv5Coder`` — the old name was a copy-paste
    leftover from the YOLOv5 coder test and did not match the coder under
    test.
    """

    def test_decoder(self):
        """YOLOX decoding must reproduce reference values and agree
        between batched and unbatched inputs."""
        coder = YOLOXBBoxCoder()
        priors = torch.Tensor([[10., 10.], [8., 8.], [15., 8.], [2., 5.]])
        pred_bboxes = torch.Tensor([[0.0000, 0.0000, 1.0000, 1.0000],
                                    [0.0409, 0.1409, 0.8591, 0.8591],
                                    [0.0000, 0.3161, 0.1945, 0.6839],
                                    [1.0000, 5.0000, 0.2000, 0.6000]])
        strides = torch.Tensor([2, 4, 6, 6])
        expected_decode_bboxes = torch.Tensor(
            [[7.2817, 7.2817, 12.7183, 12.7183],
             [3.4415, 3.8415, 12.8857, 13.2857],
             [11.3559, 3.9518, 18.6441, 15.8414],
             [4.3358, 29.5336, 11.6642, 40.4664]])
        out = coder.decode(priors, pred_bboxes, strides)
        assert expected_decode_bboxes.allclose(out, atol=1e-04)
        # A batched call must agree with the unbatched result.
        batch_priors = priors.unsqueeze(0).repeat(2, 1, 1)
        batch_pred_bboxes = pred_bboxes.unsqueeze(0).repeat(2, 1, 1)
        batch_out = coder.decode(batch_priors, batch_pred_bboxes, strides)[0]
        assert out.allclose(batch_out)
| 1,266 | 38.59375 | 77 | py |
mmyolo | mmyolo-main/tests/test_models/test_task_modules/test_assigners/test_batch_dsl_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmyolo.models.task_modules.assigners import BatchDynamicSoftLabelAssigner
class TestBatchDynamicSoftLabelAssigner(TestCase):
    """Shape checks for ``BatchDynamicSoftLabelAssigner`` outputs under
    normal and corner-case inputs."""

    def test_assign(self):
        """Regular case: batch of 2 images, 20 priors, one gt each."""
        num_classes = 2
        batch_size = 2
        assigner = BatchDynamicSoftLabelAssigner(
            num_classes=num_classes,
            soft_center_radius=3.0,
            topk=1,
            iou_weight=3.0)
        pred_bboxes = torch.FloatTensor([
            [23, 23, 43, 43],
            [4, 5, 6, 7],
        ]).unsqueeze(0).repeat(batch_size, 10, 1)
        pred_scores = torch.FloatTensor([
            [0.2],
            [0.8],
        ]).unsqueeze(0).repeat(batch_size, 10, 1)
        priors = torch.FloatTensor([[30, 30, 8, 8], [4, 5, 6,
                                                     7]]).repeat(10, 1)
        gt_bboxes = torch.FloatTensor([[23, 23, 43, 43]]).unsqueeze(0).repeat(
            batch_size, 1, 1)
        gt_labels = torch.LongTensor([[0]
                                      ]).unsqueeze(0).repeat(batch_size, 1, 1)
        # pad_bbox_flag: presumably 1 marks a real gt row, 0 padding —
        # per its name; TODO confirm against the assigner implementation.
        pad_bbox_flag = torch.FloatTensor([[1]]).unsqueeze(0).repeat(
            batch_size, 1, 1)
        assign_result = assigner.forward(pred_bboxes, pred_scores, priors,
                                         gt_labels, gt_bboxes, pad_bbox_flag)
        assigned_labels = assign_result['assigned_labels']
        assigned_labels_weights = assign_result['assigned_labels_weights']
        assigned_bboxes = assign_result['assigned_bboxes']
        assign_metrics = assign_result['assign_metrics']
        self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 20]))
        self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 20,
                                                            4]))
        self.assertEqual(assigned_labels_weights.shape,
                         torch.Size([batch_size, 20]))
        self.assertEqual(assign_metrics.shape, torch.Size([batch_size, 20]))
    def test_assign_with_empty_gt(self):
        """Corner case: images with no ground-truth boxes at all."""
        num_classes = 2
        batch_size = 2
        assigner = BatchDynamicSoftLabelAssigner(
            num_classes=num_classes,
            soft_center_radius=3.0,
            topk=1,
            iou_weight=3.0)
        pred_bboxes = torch.FloatTensor([
            [23, 23, 43, 43],
            [4, 5, 6, 7],
        ]).unsqueeze(0).repeat(batch_size, 10, 1)
        pred_scores = torch.FloatTensor([
            [0.2],
            [0.8],
        ]).unsqueeze(0).repeat(batch_size, 10, 1)
        priors = torch.FloatTensor([[30, 30, 8, 8], [4, 5, 6,
                                                     7]]).repeat(10, 1)
        gt_bboxes = torch.zeros(batch_size, 0, 4)
        gt_labels = torch.zeros(batch_size, 0, 1)
        pad_bbox_flag = torch.zeros(batch_size, 0, 1)
        assign_result = assigner.forward(pred_bboxes, pred_scores, priors,
                                         gt_labels, gt_bboxes, pad_bbox_flag)
        assigned_labels = assign_result['assigned_labels']
        assigned_labels_weights = assign_result['assigned_labels_weights']
        assigned_bboxes = assign_result['assigned_bboxes']
        assign_metrics = assign_result['assign_metrics']
        self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 20]))
        self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 20,
                                                            4]))
        self.assertEqual(assigned_labels_weights.shape,
                         torch.Size([batch_size, 20]))
        self.assertEqual(assign_metrics.shape, torch.Size([batch_size, 20]))
    # NOTE(review): 'boxs' in the method name below is a typo for 'boxes'.
    def test_assign_with_empty_boxs(self):
        """Corner case: the network predicts no boxes (zero priors)."""
        num_classes = 2
        batch_size = 2
        assigner = BatchDynamicSoftLabelAssigner(
            num_classes=num_classes,
            soft_center_radius=3.0,
            topk=1,
            iou_weight=3.0)
        pred_bboxes = torch.zeros(batch_size, 0, 4)
        pred_scores = torch.zeros(batch_size, 0, 4)
        priors = torch.zeros(0, 4)
        gt_bboxes = torch.FloatTensor([[23, 23, 43, 43]]).unsqueeze(0).repeat(
            batch_size, 1, 1)
        gt_labels = torch.LongTensor([[0]
                                      ]).unsqueeze(0).repeat(batch_size, 1, 1)
        pad_bbox_flag = torch.FloatTensor([[1]]).unsqueeze(0).repeat(
            batch_size, 1, 1)
        assign_result = assigner.forward(pred_bboxes, pred_scores, priors,
                                         gt_labels, gt_bboxes, pad_bbox_flag)
        assigned_labels = assign_result['assigned_labels']
        assigned_labels_weights = assign_result['assigned_labels_weights']
        assigned_bboxes = assign_result['assigned_bboxes']
        assign_metrics = assign_result['assign_metrics']
        self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 0]))
        self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 0, 4]))
        self.assertEqual(assigned_labels_weights.shape,
                         torch.Size([batch_size, 0]))
        self.assertEqual(assign_metrics.shape, torch.Size([batch_size, 0]))
    def test_assign_rotate_box(self):
        """Rotated-box (cx, cy, w, h, angle) assignment path; skipped when
        mmrotate is not installed."""
        try:
            import importlib
            importlib.import_module('mmrotate')
        except ImportError:
            pytest.skip('mmrotate is not installed.', allow_module_level=True)
        num_classes = 2
        batch_size = 2
        assigner = BatchDynamicSoftLabelAssigner(
            num_classes=num_classes,
            soft_center_radius=3.0,
            topk=1,
            iou_weight=3.0,
            iou_calculator=dict(type='mmrotate.RBboxOverlaps2D'),
            # RBboxOverlaps2D doesn't support batch input, use loop instead.
            batch_iou=False,
        )
        pred_bboxes = torch.FloatTensor([
            [23, 23, 20, 20, 0.078],
            [4, 5, 2, 2, 0.078],
        ]).unsqueeze(0).repeat(batch_size, 10, 1)
        pred_scores = torch.FloatTensor([
            [0.2],
            [0.8],
        ]).unsqueeze(0).repeat(batch_size, 10, 1)
        priors = torch.FloatTensor([[30, 30, 8, 8], [4, 5, 6,
                                                     7]]).repeat(10, 1)
        gt_bboxes = torch.FloatTensor([[23, 23, 20, 20,
                                        0.078]]).unsqueeze(0).repeat(
                                            batch_size, 1, 1)
        gt_labels = torch.LongTensor([[0]
                                      ]).unsqueeze(0).repeat(batch_size, 1, 1)
        pad_bbox_flag = torch.FloatTensor([[1]]).unsqueeze(0).repeat(
            batch_size, 1, 1)
        assign_result = assigner.forward(pred_bboxes, pred_scores, priors,
                                         gt_labels, gt_bboxes, pad_bbox_flag)
        assigned_labels = assign_result['assigned_labels']
        assigned_labels_weights = assign_result['assigned_labels_weights']
        assigned_bboxes = assign_result['assigned_bboxes']
        assign_metrics = assign_result['assign_metrics']
        self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 20]))
        self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 20,
                                                            5]))
        self.assertEqual(assigned_labels_weights.shape,
                         torch.Size([batch_size, 20]))
        self.assertEqual(assign_metrics.shape, torch.Size([batch_size, 20]))
| 7,466 | 37.689119 | 79 | py |
mmyolo | mmyolo-main/tests/test_models/test_task_modules/test_assigners/test_batch_atss_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.task_modules.assigners import BatchATSSAssigner
class TestBatchATSSAssigner(TestCase):
    """Shape checks for ``BatchATSSAssigner`` under normal and corner
    cases (empty gt, empty predictions, both empty)."""

    def test_batch_atss_assigner(self):
        """Regular case: 84 priors over 3 levels, one valid gt per image
        (the second gt row is marked as padding)."""
        num_classes = 2
        batch_size = 2
        batch_atss_assigner = BatchATSSAssigner(
            topk=3,
            iou_calculator=dict(type='mmdet.BboxOverlaps2D'),
            num_classes=num_classes)
        priors = torch.FloatTensor([
            [4., 4., 8., 8.],
            [12., 4., 8., 8.],
            [20., 4., 8., 8.],
            [28., 4., 8., 8.],
        ]).repeat(21, 1)
        gt_bboxes = torch.FloatTensor([
            [0, 0, 60, 93],
            [229, 0, 532, 157],
        ]).unsqueeze(0).repeat(batch_size, 1, 1)
        gt_labels = torch.LongTensor([
            [0],
            [11],
        ]).unsqueeze(0).repeat(batch_size, 1, 1)
        num_level_bboxes = [64, 16, 4]
        # pad_bbox_flag: presumably 1 marks a real gt row, 0 padding —
        # per its name; TODO confirm against the assigner implementation.
        pad_bbox_flag = torch.FloatTensor([
            [1],
            [0],
        ]).unsqueeze(0).repeat(batch_size, 1, 1)
        pred_bboxes = torch.FloatTensor([
            [-4., -4., 12., 12.],
            [4., -4., 20., 12.],
            [12., -4., 28., 12.],
            [20., -4., 36., 12.],
        ]).unsqueeze(0).repeat(batch_size, 21, 1)
        batch_assign_result = batch_atss_assigner.forward(
            pred_bboxes, priors, num_level_bboxes, gt_labels, gt_bboxes,
            pad_bbox_flag)
        assigned_labels = batch_assign_result['assigned_labels']
        assigned_bboxes = batch_assign_result['assigned_bboxes']
        assigned_scores = batch_assign_result['assigned_scores']
        fg_mask_pre_prior = batch_assign_result['fg_mask_pre_prior']
        self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 84]))
        self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 84,
                                                            4]))
        self.assertEqual(assigned_scores.shape,
                         torch.Size([batch_size, 84, num_classes]))
        self.assertEqual(fg_mask_pre_prior.shape, torch.Size([batch_size, 84]))
    def test_batch_atss_assigner_with_empty_gt(self):
        """Test corner case where an image might have no true detections."""
        num_classes = 2
        batch_size = 2
        batch_atss_assigner = BatchATSSAssigner(
            topk=3,
            iou_calculator=dict(type='mmdet.BboxOverlaps2D'),
            num_classes=num_classes)
        priors = torch.FloatTensor([
            [4., 4., 8., 8.],
            [12., 4., 8., 8.],
            [20., 4., 8., 8.],
            [28., 4., 8., 8.],
        ]).repeat(21, 1)
        num_level_bboxes = [64, 16, 4]
        pad_bbox_flag = torch.FloatTensor([
            [1],
            [0],
        ]).unsqueeze(0).repeat(batch_size, 1, 1)
        pred_bboxes = torch.FloatTensor([
            [-4., -4., 12., 12.],
            [4., -4., 20., 12.],
            [12., -4., 28., 12.],
            [20., -4., 36., 12.],
        ]).unsqueeze(0).repeat(batch_size, 21, 1)
        # Zero-length gt tensors model the "no true boxes" case.
        gt_bboxes = torch.zeros(batch_size, 0, 4)
        gt_labels = torch.zeros(batch_size, 0, 1)
        batch_assign_result = batch_atss_assigner.forward(
            pred_bboxes, priors, num_level_bboxes, gt_labels, gt_bboxes,
            pad_bbox_flag)
        assigned_labels = batch_assign_result['assigned_labels']
        assigned_bboxes = batch_assign_result['assigned_bboxes']
        assigned_scores = batch_assign_result['assigned_scores']
        fg_mask_pre_prior = batch_assign_result['fg_mask_pre_prior']
        self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 84]))
        self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 84,
                                                            4]))
        self.assertEqual(assigned_scores.shape,
                         torch.Size([batch_size, 84, num_classes]))
        self.assertEqual(fg_mask_pre_prior.shape, torch.Size([batch_size, 84]))
    def test_batch_atss_assigner_with_empty_boxs(self):
        """Test corner case where a network might predict no boxes."""
        num_classes = 2
        batch_size = 2
        batch_atss_assigner = BatchATSSAssigner(
            topk=3,
            iou_calculator=dict(type='mmdet.BboxOverlaps2D'),
            num_classes=num_classes)
        # All-zero priors model the "no predicted boxes" case.
        priors = torch.zeros(84, 4)
        gt_bboxes = torch.FloatTensor([
            [0, 0, 60, 93],
            [229, 0, 532, 157],
        ]).unsqueeze(0).repeat(batch_size, 1, 1)
        gt_labels = torch.LongTensor([
            [0],
            [11],
        ]).unsqueeze(0).repeat(batch_size, 1, 1)
        num_level_bboxes = [64, 16, 4]
        pad_bbox_flag = torch.FloatTensor([[1], [0]]).unsqueeze(0).repeat(
            batch_size, 1, 1)
        pred_bboxes = torch.FloatTensor([
            [-4., -4., 12., 12.],
            [4., -4., 20., 12.],
            [12., -4., 28., 12.],
            [20., -4., 36., 12.],
        ]).unsqueeze(0).repeat(batch_size, 21, 1)
        batch_assign_result = batch_atss_assigner.forward(
            pred_bboxes, priors, num_level_bboxes, gt_labels, gt_bboxes,
            pad_bbox_flag)
        assigned_labels = batch_assign_result['assigned_labels']
        assigned_bboxes = batch_assign_result['assigned_bboxes']
        assigned_scores = batch_assign_result['assigned_scores']
        fg_mask_pre_prior = batch_assign_result['fg_mask_pre_prior']
        self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 84]))
        self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 84,
                                                            4]))
        self.assertEqual(assigned_scores.shape,
                         torch.Size([batch_size, 84, num_classes]))
        self.assertEqual(fg_mask_pre_prior.shape, torch.Size([batch_size, 84]))
    def test_batch_atss_assigner_with_empty_boxes_and_gt(self):
        """Test corner case where a network might predict no boxes and no
        gt."""
        num_classes = 2
        batch_size = 2
        batch_atss_assigner = BatchATSSAssigner(
            topk=3,
            iou_calculator=dict(type='mmdet.BboxOverlaps2D'),
            num_classes=num_classes)
        priors = torch.zeros(84, 4)
        gt_bboxes = torch.zeros(batch_size, 0, 4)
        gt_labels = torch.zeros(batch_size, 0, 1)
        num_level_bboxes = [64, 16, 4]
        pad_bbox_flag = torch.zeros(batch_size, 0, 1)
        pred_bboxes = torch.zeros(batch_size, 0, 4)
        batch_assign_result = batch_atss_assigner.forward(
            pred_bboxes, priors, num_level_bboxes, gt_labels, gt_bboxes,
            pad_bbox_flag)
        assigned_labels = batch_assign_result['assigned_labels']
        assigned_bboxes = batch_assign_result['assigned_bboxes']
        assigned_scores = batch_assign_result['assigned_scores']
        fg_mask_pre_prior = batch_assign_result['fg_mask_pre_prior']
        self.assertEqual(assigned_labels.shape, torch.Size([batch_size, 84]))
        self.assertEqual(assigned_bboxes.shape, torch.Size([batch_size, 84,
                                                            4]))
        self.assertEqual(assigned_scores.shape,
                         torch.Size([batch_size, 84, num_classes]))
        self.assertEqual(fg_mask_pre_prior.shape, torch.Size([batch_size, 84]))
| 7,366 | 40.857955 | 79 | py |
mmyolo | mmyolo-main/tests/test_models/test_task_modules/test_assigners/test_batch_task_aligned_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.task_modules.assigners import BatchTaskAlignedAssigner
class TestBatchTaskAlignedAssigner(TestCase):

    def test_batch_task_aligned_assigner(self):
        """Check the output keys and shapes of BatchTaskAlignedAssigner."""
        bs = 2
        n_cls = 4
        assigner = BatchTaskAlignedAssigner(
            num_classes=n_cls, alpha=1, beta=6, topk=13, eps=1e-9)
        # 4 base rows repeated 21 times -> 84 priors per image.
        score_preds = torch.FloatTensor([
            [0.1, 0.2],
            [0.2, 0.3],
            [0.3, 0.4],
            [0.4, 0.5],
        ]).unsqueeze(0).repeat(bs, 21, 1)
        anchor_points = torch.FloatTensor([
            [0, 0, 4., 4.],
            [0, 0, 12., 4.],
            [0, 0, 20., 4.],
            [0, 0, 28., 4.],
        ]).repeat(21, 1)
        box_preds = torch.FloatTensor([
            [-4., -4., 12., 12.],
            [4., -4., 20., 12.],
            [12., -4., 28., 12.],
            [20., -4., 36., 12.],
        ]).unsqueeze(0).repeat(bs, 21, 1)
        gt_bboxes = torch.FloatTensor([
            [0, 0, 60, 93],
            [229, 0, 532, 157],
        ]).unsqueeze(0).repeat(bs, 1, 1)
        gt_labels = torch.LongTensor([[0], [1]]).unsqueeze(0).repeat(bs, 1, 1)
        # Second gt row flagged as padding.
        pad_flag = torch.FloatTensor([[1], [0]]).unsqueeze(0).repeat(bs, 1, 1)
        result = assigner.forward(box_preds, score_preds, anchor_points,
                                  gt_labels, gt_bboxes, pad_flag)
        expected_shapes = {
            'assigned_labels': torch.Size([bs, 84]),
            'assigned_bboxes': torch.Size([bs, 84, 4]),
            'assigned_scores': torch.Size([bs, 84, n_cls]),
            'fg_mask_pre_prior': torch.Size([bs, 84]),
        }
        for key, expected in expected_shapes.items():
            self.assertEqual(result[key].shape, expected)
| 2,212 | 37.824561 | 79 | py |
mmyolo | mmyolo-main/tests/test_models/test_plugins/test_cbam.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmyolo.models.plugins import CBAM
from mmyolo.utils import register_all_modules
register_all_modules()
class TestCBAM(TestCase):

    def test_forward(self):
        """CBAM must preserve the input tensor shape for several configs."""
        shape = (2, 16, 20, 20)
        inputs = torch.randn(*shape)
        # default ratio, custom reduce ratio, custom activation config
        configs = (
            dict(in_channels=16),
            dict(in_channels=16, reduce_ratio=8),
            dict(in_channels=16, act_cfg=dict(type='Sigmoid')),
        )
        for kwargs in configs:
            attention = CBAM(**kwargs)
            self.assertEqual(attention(inputs).shape, shape)
| 783 | 23.5 | 65 | py |
mmyolo | mmyolo-main/tests/test_datasets/test_utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import unittest
import numpy as np
import torch
from mmdet.structures import DetDataSample
from mmdet.structures.bbox import HorizontalBoxes
from mmengine.structures import InstanceData
from mmyolo.datasets import BatchShapePolicy, yolov5_collate
def _rand_bboxes(rng, num_boxes, w, h):
cx, cy, bw, bh = rng.rand(num_boxes, 4).T
tl_x = ((cx * w) - (w * bw / 2)).clip(0, w)
tl_y = ((cy * h) - (h * bh / 2)).clip(0, h)
br_x = ((cx * w) + (w * bw / 2)).clip(0, w)
br_y = ((cy * h) + (h * bh / 2)).clip(0, h)
bboxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
return bboxes
class TestYOLOv5Collate(unittest.TestCase):
    """Tests for the ``yolov5_collate`` batch collate function."""

    def test_yolov5_collate(self):
        """Inputs are stacked into a (B, C, H, W) tensor and the per-image
        gt boxes are concatenated into ``bboxes_labels`` of shape (N, 6)."""
        rng = np.random.RandomState(0)
        inputs = torch.randn((3, 10, 10))
        data_samples = DetDataSample()
        gt_instances = InstanceData()
        bboxes = _rand_bboxes(rng, 4, 6, 8)
        gt_instances.bboxes = HorizontalBoxes(bboxes, dtype=torch.float32)
        labels = rng.randint(1, 2, size=len(bboxes))
        gt_instances.labels = torch.LongTensor(labels)
        data_samples.gt_instances = gt_instances
        out = yolov5_collate([dict(inputs=inputs, data_samples=data_samples)])
        self.assertIsInstance(out, dict)
        self.assertTrue(out['inputs'].shape == (1, 3, 10, 10))
        # Fixed: `assertTrue(x, dict)` never checked the type — the second
        # argument of assertTrue is only the failure message.
        self.assertIsInstance(out['data_samples'], dict)
        self.assertTrue(out['data_samples']['bboxes_labels'].shape == (4, 6))
        out = yolov5_collate([dict(inputs=inputs, data_samples=data_samples)] *
                             2)
        self.assertIsInstance(out, dict)
        self.assertTrue(out['inputs'].shape == (2, 3, 10, 10))
        self.assertIsInstance(out['data_samples'], dict)
        self.assertTrue(out['data_samples']['bboxes_labels'].shape == (8, 6))
    def test_yolov5_collate_with_multi_scale(self):
        """With ``use_ms_training=True`` the collated ``inputs`` stays a
        list of per-image tensors while ``bboxes_labels`` is still one
        concatenated tensor."""
        rng = np.random.RandomState(0)
        inputs = torch.randn((3, 10, 10))
        data_samples = DetDataSample()
        gt_instances = InstanceData()
        bboxes = _rand_bboxes(rng, 4, 6, 8)
        gt_instances.bboxes = HorizontalBoxes(bboxes, dtype=torch.float32)
        labels = rng.randint(1, 2, size=len(bboxes))
        gt_instances.labels = torch.LongTensor(labels)
        data_samples.gt_instances = gt_instances
        out = yolov5_collate([dict(inputs=inputs, data_samples=data_samples)],
                             use_ms_training=True)
        self.assertIsInstance(out, dict)
        self.assertTrue(out['inputs'][0].shape == (3, 10, 10))
        # Fixed: `assertTrue(x, dict)` never checked the type — see above.
        self.assertIsInstance(out['data_samples'], dict)
        self.assertTrue(out['data_samples']['bboxes_labels'].shape == (4, 6))
        self.assertIsInstance(out['inputs'], list)
        self.assertIsInstance(out['data_samples']['bboxes_labels'],
                              torch.Tensor)
        out = yolov5_collate(
            [dict(inputs=inputs, data_samples=data_samples)] * 2,
            use_ms_training=True)
        self.assertIsInstance(out, dict)
        self.assertTrue(out['inputs'][0].shape == (3, 10, 10))
        self.assertIsInstance(out['data_samples'], dict)
        self.assertTrue(out['data_samples']['bboxes_labels'].shape == (8, 6))
        self.assertIsInstance(out['inputs'], list)
        self.assertIsInstance(out['data_samples']['bboxes_labels'],
                              torch.Tensor)
class TestBatchShapePolicy(unittest.TestCase):
    """Tests for ``BatchShapePolicy``.

    Checks that the policy reorders the data infos and attaches the
    expected per-image ``batch_shape`` for a batch size of 2.
    """

    def test_batch_shape_policy(self):
        # All images are 100 wide; only the heights differ.
        heights = [20, 11, 21, 30, 10]
        src_data_infos = [dict(height=h, width=100) for h in heights]

        # Expected output order and batch shapes: (height, width, shape).
        expected = [
            (10, 100, np.array([96, 672])),
            (11, 100, np.array([96, 672])),
            (20, 100, np.array([160, 672])),
            (21, 100, np.array([160, 672])),
            (30, 100, np.array([224, 672])),
        ]

        policy = BatchShapePolicy(batch_size=2)
        out_data_infos = policy(src_data_infos)

        for (exp_h, exp_w, exp_shape), out_info in zip(
                expected, out_data_infos):
            self.assertEqual((exp_h, exp_w),
                             (out_info['height'], out_info['width']))
            self.assertTrue(
                np.allclose(exp_shape, out_info['batch_shape']))
| 4,918 | 34.388489 | 79 | py |
mmyolo | mmyolo-main/tests/test_datasets/test_transforms/test_mix_img_transforms.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import numpy as np
import torch
from mmdet.structures.bbox import HorizontalBoxes
from mmdet.structures.mask import BitmapMasks, PolygonMasks
from mmyolo.datasets import YOLOv5CocoDataset
from mmyolo.datasets.transforms import Mosaic, Mosaic9, YOLOv5MixUp, YOLOXMixUp
from mmyolo.utils import register_all_modules
register_all_modules()
class TestMosaic(unittest.TestCase):
    # Covers the 4-image Mosaic transform: constructor validation, output
    # canvas size, and annotation dtype/shape handling for ndarray boxes,
    # empty GT, HorizontalBoxes and polygon masks.
    def setUp(self):
        """Setup the data info which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        # Pipeline Mosaic uses to load the extra images it mixes in.
        self.pre_transform = [
            dict(
                type='LoadImageFromFile',
                file_client_args=dict(backend='disk')),
            dict(type='LoadAnnotations', with_bbox=True)
        ]
        # Small on-disk sample dataset from which the other mosaic tiles
        # are drawn.
        self.dataset = YOLOv5CocoDataset(
            data_prefix=dict(
                img=osp.join(osp.dirname(__file__), '../../data')),
            ann_file=osp.join(
                osp.dirname(__file__), '../../data/coco_sample_color.json'),
            filter_cfg=dict(filter_empty_gt=False, min_size=32),
            pipeline=[])
        # Synthetic base sample: a 224x224 image with three GT boxes, the
        # last of which is flagged as ignored.
        self.results = {
            'img':
            np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes_labels':
            np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes':
            np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                     dtype=np.float32),
            'gt_ignore_flags':
            np.array([0, 0, 1], dtype=bool),
            'dataset':
            self.dataset
        }
    def test_transform(self):
        # test assertion for invalid img_scale (must be a (w, h) tuple)
        with self.assertRaises(AssertionError):
            transform = Mosaic(img_scale=640)
        # test assertion for invalid probability
        with self.assertRaises(AssertionError):
            transform = Mosaic(prob=1.5)
        # test assertion for invalid max_cached_images
        with self.assertRaises(AssertionError):
            transform = Mosaic(use_cached=True, max_cached_images=1)
        transform = Mosaic(
            img_scale=(12, 10), pre_transform=self.pre_transform)
        results = transform(copy.deepcopy(self.results))
        # Output canvas is twice the requested (w, h) = (12, 10) scale,
        # i.e. (h, w) = (20, 24).
        self.assertTrue(results['img'].shape[:2] == (20, 24))
        # Labels and boxes must stay aligned, with stable dtypes.
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == np.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)
    def test_transform_with_no_gt(self):
        # Empty GT annotations must pass through without errors and stay
        # mutually consistent (all zero-length).
        self.results['gt_bboxes'] = np.empty((0, 4), dtype=np.float32)
        self.results['gt_bboxes_labels'] = np.empty((0, ), dtype=np.int64)
        self.results['gt_ignore_flags'] = np.empty((0, ), dtype=bool)
        transform = Mosaic(
            img_scale=(12, 10), pre_transform=self.pre_transform)
        results = transform(copy.deepcopy(self.results))
        self.assertIsInstance(results, dict)
        self.assertTrue(results['img'].shape[:2] == (20, 24))
        self.assertTrue(
            results['gt_bboxes_labels'].shape[0] == results['gt_bboxes'].
            shape[0] == results['gt_ignore_flags'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == np.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)
    def test_transform_with_box_list(self):
        # Same checks with boxes wrapped in HorizontalBoxes; box dtype is
        # then a torch dtype rather than a numpy one.
        transform = Mosaic(
            img_scale=(12, 10), pre_transform=self.pre_transform)
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])
        results = transform(results)
        self.assertTrue(results['img'].shape[:2] == (20, 24))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == torch.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)
    def test_transform_with_mask(self):
        # Variant with polygon masks loaded alongside the boxes; uses a
        # local dataset/fixture so setUp's mask-free sample is untouched.
        rng = np.random.RandomState(0)
        pre_transform = [
            dict(
                type='LoadImageFromFile',
                file_client_args=dict(backend='disk')),
            dict(type='LoadAnnotations', with_bbox=True, with_mask=True)
        ]
        dataset = YOLOv5CocoDataset(
            data_prefix=dict(
                img=osp.join(osp.dirname(__file__), '../../data')),
            ann_file=osp.join(
                osp.dirname(__file__), '../../data/coco_sample_color.json'),
            filter_cfg=dict(filter_empty_gt=False, min_size=32),
            pipeline=[])
        results = {
            'img':
            np.random.random((224, 224, 3)),
            'img_shape': (224, 224),
            'gt_bboxes_labels':
            np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes':
            np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                     dtype=np.float32),
            'gt_ignore_flags':
            np.array([0, 0, 1], dtype=bool),
            'gt_masks':
            PolygonMasks.random(num_masks=3, height=224, width=224, rng=rng),
            'dataset':
            dataset
        }
        transform = Mosaic(img_scale=(12, 10), pre_transform=pre_transform)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])
        results = transform(results)
        self.assertTrue(results['img'].shape[:2] == (20, 24))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == torch.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)
class TestMosaic9(unittest.TestCase):
    """Tests for the 9-image Mosaic9 transform.

    Mirrors the Mosaic tests: constructor validation plus output
    shape/dtype checks for ndarray boxes, empty GT and HorizontalBoxes.
    """

    def setUp(self):
        """Setup the data info which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        # Pipeline Mosaic9 uses to load the extra images it mixes in.
        self.pre_transform = [
            dict(
                type='LoadImageFromFile',
                file_client_args=dict(backend='disk')),
            dict(type='LoadAnnotations', with_bbox=True)
        ]
        self.dataset = YOLOv5CocoDataset(
            data_prefix=dict(
                img=osp.join(osp.dirname(__file__), '../../data')),
            ann_file=osp.join(
                osp.dirname(__file__), '../../data/coco_sample_color.json'),
            filter_cfg=dict(filter_empty_gt=False, min_size=32),
            pipeline=[])
        # Base sample: 224x224 image, three boxes (last one ignored) and
        # bitmap masks.
        self.results = dict(
            img=np.random.random((224, 224, 3)),
            img_shape=(224, 224),
            gt_bboxes_labels=np.array([1, 2, 3], dtype=np.int64),
            gt_bboxes=np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            gt_ignore_flags=np.array([0, 0, 1], dtype=bool),
            gt_masks=BitmapMasks(
                rng.rand(3, 224, 224), height=224, width=224),
            dataset=self.dataset)

    def _assert_common(self, results, box_dtype):
        """Shape/dtype checks shared by every test case."""
        self.assertTrue(results['img'].shape[:2] == (20, 24))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == box_dtype)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

    def test_transform(self):
        # Invalid constructor arguments must be rejected.
        with self.assertRaises(AssertionError):
            Mosaic9(img_scale=640)
        with self.assertRaises(AssertionError):
            Mosaic9(prob=1.5)
        with self.assertRaises(AssertionError):
            Mosaic9(use_cached=True, max_cached_images=1)
        transform = Mosaic9(
            img_scale=(12, 10), pre_transform=self.pre_transform)
        results = transform(copy.deepcopy(self.results))
        self._assert_common(results, np.float32)

    def test_transform_with_no_gt(self):
        # Zero-length annotations must survive the transform intact.
        self.results['gt_bboxes'] = np.empty((0, 4), dtype=np.float32)
        self.results['gt_bboxes_labels'] = np.empty((0, ), dtype=np.int64)
        self.results['gt_ignore_flags'] = np.empty((0, ), dtype=bool)
        transform = Mosaic9(
            img_scale=(12, 10), pre_transform=self.pre_transform)
        results = transform(copy.deepcopy(self.results))
        self.assertIsInstance(results, dict)
        self.assertTrue(results['gt_ignore_flags'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self._assert_common(results, np.float32)

    def test_transform_with_box_list(self):
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])
        transform = Mosaic9(
            img_scale=(12, 10), pre_transform=self.pre_transform)
        results = transform(results)
        self._assert_common(results, torch.float32)
class TestYOLOv5MixUp(unittest.TestCase):
    # Covers the YOLOv5-style MixUp transform: output size is unchanged
    # (288x512 here), annotations stay aligned, and polygon masks are
    # supported.
    def setUp(self):
        """Setup the data info which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        # Pipeline MixUp uses to load the second image it blends in.
        self.pre_transform = [
            dict(
                type='LoadImageFromFile',
                file_client_args=dict(backend='disk')),
            dict(type='LoadAnnotations', with_bbox=True)
        ]
        self.dataset = YOLOv5CocoDataset(
            data_prefix=dict(
                img=osp.join(osp.dirname(__file__), '../../data')),
            ann_file=osp.join(
                osp.dirname(__file__), '../../data/coco_sample_color.json'),
            filter_cfg=dict(filter_empty_gt=False, min_size=32),
            pipeline=[])
        # Base sample: a 288x512 image with three boxes, last one ignored.
        self.results = {
            'img':
            np.random.random((288, 512, 3)),
            'img_shape': (288, 512),
            'gt_bboxes_labels':
            np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes':
            np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                     dtype=np.float32),
            'gt_ignore_flags':
            np.array([0, 0, 1], dtype=bool),
            'dataset':
            self.dataset
        }
    def test_transform(self):
        transform = YOLOv5MixUp(pre_transform=self.pre_transform)
        results = transform(copy.deepcopy(self.results))
        # MixUp blends pixel values; the spatial size must not change.
        self.assertTrue(results['img'].shape[:2] == (288, 512))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == np.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)
        # test assertion for invalid max_cached_images
        with self.assertRaises(AssertionError):
            transform = YOLOv5MixUp(use_cached=True, max_cached_images=1)
    def test_transform_with_box_list(self):
        # Same checks with HorizontalBoxes input; box dtype becomes a
        # torch dtype.
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])
        transform = YOLOv5MixUp(pre_transform=self.pre_transform)
        results = transform(results)
        self.assertTrue(results['img'].shape[:2] == (288, 512))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == torch.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)
    def test_transform_with_mask(self):
        # Variant with polygon masks; uses a local dataset/fixture so the
        # mask-free sample from setUp is untouched.
        rng = np.random.RandomState(0)
        pre_transform = [
            dict(
                type='LoadImageFromFile',
                file_client_args=dict(backend='disk')),
            dict(type='LoadAnnotations', with_bbox=True, with_mask=True)
        ]
        dataset = YOLOv5CocoDataset(
            data_prefix=dict(
                img=osp.join(osp.dirname(__file__), '../../data')),
            ann_file=osp.join(
                osp.dirname(__file__), '../../data/coco_sample_color.json'),
            filter_cfg=dict(filter_empty_gt=False, min_size=32),
            pipeline=[])
        results = {
            'img':
            np.random.random((288, 512, 3)),
            'img_shape': (288, 512),
            'gt_bboxes_labels':
            np.array([1, 2, 3], dtype=np.int64),
            'gt_bboxes':
            np.array([[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                     dtype=np.float32),
            'gt_ignore_flags':
            np.array([0, 0, 1], dtype=bool),
            'gt_masks':
            PolygonMasks.random(num_masks=3, height=288, width=512, rng=rng),
            'dataset':
            dataset
        }
        transform = YOLOv5MixUp(pre_transform=pre_transform)
        results = transform(copy.deepcopy(results))
        self.assertTrue(results['img'].shape[:2] == (288, 512))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == np.float32)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)
class TestYOLOXMixUp(unittest.TestCase):
    """Tests for the YOLOX-style MixUp augmentation."""

    def setUp(self):
        """Setup the data info which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        # Pipeline MixUp uses to load the second image it blends in.
        self.pre_transform = [
            dict(
                type='LoadImageFromFile',
                file_client_args=dict(backend='disk')),
            dict(type='LoadAnnotations', with_bbox=True)
        ]
        self.dataset = YOLOv5CocoDataset(
            data_prefix=dict(
                img=osp.join(osp.dirname(__file__), '../../data')),
            ann_file=osp.join(
                osp.dirname(__file__), '../../data/coco_sample_color.json'),
            filter_cfg=dict(filter_empty_gt=False, min_size=32),
            pipeline=[])
        # Base sample: 224x224 image, three boxes (last one ignored) and
        # bitmap masks.
        self.results = dict(
            img=np.random.random((224, 224, 3)),
            img_shape=(224, 224),
            gt_bboxes_labels=np.array([1, 2, 3], dtype=np.int64),
            gt_bboxes=np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            gt_ignore_flags=np.array([0, 0, 1], dtype=bool),
            gt_masks=BitmapMasks(
                rng.rand(3, 224, 224), height=224, width=224),
            dataset=self.dataset)

    def _make_transform(self):
        """Build a YOLOXMixUp instance with the configuration under test."""
        return YOLOXMixUp(
            img_scale=(10, 12),
            ratio_range=(0.8, 1.6),
            pad_val=114.0,
            pre_transform=self.pre_transform)

    def _assert_common(self, results, box_dtype):
        """Shape/dtype checks shared by both test cases."""
        self.assertTrue(results['img'].shape[:2] == (224, 224))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == box_dtype)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

    def test_transform(self):
        # Invalid constructor arguments must be rejected.
        with self.assertRaises(AssertionError):
            YOLOXMixUp(img_scale=640)
        with self.assertRaises(AssertionError):
            YOLOXMixUp(use_cached=True, max_cached_images=1)
        results = self._make_transform()(copy.deepcopy(self.results))
        self._assert_common(results, np.float32)

    def test_transform_with_boxlist(self):
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])
        results = self._make_transform()(results)
        self._assert_common(results, torch.float32)
| 17,683 | 40.221445 | 79 | py |
mmyolo | mmyolo-main/tests/test_datasets/test_transforms/test_transforms.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import unittest
import mmcv
import numpy as np
import torch
from mmdet.structures.bbox import HorizontalBoxes
from mmdet.structures.mask import BitmapMasks, PolygonMasks
from mmyolo.datasets.transforms import (LetterResize, LoadAnnotations,
YOLOv5HSVRandomAug,
YOLOv5KeepRatioResize,
YOLOv5RandomAffine)
from mmyolo.datasets.transforms.transforms import (PPYOLOERandomCrop,
PPYOLOERandomDistort,
YOLOv5CopyPaste)
class TestLetterResize(unittest.TestCase):
    # Tests for LetterResize: letterbox resize toward a per-sample
    # ``batch_shape`` with padding, the mini-pad / stretch-only variants,
    # and the combined YOLOv5KeepRatioResize -> LetterResize pipeline.
    def setUp(self):
        """Set up the data info which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        # 300x400 image with box, mask and a (192, 672) target batch shape.
        self.data_info1 = dict(
            img=np.random.random((300, 400, 3)),
            gt_bboxes=np.array([[0, 0, 150, 150]], dtype=np.float32),
            batch_shape=np.array([192, 672], dtype=np.int64),
            gt_masks=PolygonMasks.random(1, height=300, width=400, rng=rng))
        # Variants lacking batch_shape and/or annotations.
        self.data_info2 = dict(
            img=np.random.random((300, 400, 3)),
            gt_bboxes=np.array([[0, 0, 150, 150]], dtype=np.float32))
        self.data_info3 = dict(
            img=np.random.random((300, 400, 3)),
            batch_shape=np.array([192, 672], dtype=np.int64))
        self.data_info4 = dict(img=np.random.random((300, 400, 3)))
    def test_letter_resize(self):
        # Test allow_scale_up: image is only shrunk, never enlarged, and
        # the horizontal pad of 208 px shifts the box to x=208.
        transform = LetterResize(scale=(640, 640), allow_scale_up=False)
        results = transform(copy.deepcopy(self.data_info1))
        self.assertEqual(results['img_shape'], (192, 672, 3))
        self.assertTrue(
            (results['gt_bboxes'] == np.array([[208., 0., 304., 96.]])).all())
        self.assertTrue((results['batch_shape'] == np.array([192, 672])).all())
        # pad_param is (top, bottom, left, right).
        self.assertTrue((results['pad_param'] == np.array([0., 0., 208.,
                                                           208.])).all())
        self.assertTrue(
            (np.array(results['scale_factor'], dtype=np.float32) <= 1.).all())
        # Test pad_val: padding value must not affect geometry.
        transform = LetterResize(scale=(640, 640), pad_val=dict(img=144))
        results = transform(copy.deepcopy(self.data_info1))
        self.assertEqual(results['img_shape'], (192, 672, 3))
        self.assertTrue(
            (results['gt_bboxes'] == np.array([[208., 0., 304., 96.]])).all())
        self.assertTrue((results['batch_shape'] == np.array([192, 672])).all())
        self.assertTrue((results['pad_param'] == np.array([0., 0., 208.,
                                                           208.])).all())
        self.assertTrue(
            (np.array(results['scale_factor'], dtype=np.float32) <= 1.).all())
        # Test use_mini_pad: minimal padding, so the output width shrinks
        # to 256 and no pad is recorded.
        transform = LetterResize(scale=(640, 640), use_mini_pad=True)
        results = transform(copy.deepcopy(self.data_info1))
        self.assertEqual(results['img_shape'], (192, 256, 3))
        self.assertTrue((results['gt_bboxes'] == np.array([[0., 0., 96.,
                                                            96.]])).all())
        self.assertTrue((results['batch_shape'] == np.array([192, 672])).all())
        self.assertTrue((results['pad_param'] == np.array([0., 0., 0.,
                                                           0.])).all())
        self.assertTrue(
            (np.array(results['scale_factor'], dtype=np.float32) <= 1.).all())
        # Test stretch_only: aspect ratio is not preserved and no padding
        # is applied.
        transform = LetterResize(scale=(640, 640), stretch_only=True)
        results = transform(copy.deepcopy(self.data_info1))
        self.assertEqual(results['img_shape'], (192, 672, 3))
        self.assertTrue((results['gt_bboxes'] == np.array(
            [[0., 0., 251.99998474121094, 96.]])).all())
        self.assertTrue((results['batch_shape'] == np.array([192, 672])).all())
        self.assertTrue((results['pad_param'] == np.array([0., 0., 0.,
                                                           0.])).all())
        # Test random input/output sizes: the output must always match the
        # requested batch shape.
        transform = LetterResize(scale=(640, 640), pad_val=dict(img=144))
        for _ in range(5):
            input_h, input_w = np.random.randint(100, 700), np.random.randint(
                100, 700)
            output_h, output_w = np.random.randint(100,
                                                   700), np.random.randint(
                                                       100, 700)
            data_info = dict(
                img=np.random.random((input_h, input_w, 3)),
                gt_bboxes=np.array([[0, 0, 10, 10]], dtype=np.float32),
                batch_shape=np.array([output_h, output_w], dtype=np.int64),
                gt_masks=PolygonMasks(
                    [[np.array([0., 0., 0., 10., 10., 10., 10., 0.])]],
                    height=input_h,
                    width=input_w))
            results = transform(data_info)
            self.assertEqual(results['img_shape'], (output_h, output_w, 3))
            self.assertTrue(
                (results['batch_shape'] == np.array([output_h,
                                                     output_w])).all())
        # Test without batchshape: falls back to the configured scale.
        transform = LetterResize(scale=(640, 640), pad_val=dict(img=144))
        for _ in range(5):
            input_h, input_w = np.random.randint(100, 700), np.random.randint(
                100, 700)
            data_info = dict(
                img=np.random.random((input_h, input_w, 3)),
                gt_bboxes=np.array([[0, 0, 10, 10]], dtype=np.float32),
                gt_masks=PolygonMasks(
                    [[np.array([0., 0., 0., 10., 10., 10., 10., 0.])]],
                    height=input_h,
                    width=input_w))
            results = transform(data_info)
            self.assertEqual(results['img_shape'], (640, 640, 3))
        # TODO: Testing the existence of multiple scale_factor and pad_param
        transform = [
            YOLOv5KeepRatioResize(scale=(32, 32)),
            LetterResize(scale=(64, 68), pad_val=dict(img=144))
        ]
        for _ in range(5):
            input_h, input_w = np.random.randint(100, 700), np.random.randint(
                100, 700)
            output_h, output_w = np.random.randint(100,
                                                   700), np.random.randint(
                                                       100, 700)
            data_info = dict(
                img=np.random.random((input_h, input_w, 3)),
                gt_bboxes=np.array([[0, 0, 5, 5]], dtype=np.float32),
                batch_shape=np.array([output_h, output_w], dtype=np.int64))
            for t in transform:
                data_info = t(data_info)
            # because of the "math.round" operation,
            # it is unable to strictly restore the original input shape
            # we just validate the correctness of scale_factor and pad_param
            self.assertIn('scale_factor', data_info)
            self.assertIn('pad_param', data_info)
            pad_param = data_info['pad_param'].reshape(-1, 2).sum(
                1)  # (top, b, l, r) -> (h, w)
            scale_factor = np.asarray(
                data_info['scale_factor'])[::-1]  # (w, h) -> (h, w)
            # Recompute the keep-ratio stage's effective scale, including
            # the rounding it applies, then derive the letterbox stage's
            # scale from the final shape minus padding; the product must
            # match the reported cumulative scale_factor.
            scale_factor_keepratio = np.min(
                np.asarray((32, 32)) / (input_h, input_w))
            validate_shape = np.floor(
                np.asarray((input_h, input_w)) * scale_factor_keepratio + 0.5)
            scale_factor_keepratio = np.floor(scale_factor_keepratio *
                                              input_h + 0.5) / input_h
            scale_factor_letter = (output_h, output_w) / validate_shape
            scale_factor_letter = (
                scale_factor_letter -
                (pad_param / validate_shape))[np.argmin(scale_factor_letter)]
            self.assertTrue(data_info['img_shape'][:2] == (output_h, output_w))
            self.assertTrue((scale_factor == (scale_factor_keepratio *
                                              scale_factor_letter)).all())
class TestYOLOv5KeepRatioResize(unittest.TestCase):
    """Tests for ``YOLOv5KeepRatioResize`` (aspect-ratio preserving resize)."""

    def setUp(self):
        """Set up the data info which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        rng = np.random.RandomState(0)
        # 300x400 image with one box and one random polygon mask.
        self.data_info1 = dict(
            img=np.random.random((300, 400, 3)),
            gt_bboxes=np.array([[0, 0, 150, 150]], dtype=np.float32),
            gt_masks=PolygonMasks.random(
                num_masks=1, height=300, width=400, rng=rng))
        # Image-only input (no annotations).
        self.data_info2 = dict(img=np.random.random((300, 400, 3)))

    def test_yolov5_keep_ratio_resize(self):
        # test assertion for invalid keep_ratio: the transform only
        # supports keep_ratio=True, so forcing it off must raise.
        with self.assertRaises(AssertionError):
            transform = YOLOv5KeepRatioResize(scale=(640, 640))
            transform.keep_ratio = False
            results = transform(copy.deepcopy(self.data_info1))
        # Test with gt_bboxes
        transform = YOLOv5KeepRatioResize(scale=(640, 640))
        results = transform(copy.deepcopy(self.data_info1))
        # Fixed: ``assertTrue(transform.keep_ratio, True)`` passed ``True``
        # as the failure *message*; assert the flag directly instead.
        self.assertTrue(transform.keep_ratio)
        self.assertEqual(results['img_shape'], (480, 640))
        self.assertTrue(
            (results['gt_bboxes'] == np.array([[0., 0., 240., 240.]])).all())
        self.assertTrue((np.array(results['scale_factor'],
                                  dtype=np.float32) == 1.6).all())
        # Test only img
        transform = YOLOv5KeepRatioResize(scale=(640, 640))
        results = transform(copy.deepcopy(self.data_info2))
        self.assertEqual(results['img_shape'], (480, 640))
        self.assertTrue((np.array(results['scale_factor'],
                                  dtype=np.float32) == 1.6).all())
class TestYOLOv5HSVRandomAug(unittest.TestCase):
    """``YOLOv5HSVRandomAug`` must keep the spatial size of the image."""

    def setUp(self):
        """Set up the data info which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        img_path = osp.join(osp.dirname(__file__), '../../data/color.jpg')
        self.data_info = dict(img=mmcv.imread(img_path, 'color'))

    def test_yolov5_hsv_random_aug(self):
        aug = YOLOv5HSVRandomAug(
            hue_delta=0.015, saturation_delta=0.7, value_delta=0.4)
        augmented = aug(copy.deepcopy(self.data_info))
        # Color jitter must not change the image's height/width.
        self.assertTrue(
            augmented['img'].shape[:2] == self.data_info['img'].shape[:2])
class TestLoadAnnotations(unittest.TestCase):
    """Tests for the YOLO-specific ``LoadAnnotations`` transform."""

    def setUp(self):
        """Set up the data info which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        data_prefix = osp.join(osp.dirname(__file__), '../../data')
        seg_map = osp.join(data_prefix, 'gray.jpg')
        # Three instances; the last one carries ignore_flag=1.
        instances = [
            dict(
                bbox=[0, 0, 10, 20],
                bbox_label=1,
                mask=[[0, 0, 0, 20, 10, 20, 10, 0]],
                ignore_flag=0),
            dict(
                bbox=[10, 10, 110, 120],
                bbox_label=2,
                mask=[[10, 10, 110, 10, 110, 120, 110, 10]],
                ignore_flag=0),
            dict(
                bbox=[50, 50, 60, 80],
                bbox_label=2,
                mask=[[50, 50, 60, 50, 60, 80, 50, 80]],
                ignore_flag=1),
        ]
        self.results = dict(
            ori_shape=(300, 400),
            seg_map_path=seg_map,
            instances=instances)

    def test_load_bboxes(self):
        transform = LoadAnnotations(
            with_bbox=True,
            with_label=False,
            with_seg=False,
            with_mask=False,
            box_type=None)
        results = transform(copy.deepcopy(self.results))
        self.assertIn('gt_bboxes', results)
        # Only the two non-ignored instances end up in gt_bboxes.
        expected_boxes = np.array([[0, 0, 10, 20], [10, 10, 110, 120]])
        self.assertTrue((results['gt_bboxes'] == expected_boxes).all())
        self.assertEqual(results['gt_bboxes'].dtype, np.float32)
        self.assertTrue(
            (results['gt_ignore_flags'] == np.array([False, False])).all())
        self.assertEqual(results['gt_ignore_flags'].dtype, bool)

        # test empty instance: loading nothing still yields empty,
        # correctly shaped annotation arrays.
        results = transform({})
        self.assertIn('gt_bboxes', results)
        self.assertTrue(results['gt_bboxes'].shape == (0, 4))
        self.assertIn('gt_ignore_flags', results)
        self.assertTrue(results['gt_ignore_flags'].shape == (0, ))

    def test_load_labels(self):
        transform = LoadAnnotations(
            with_bbox=False,
            with_label=True,
            with_seg=False,
            with_mask=False,
        )
        results = transform(copy.deepcopy(self.results))
        self.assertIn('gt_bboxes_labels', results)
        self.assertTrue(
            (results['gt_bboxes_labels'] == np.array([1, 2])).all())
        self.assertEqual(results['gt_bboxes_labels'].dtype, np.int64)

        # test empty instance
        results = transform({})
        self.assertIn('gt_bboxes_labels', results)
        self.assertTrue(results['gt_bboxes_labels'].shape == (0, ))
class TestYOLOv5RandomAffine(unittest.TestCase):
    """Tests for ``YOLOv5RandomAffine``."""

    def setUp(self):
        """Setup the data info which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        # 224x224 image with three boxes; the last one is ignored.
        self.results = dict(
            img=np.random.random((224, 224, 3)),
            img_shape=(224, 224),
            gt_bboxes_labels=np.array([1, 2, 3], dtype=np.int64),
            gt_bboxes=np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            gt_ignore_flags=np.array([0, 0, 1], dtype=bool))

    def _assert_outputs(self, results, box_dtype):
        """Common shape/dtype checks shared by both test cases."""
        self.assertTrue(results['img'].shape[:2] == (224, 224))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == box_dtype)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

    def test_transform(self):
        # Invalid hyper-parameters must be rejected at construction time.
        with self.assertRaises(AssertionError):
            YOLOv5RandomAffine(max_translate_ratio=1.5)
        with self.assertRaises(AssertionError):
            YOLOv5RandomAffine(scaling_ratio_range=(1.5, 0.5))
        with self.assertRaises(AssertionError):
            YOLOv5RandomAffine(scaling_ratio_range=(0, 0.5))
        results = YOLOv5RandomAffine()(copy.deepcopy(self.results))
        self._assert_outputs(results, np.float32)

    def test_transform_with_boxlist(self):
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])
        results = YOLOv5RandomAffine()(copy.deepcopy(results))
        self._assert_outputs(results, torch.float32)
class TestPPYOLOERandomCrop(unittest.TestCase):
    """Tests for ``PPYOLOERandomCrop``."""

    def setUp(self):
        """Setup the data info which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.results = dict(
            img=np.random.random((224, 224, 3)),
            img_shape=(224, 224),
            gt_bboxes_labels=np.array([1, 2, 3], dtype=np.int64),
            gt_bboxes=np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            gt_ignore_flags=np.array([0, 0, 1], dtype=bool))

    def _assert_annotations(self, results, box_dtype):
        """Annotation alignment/dtype checks shared by both test cases.

        The crop changes the image size, so no image-shape assertion here.
        """
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == box_dtype)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

    def test_transform(self):
        results = PPYOLOERandomCrop()(copy.deepcopy(self.results))
        self._assert_annotations(results, np.float32)

    def test_transform_with_boxlist(self):
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])
        results = PPYOLOERandomCrop()(copy.deepcopy(results))
        self._assert_annotations(results, torch.float32)
class TestPPYOLOERandomDistort(unittest.TestCase):
    """Tests for ``PPYOLOERandomDistort`` (photometric distortion)."""

    def setUp(self):
        """Setup the data info which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        self.results = dict(
            img=np.random.random((224, 224, 3)),
            img_shape=(224, 224),
            gt_bboxes_labels=np.array([1, 2, 3], dtype=np.int64),
            gt_bboxes=np.array(
                [[10, 10, 20, 20], [20, 20, 40, 40], [40, 40, 80, 80]],
                dtype=np.float32),
            gt_ignore_flags=np.array([0, 0, 1], dtype=bool))

    def _assert_outputs(self, results, box_dtype):
        """Shape/dtype checks shared by both test cases. Photometric
        distortion must leave the spatial size untouched."""
        self.assertTrue(results['img'].shape[:2] == (224, 224))
        self.assertTrue(results['gt_bboxes_labels'].shape[0] ==
                        results['gt_bboxes'].shape[0])
        self.assertTrue(results['gt_bboxes_labels'].dtype == np.int64)
        self.assertTrue(results['gt_bboxes'].dtype == box_dtype)
        self.assertTrue(results['gt_ignore_flags'].dtype == bool)

    def test_transform(self):
        # test assertion for invalid prob
        with self.assertRaises(AssertionError):
            PPYOLOERandomDistort(hue_cfg=dict(min=-18, max=18, prob=1.5))
        # test assertion for invalid num_distort_func
        with self.assertRaises(AssertionError):
            PPYOLOERandomDistort(num_distort_func=5)
        results = PPYOLOERandomDistort()(copy.deepcopy(self.results))
        self._assert_outputs(results, np.float32)

    def test_transform_with_boxlist(self):
        results = copy.deepcopy(self.results)
        results['gt_bboxes'] = HorizontalBoxes(results['gt_bboxes'])
        results = PPYOLOERandomDistort()(copy.deepcopy(results))
        self._assert_outputs(results, torch.float32)
class TestYOLOv5CopyPaste(unittest.TestCase):
    """Tests for ``YOLOv5CopyPaste``."""

    def setUp(self):
        """Set up the data info which are used in every test method.
        TestCase calls functions in this order: setUp() -> testMethod() ->
        tearDown() -> cleanUp()
        """
        square = np.array([0., 0., 0., 10., 10., 10., 10., 0.])
        self.data_info = dict(
            img=np.random.random((300, 400, 3)),
            gt_bboxes=np.array([[0, 0, 10, 10]], dtype=np.float32),
            gt_masks=PolygonMasks([[square]], height=300, width=400))

    def test_transform(self):
        # With prob=1.0 copy-paste always triggers, so the single instance
        # is duplicated: two boxes and two masks afterwards.
        transform = YOLOv5CopyPaste(prob=1.0)
        results = transform(copy.deepcopy(self.data_info))
        self.assertTrue(len(results['gt_bboxes']) == 2)
        self.assertTrue(len(results['gt_masks']) == 2)

        # BitmapMasks input is rejected: the transform requires polygon
        # masks.
        rng = np.random.RandomState(0)
        with self.assertRaises(AssertionError):
            transform(
                dict(
                    img=np.random.random((300, 400, 3)),
                    gt_bboxes=np.array([[0, 0, 10, 10]], dtype=np.float32),
                    gt_masks=BitmapMasks(
                        rng.rand(1, 300, 400), height=300, width=400)))
| 21,175 | 42.12831 | 79 | py |
mmyolo | mmyolo-main/demo/deploy_demo.py | # Copyright (c) OpenMMLab. All rights reserved.
"""Deploy demo for mmdeploy.
This script help user to run mmdeploy demo after convert the
checkpoint to backends.
Usage:
python deploy_demo.py img \
config \
checkpoint \
[--deploy-cfg DEPLOY_CFG] \
[--device DEVICE] \
[--out-dir OUT_DIR] \
[--show] \
[--score-thr SCORE_THR]
Example:
python deploy_demo.py \
${MMYOLO_PATH}/data/cat/images \
./yolov5_s-v61_syncbn_fast_1xb32-100e_cat.py \
./end2end.engine \
--deploy-cfg ./detection_tensorrt-fp16_dynamic-192x192-960x960.py \
--out-dir ${MMYOLO_PATH}/work_dirs/deploy_predict_out \
--device cuda:0 \
--score-thr 0.5
"""
import argparse
import os
import torch
from mmengine import ProgressBar
from mmyolo.utils.misc import get_file_list
try:
from mmdeploy.apis.utils import build_task_processor
from mmdeploy.utils import get_input_shape, load_config
except ImportError:
raise ImportError(
'mmdeploy is not installed, please see '
'https://mmdeploy.readthedocs.io/en/1.x/01-how-to-build/build_from_source.html' # noqa
)
def parse_args():
    """Build the demo's command-line interface and parse ``sys.argv``.

    Returns:
        argparse.Namespace: Parsed arguments (img/config/checkpoint are
        positional; the rest are optional flags with defaults).
    """
    cli = argparse.ArgumentParser(description='For mmdeploy predict')
    cli.add_argument(
        'img', help='Image path, include image file, dir and URL.')
    cli.add_argument('config', help='model config root')
    cli.add_argument('checkpoint', help='checkpoint backend model path')
    cli.add_argument('--deploy-cfg', help='deploy config path')
    cli.add_argument(
        '--device', default='cuda:0', help='device used for conversion')
    cli.add_argument(
        '--out-dir', default='./output', help='Path to output file')
    cli.add_argument(
        '--show', action='store_true', help='Show the detection results')
    cli.add_argument(
        '--score-thr', type=float, default=0.3, help='Bbox score threshold')
    return cli.parse_args()
# TODO Still need to refactor to not building dataset.
def main():
    """Run mmdeploy backend inference over images and visualize detections.

    Steps: parse CLI args -> load deploy/model configs -> build backend
    model -> per-image inference -> score filtering -> visualize or save.
    """
    args = parse_args()

    if not args.show:
        # makedirs(+exist_ok) supports nested output paths and repeated
        # runs; the previous single-level os.mkdir failed on both
        os.makedirs(args.out_dir, exist_ok=True)

    # read deploy_cfg and config
    deploy_cfg, model_cfg = load_config(args.deploy_cfg, args.config)

    # build task and backend model
    task_processor = build_task_processor(model_cfg, deploy_cfg, args.device)
    model = task_processor.build_backend_model([args.checkpoint])

    # get model input shape
    input_shape = get_input_shape(deploy_cfg)

    # get file list
    files, source_type = get_file_list(args.img)

    # start detector inference
    progress_bar = ProgressBar(len(files))
    for file in files:
        # process input image
        model_inputs, _ = task_processor.create_input(file, input_shape)

        # do model inference
        with torch.no_grad():
            result = model.test_step(model_inputs)

        if source_type['is_dir']:
            # flatten sub-directories into the output file name
            filename = os.path.relpath(file, args.img).replace('/', '_')
        else:
            filename = os.path.basename(file)
        out_file = None if args.show else os.path.join(args.out_dir, filename)

        # keep only predictions above the score threshold
        result = result[0]
        result.pred_instances = result.pred_instances[
            result.pred_instances.scores > args.score_thr]

        # visualize results
        task_processor.visualize(
            image=file,
            model=model,
            result=result,
            show_result=args.show,
            window_name=os.path.basename(filename),
            output_file=out_file)

        progress_bar.update()

    print('All done!')


if __name__ == '__main__':
    main()
| 3,823 | 30.603306 | 95 | py |
mmyolo | mmyolo-main/demo/boxam_vis_demo.py | # Copyright (c) OpenMMLab. All rights reserved.
"""This script is in the experimental verification stage and cannot be
guaranteed to be completely correct. Currently Grad-based CAM and Grad-free CAM
are supported.
The target detection task is different from the classification task. It not
only includes the AM map of the category, but also includes information such as
bbox and mask, so this script is named bboxam.
"""
import argparse
import os.path
import warnings
from functools import partial
import cv2
import mmcv
from mmengine import Config, DictAction, MessageHub
from mmengine.utils import ProgressBar
from mmyolo.utils.boxam_utils import (BoxAMDetectorVisualizer,
BoxAMDetectorWrapper, DetAblationLayer,
DetBoxScoreTarget, GradCAM,
GradCAMPlusPlus, reshape_transform)
from mmyolo.utils.misc import get_file_list
try:
from pytorch_grad_cam import AblationCAM, EigenCAM
except ImportError:
raise ImportError('Please run `pip install "grad-cam"` to install '
'pytorch_grad_cam package.')
# CAM methods that do not require gradients from the detector.
GRAD_FREE_METHOD_MAP = {
    'ablationcam': AblationCAM,
    'eigencam': EigenCAM,
    # 'scorecam': ScoreCAM, # consumes too much memory
}

# CAM methods that back-propagate a loss to the chosen target layer.
GRAD_BASED_METHOD_MAP = {'gradcam': GradCAM, 'gradcam++': GradCAMPlusPlus}

ALL_SUPPORT_METHODS = list(GRAD_FREE_METHOD_MAP.keys()
                           | GRAD_BASED_METHOD_MAP.keys())

# Per-algorithm loss terms to drop when building the AM loss target
# (matched against the config file path in main()).
IGNORE_LOSS_PARAMS = {
    'yolov5': ['loss_obj'],
    'yolov6': ['loss_cls'],
    'yolox': ['loss_obj'],
    'rtmdet': ['loss_cls'],
    'yolov7': ['loss_obj'],
    'yolov8': ['loss_cls'],
    'ppyoloe': ['loss_cls'],
}

# This parameter is required in some algorithms
# for calculating Loss
message_hub = MessageHub.get_current_instance()
message_hub.runtime_info['epoch'] = 0
def parse_args():
    """Build the command-line interface for the Box AM visualization tool."""
    parser = argparse.ArgumentParser(description='Visualize Box AM')
    parser.add_argument(
        'img', help='Image path, include image file, dir and URL.')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument(
        '--method',
        default='gradcam',
        choices=ALL_SUPPORT_METHODS,
        help='Type of method to use, supports '
        f'{", ".join(ALL_SUPPORT_METHODS)}.')
    parser.add_argument(
        '--target-layers',
        default=['neck.out_layers[2]'],
        nargs='+',
        type=str,
        help='The target layers to get Box AM, if not set, the tool will '
        'specify the neck.out_layers[2]')
    parser.add_argument(
        '--out-dir', default='./output', help='Path to output file')
    parser.add_argument(
        '--show', action='store_true', help='Show the CAM results')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--score-thr', type=float, default=0.3, help='Bbox score threshold')
    parser.add_argument(
        '--topk',
        type=int,
        default=-1,
        help='Select topk predict resutls to show. -1 are mean all.')
    parser.add_argument(
        '--max-shape',
        nargs='+',
        type=int,
        default=-1,
        help='max shapes. Its purpose is to save GPU memory. '
        'The activation map is scaled and then evaluated. '
        'If set to -1, it means no scaling.')
    parser.add_argument(
        '--preview-model',
        default=False,
        action='store_true',
        help='To preview all the model layers')
    parser.add_argument(
        '--norm-in-bbox', action='store_true', help='Norm in bbox of am image')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    # Only used by AblationCAM
    parser.add_argument(
        '--batch-size',
        type=int,
        default=1,
        help='batch of inference of AblationCAM')
    # NOTE(review): `type=int` with a float default looks inconsistent — the
    # name suggests a ratio, so type=float is probably intended; argparse only
    # applies `type` to user-supplied values, so the default stays 0.5.
    # TODO confirm against BoxAMDetectorVisualizer usage.
    parser.add_argument(
        '--ratio-channels-to-ablate',
        type=int,
        default=0.5,
        help='Making it much faster of AblationCAM. '
        'The parameter controls how many channels should be ablated')
    args = parser.parse_args()
    return args
def init_detector_and_visualizer(args, cfg):
    """Build the wrapped detector and its Box AM visualizer.

    Returns ``(None, None)`` when ``--preview-model`` is set — the caller is
    expected to stop in that case since no visualizer is constructed.
    """
    max_shape = args.max_shape
    if not isinstance(max_shape, list):
        max_shape = [args.max_shape]
    # max_shape is either a single size or an (h, w) pair
    assert len(max_shape) == 1 or len(max_shape) == 2

    model_wrapper = BoxAMDetectorWrapper(
        cfg, args.checkpoint, args.score_thr, device=args.device)

    if args.preview_model:
        print(model_wrapper.detector)
        print('\n Please remove `--preview-model` to get the BoxAM.')
        return None, None

    target_layers = []
    for target_layer in args.target_layers:
        try:
            # NOTE(review): eval() on a user-supplied layer path executes
            # arbitrary code — acceptable only for a local demo tool.
            target_layers.append(
                eval(f'model_wrapper.detector.{target_layer}'))
        except Exception as e:
            # print the model so the user can pick a valid layer name
            print(model_wrapper.detector)
            raise RuntimeError('layer does not exist', e)

    # extra knobs consumed only by AblationCAM
    ablationcam_extra_params = {
        'batch_size': args.batch_size,
        'ablation_layer': DetAblationLayer(),
        'ratio_channels_to_ablate': args.ratio_channels_to_ablate
    }

    if args.method in GRAD_BASED_METHOD_MAP:
        method_class = GRAD_BASED_METHOD_MAP[args.method]
        is_need_grad = True
    else:
        method_class = GRAD_FREE_METHOD_MAP[args.method]
        is_need_grad = False

    boxam_detector_visualizer = BoxAMDetectorVisualizer(
        method_class,
        model_wrapper,
        target_layers,
        reshape_transform=partial(
            reshape_transform, max_shape=max_shape, is_need_grad=is_need_grad),
        is_need_grad=is_need_grad,
        extra_params=ablationcam_extra_params)
    return model_wrapper, boxam_detector_visualizer
def main():
    """Compute Box AM maps for every input image and save or show them."""
    args = parse_args()

    # hard code: infer which loss terms to drop from the algorithm name
    # embedded in the config file path
    ignore_loss_params = None
    for param_keys in IGNORE_LOSS_PARAMS:
        if param_keys in args.config:
            print(f'The algorithm currently used is {param_keys}')
            ignore_loss_params = IGNORE_LOSS_PARAMS[param_keys]
            break

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    if not args.show:
        # makedirs(+exist_ok) supports nested output paths and repeated
        # runs; the previous single-level os.mkdir failed on both
        os.makedirs(args.out_dir, exist_ok=True)

    model_wrapper, boxam_detector_visualizer = init_detector_and_visualizer(
        args, cfg)
    if model_wrapper is None:
        # `--preview-model` was set: the structure has been printed and no
        # visualizer exists — continuing would crash on set_input_data
        return

    # get file list
    image_list, source_type = get_file_list(args.img)

    progress_bar = ProgressBar(len(image_list))
    for image_path in image_list:
        image = cv2.imread(image_path)
        model_wrapper.set_input_data(image)

        # forward detection results
        result = model_wrapper()[0]

        pred_instances = result.pred_instances
        # Get candidate predict info with score threshold
        pred_instances = pred_instances[pred_instances.scores > args.score_thr]

        if len(pred_instances) == 0:
            warnings.warn('empty detection results! skip this')
            continue

        if args.topk > 0:
            pred_instances = pred_instances[:args.topk]

        targets = [
            DetBoxScoreTarget(
                pred_instances,
                device=args.device,
                ignore_loss_params=ignore_loss_params)
        ]

        if args.method in GRAD_BASED_METHOD_MAP:
            # grad-based methods need the loss-producing forward pass
            model_wrapper.need_loss(True)
            model_wrapper.set_input_data(image, pred_instances)
            boxam_detector_visualizer.switch_activations_and_grads(
                model_wrapper)

        # get box am image
        grayscale_boxam = boxam_detector_visualizer(image, targets=targets)

        # draw cam on image
        pred_instances = pred_instances.numpy()
        image_with_bounding_boxes = boxam_detector_visualizer.show_am(
            image,
            pred_instances,
            grayscale_boxam,
            with_norm_in_bboxes=args.norm_in_bbox)

        if source_type['is_dir']:
            filename = os.path.relpath(image_path, args.img).replace('/', '_')
        else:
            filename = os.path.basename(image_path)
        out_file = None if args.show else os.path.join(args.out_dir, filename)

        if out_file:
            mmcv.imwrite(image_with_bounding_boxes, out_file)
        else:
            cv2.namedWindow(filename, 0)
            cv2.imshow(filename, image_with_bounding_boxes)
            cv2.waitKey(0)

        # switch the wrapper back to inference mode for the next image
        if args.method in GRAD_BASED_METHOD_MAP:
            model_wrapper.need_loss(False)
            boxam_detector_visualizer.switch_activations_and_grads(
                model_wrapper)

        progress_bar.update()

    if not args.show:
        print(f'All done!'
              f'\nResults have been saved at {os.path.abspath(args.out_dir)}')


if __name__ == '__main__':
    main()
| 9,251 | 32.400722 | 79 | py |
mmyolo | mmyolo-main/docs/en/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'MMYOLO'
copyright = '2022, OpenMMLab'
author = 'MMYOLO Authors'

# Single source of truth for the package version, parsed by get_version().
version_file = '../../mmyolo/version.py'
def get_version():
    """Execute ``version_file`` in an isolated namespace and return the
    ``__version__`` value it defines."""
    version_ns = {}
    with open(version_file) as version_fp:
        exec(compile(version_fp.read(), version_file, 'exec'), version_ns)
    return version_ns['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
    'myst_parser',
    'sphinx_markdown_tables',
    'sphinx_copybutton',
]

myst_enable_extensions = ['colon_fence']
myst_heading_anchors = 3

# Heavy/optional imports mocked during autodoc so the docs build without
# the full runtime environment installed.
autodoc_mock_imports = [
    'matplotlib', 'pycocotools', 'terminaltables', 'mmyolo.version', 'mmcv.ops'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = {
    '.rst': 'restructuredtext',
    '.md': 'markdown',
}

# The master toctree document.
master_doc = 'index'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]

html_theme_options = {
    'menu': [
        {
            'name': 'GitHub',
            'url': 'https://github.com/open-mmlab/mmyolo'
        },
    ],
    # Specify the language of shared menu
    'menu_lang': 'en',
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']

# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
    """Regenerate the model-zoo statistics page before the docs build."""
    subprocess.run(['./stat.py'])


def setup(app):
    """Sphinx extension hook: run ``stat.py`` once the builder initializes."""
    app.connect('builder-inited', builder_inited_handler)
| 3,414 | 28.439655 | 79 | py |
mmyolo | mmyolo-main/docs/zh_cn/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
import pytorch_sphinx_theme
sys.path.insert(0, os.path.abspath('../../'))
# -- Project information -----------------------------------------------------
project = 'MMYOLO'
copyright = '2022, OpenMMLab'
author = 'MMYOLO Authors'

# Single source of truth for the package version, parsed by get_version().
version_file = '../../mmyolo/version.py'
def get_version():
    """Execute ``version_file`` in an isolated namespace and return the
    ``__version__`` value it defines."""
    version_ns = {}
    with open(version_file) as version_fp:
        exec(compile(version_fp.read(), version_file, 'exec'), version_ns)
    return version_ns['__version__']
# The full version, including alpha/beta/rc tags
release = get_version()

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
    'myst_parser',
    'sphinx_markdown_tables',
    'sphinx_copybutton',
]

myst_enable_extensions = ['colon_fence']
myst_heading_anchors = 3

# Heavy/optional imports mocked during autodoc so the docs build without
# the full runtime environment installed.
autodoc_mock_imports = [
    'matplotlib', 'pycocotools', 'terminaltables', 'mmyolo.version', 'mmcv.ops'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = {
    '.rst': 'restructuredtext',
    '.md': 'markdown',
}

# The master toctree document.
master_doc = 'index'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]

html_theme_options = {
    'menu': [
        {
            'name': 'GitHub',
            'url': 'https://github.com/open-mmlab/mmyolo'
        },
    ],
    # Specify the language of shared menu
    'menu_lang': 'cn',
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/readthedocs.css']

# Build the Simplified-Chinese variant of the documentation.
language = 'zh_CN'

# -- Extension configuration -------------------------------------------------
# Ignore >>> when copying code
copybutton_prompt_text = r'>>> |\.\.\. '
copybutton_prompt_is_regexp = True
def builder_inited_handler(app):
    """Regenerate the model-zoo statistics page before the docs build."""
    subprocess.run(['./stat.py'])


def setup(app):
    """Sphinx extension hook: run ``stat.py`` once the builder initializes."""
    app.connect('builder-inited', builder_inited_handler)
| 3,434 | 28.110169 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/data_preprocessors/data_preprocessor.py | # Copyright (c) OpenMMLab. All rights reserved.
import random
from typing import List, Optional, Tuple, Union
import torch
import torch.nn.functional as F
from mmdet.models import BatchSyncRandomResize
from mmdet.models.data_preprocessors import DetDataPreprocessor
from mmengine import MessageHub, is_list_of
from mmengine.structures import BaseDataElement
from torch import Tensor
from mmyolo.registry import MODELS
# Union of every container/scalar type that ``cast_data`` may receive.
CastData = Union[tuple, dict, BaseDataElement, torch.Tensor, list, bytes, str,
                 None]
@MODELS.register_module()
class YOLOXBatchSyncRandomResize(BatchSyncRandomResize):
    """YOLOX batch random resize.

    Args:
        random_size_range (tuple): The multi-scale random range during
            multi-scale training.
        interval (int): The iter interval of change
            image size. Defaults to 10.
        size_divisor (int): Image size divisible factor.
            Defaults to 32.
    """

    # Fixed return annotation: the old `-> Tensor and dict` evaluated
    # to plain `dict` at runtime and did not describe the tuple result.
    def forward(self, inputs: Tensor,
                data_samples: dict) -> Tuple[Tensor, dict]:
        """Resize a batch of images and bboxes to shape ``self._input_size``.

        Args:
            inputs (Tensor): Batched images, shape (N, C, H, W).
            data_samples (dict): Must contain ``bboxes_labels``; columns
                2::2 / 3::2 are treated as x / y coordinates respectively.

        Returns:
            Tuple[Tensor, dict]: Resized images and updated samples.
        """
        h, w = inputs.shape[-2:]
        inputs = inputs.float()
        assert isinstance(data_samples, dict)

        if self._input_size is None:
            # first call: lock onto the incoming resolution
            self._input_size = (h, w)
        scale_y = self._input_size[0] / h
        scale_x = self._input_size[1] / w
        if scale_x != 1 or scale_y != 1:
            inputs = F.interpolate(
                inputs,
                size=self._input_size,
                mode='bilinear',
                align_corners=False)
            # scale box coordinates in lockstep with the image resize
            data_samples['bboxes_labels'][:, 2::2] *= scale_x
            data_samples['bboxes_labels'][:, 3::2] *= scale_y

        # periodically sample a new target size for upcoming iterations
        message_hub = MessageHub.get_current_instance()
        if (message_hub.get_info('iter') + 1) % self._interval == 0:
            self._input_size = self._get_random_size(
                aspect_ratio=float(w / h), device=inputs.device)
        return inputs, data_samples
@MODELS.register_module()
class YOLOv5DetDataPreprocessor(DetDataPreprocessor):
    """Rewrite collate_fn to get faster training speed.

    Note: It must be used together with `mmyolo.datasets.utils.yolov5_collate`
    """

    def __init__(self, *args, non_blocking: Optional[bool] = True, **kwargs):
        # non_blocking defaults to True here to overlap host-to-device
        # copies with computation during training.
        super().__init__(*args, non_blocking=non_blocking, **kwargs)

    def forward(self, data: dict, training: bool = False) -> dict:
        """Perform normalization, padding and bgr2rgb conversion based on
        ``DetDataPreprocessor``.

        Args:
            data (dict): Data sampled from dataloader.
            training (bool): Whether to enable training time augmentation.

        Returns:
            dict: Data in the same format as the model input.
        """
        if not training:
            # inference path: defer to the standard mmdet preprocessing
            return super().forward(data, training)

        data = self.cast_data(data)
        inputs, data_samples = data['inputs'], data['data_samples']
        assert isinstance(data['data_samples'], dict)

        # TODO: Supports multi-scale training
        # BGR -> RGB via channel index flip on the already-batched tensor
        if self._channel_conversion and inputs.shape[1] == 3:
            inputs = inputs[:, [2, 1, 0], ...]

        if self._enable_normalize:
            inputs = (inputs - self.mean) / self.std

        if self.batch_augments is not None:
            for batch_aug in self.batch_augments:
                inputs, data_samples = batch_aug(inputs, data_samples)

        img_metas = [{'batch_input_shape': inputs.shape[2:]}] * len(inputs)
        data_samples_output = {
            'bboxes_labels': data_samples['bboxes_labels'],
            'img_metas': img_metas
        }
        # forward instance masks when present (e.g. segmentation training)
        if 'masks' in data_samples:
            data_samples_output['masks'] = data_samples['masks']

        return {'inputs': inputs, 'data_samples': data_samples_output}
@MODELS.register_module()
class PPYOLOEDetDataPreprocessor(DetDataPreprocessor):
    """Image pre-processor for detection tasks.

    The main difference between PPYOLOEDetDataPreprocessor and
    DetDataPreprocessor is the normalization order. The official
    PPYOLOE resize image first, and then normalize image.
    In DetDataPreprocessor, the order is reversed.

    Note: It must be used together with
    `mmyolo.datasets.utils.yolov5_collate`
    """

    def forward(self, data: dict, training: bool = False) -> dict:
        """Perform normalization, padding and bgr2rgb conversion based on
        ``BaseDataPreprocessor``. This class use batch_augments first, and
        then normalize the image, which is different from the
        `DetDataPreprocessor`.

        Args:
            data (dict): Data sampled from dataloader.
            training (bool): Whether to enable training time augmentation.

        Returns:
            dict: Data in the same format as the model input.
        """
        if not training:
            return super().forward(data, training)

        # Fixed path typo in the message below ('cconfigs' -> 'configs').
        assert isinstance(data['inputs'], list) and is_list_of(
            data['inputs'], torch.Tensor), \
            '"inputs" should be a list of Tensor, but got ' \
            f'{type(data["inputs"])}. The possible reason for this ' \
            'is that you are not using it with ' \
            '"mmyolo.datasets.utils.yolov5_collate". Please refer to ' \
            '"configs/ppyoloe/ppyoloe_plus_s_fast_8xb8-80e_coco.py".'

        data = self.cast_data(data)
        inputs, data_samples = data['inputs'], data['data_samples']
        assert isinstance(data['data_samples'], dict)

        # Per-image channel conversion and dtype cast.
        batch_inputs = []
        for _input in inputs:
            # channel transform (BGR -> RGB)
            if self._channel_conversion:
                _input = _input[[2, 1, 0], ...]
            # Convert to float after channel conversion to ensure
            # efficiency
            _input = _input.float()
            batch_inputs.append(_input)

        # Batch random resize image; the augment is expected to turn the
        # list of per-image tensors into one (N, C, H, W) tensor.
        if self.batch_augments is not None:
            for batch_aug in self.batch_augments:
                inputs, data_samples = batch_aug(batch_inputs, data_samples)
        else:
            # Without batch augments `inputs` stayed a list and the
            # normalization below crashed; stack explicitly instead
            # (requires all images to share one shape).
            inputs = torch.stack(batch_inputs, dim=0)

        if self._enable_normalize:
            inputs = (inputs - self.mean) / self.std

        img_metas = [{'batch_input_shape': inputs.shape[2:]}] * len(inputs)
        data_samples = {
            'bboxes_labels': data_samples['bboxes_labels'],
            'img_metas': img_metas
        }
        return {'inputs': inputs, 'data_samples': data_samples}
# TODO: No generality. Its input data format is different
# mmdet's batch aug, and it must be compatible in the future.
@MODELS.register_module()
class PPYOLOEBatchRandomResize(BatchSyncRandomResize):
    """PPYOLOE batch random resize.

    Args:
        random_size_range (tuple): The multi-scale random range during
            multi-scale training.
        interval (int): The iter interval of change
            image size. Defaults to 10.
        size_divisor (int): Image size divisible factor.
            Defaults to 32.
        random_interp (bool): Whether to choose interp_mode randomly.
            If set to True, the type of `interp_mode` must be list.
            If set to False, the type of `interp_mode` must be str.
            Defaults to True.
        interp_mode (Union[List, str]): The modes available for resizing
            are ('nearest', 'bilinear', 'bicubic', 'area').
        keep_ratio (bool): Whether to keep the aspect ratio when resizing
            the image. Now we only support keep_ratio=False.
            Defaults to False.
    """

    def __init__(self,
                 random_size_range: Tuple[int, int],
                 interval: int = 1,
                 size_divisor: int = 32,
                 random_interp: bool = True,
                 interp_mode: Union[List[str], str] = [
                     'nearest', 'bilinear', 'bicubic', 'area'
                 ],
                 keep_ratio: bool = False) -> None:
        # NOTE: the mutable list default above is only read, never mutated,
        # so sharing it across instances is harmless here.
        super().__init__(random_size_range, interval, size_divisor)
        self.random_interp = random_interp
        self.keep_ratio = keep_ratio
        # TODO: need to support keep_ratio==True
        assert not self.keep_ratio, 'We do not yet support keep_ratio=True'

        if self.random_interp:
            assert isinstance(interp_mode, list) and len(interp_mode) > 1,\
                'While random_interp==True, the type of `interp_mode`' \
                ' must be list and len(interp_mode) must large than 1'
            self.interp_mode_list = interp_mode
            self.interp_mode = None
        else:
            assert isinstance(interp_mode, str),\
                'While random_interp==False, the type of ' \
                '`interp_mode` must be str'
            assert interp_mode in ['nearest', 'bilinear', 'bicubic', 'area']
            self.interp_mode_list = None
            self.interp_mode = interp_mode

    # Annotation fixed: the second element of the result is the updated
    # data_samples dict, not a Tensor.
    def forward(self, inputs: list,
                data_samples: dict) -> Tuple[Tensor, dict]:
        """Resize a batch of images and bboxes to shape ``self._input_size``.

        The inputs and data_samples should be list, and
        ``PPYOLOEBatchRandomResize`` must be used with
        ``PPYOLOEDetDataPreprocessor`` and ``yolov5_collate`` with
        ``use_ms_training == True``.
        """
        assert isinstance(inputs, list),\
            'The type of inputs must be list. The possible reason for this ' \
            'is that you are not using it with `PPYOLOEDetDataPreprocessor` ' \
            'and `yolov5_collate` with use_ms_training == True.'
        bboxes_labels = data_samples['bboxes_labels']

        message_hub = MessageHub.get_current_instance()
        if (message_hub.get_info('iter') + 1) % self._interval == 0:
            # get current input size
            self._input_size, interp_mode = self._get_random_size_and_interp()
            if self.random_interp:
                self.interp_mode = interp_mode

        # TODO: need to support type(inputs)==Tensor
        if isinstance(inputs, list):
            outputs = []
            for i in range(len(inputs)):
                _batch_input = inputs[i]
                h, w = _batch_input.shape[-2:]
                scale_y = self._input_size[0] / h
                scale_x = self._input_size[1] / w
                if scale_x != 1. or scale_y != 1.:
                    # 'nearest'/'area' do not accept align_corners
                    if self.interp_mode in ('nearest', 'area'):
                        align_corners = None
                    else:
                        align_corners = False
                    _batch_input = F.interpolate(
                        _batch_input.unsqueeze(0),
                        size=self._input_size,
                        mode=self.interp_mode,
                        align_corners=align_corners)

                    # rescale boxes of image i in-place; columns 2..5 look
                    # like (x1, y1, x2, y2) given the x/y scaling pattern
                    # — TODO confirm against yolov5_collate's layout
                    indexes = bboxes_labels[:, 0] == i
                    bboxes_labels[indexes, 2] *= scale_x
                    bboxes_labels[indexes, 3] *= scale_y
                    bboxes_labels[indexes, 4] *= scale_x
                    bboxes_labels[indexes, 5] *= scale_y

                    data_samples['bboxes_labels'] = bboxes_labels
                else:
                    _batch_input = _batch_input.unsqueeze(0)

                outputs.append(_batch_input)

            # convert to Tensor
            return torch.cat(outputs, dim=0), data_samples
        else:
            raise NotImplementedError('Not implemented yet!')

    # Annotation fixed: returns an (h, w) size tuple plus the chosen
    # interp mode (None when random_interp is disabled).
    def _get_random_size_and_interp(self) -> Tuple[Tuple[int, int],
                                                   Optional[str]]:
        """Randomly generate a shape in ``_random_size_range`` and a
        interp_mode in interp_mode_list."""
        size = random.randint(*self._random_size_range)
        input_size = (self._size_divisor * size, self._size_divisor * size)

        if self.random_interp:
            interp_ind = random.randint(0, len(self.interp_mode_list) - 1)
            interp_mode = self.interp_mode_list[interp_ind]
        else:
            interp_mode = None
        return input_size, interp_mode
| 11,943 | 38.419142 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/detectors/yolo_detector.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.models.detectors.single_stage import SingleStageDetector
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from mmengine.dist import get_world_size
from mmengine.logging import print_log
from mmyolo.registry import MODELS
@MODELS.register_module()
class YOLODetector(SingleStageDetector):
    r"""Single-stage detector wrapper shared by the YOLO series.

    Delegates everything to :class:`SingleStageDetector` and, on multi-GPU
    runs, optionally converts all BatchNorm layers to SyncBatchNorm.

    Args:
        backbone (:obj:`ConfigDict` or dict): The backbone config.
        neck (:obj:`ConfigDict` or dict): The neck config.
        bbox_head (:obj:`ConfigDict` or dict): The bbox head config.
        train_cfg (:obj:`ConfigDict` or dict, optional): The training config
            of YOLO. Defaults to None.
        test_cfg (:obj:`ConfigDict` or dict, optional): The testing config
            of YOLO. Defaults to None.
        data_preprocessor (:obj:`ConfigDict` or dict, optional): Config of
            :class:`DetDataPreprocessor` to process the input data.
            Defaults to None.
        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
            list[dict], optional): Initialization config dict.
            Defaults to None.
        use_syncbn (bool): whether to use SyncBatchNorm. Defaults to True.
    """

    def __init__(self,
                 backbone: ConfigType,
                 neck: ConfigType,
                 bbox_head: ConfigType,
                 train_cfg: OptConfigType = None,
                 test_cfg: OptConfigType = None,
                 data_preprocessor: OptConfigType = None,
                 init_cfg: OptMultiConfig = None,
                 use_syncbn: bool = True):
        super().__init__(
            backbone=backbone,
            neck=neck,
            bbox_head=bbox_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            data_preprocessor=data_preprocessor,
            init_cfg=init_cfg)

        # Switch BN -> SyncBN only when actually running distributed.
        # TODO: Waiting for mmengine support
        world_size = get_world_size()
        if use_syncbn and world_size > 1:
            torch.nn.SyncBatchNorm.convert_sync_batchnorm(self)
            print_log('Using SyncBatchNorm()', 'current')
| 2,138 | 38.611111 | 76 | py |
mmyolo | mmyolo-main/mmyolo/models/plugins/cbam.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.utils import OptMultiConfig
from mmengine.model import BaseModule
from mmyolo.registry import MODELS
class ChannelAttention(BaseModule):
    """Channel attention branch of CBAM.

    Squeezes the spatial dimensions with both average- and max-pooling,
    pushes each pooled vector through a shared two-layer bottleneck
    (1x1 convs), and fuses the two paths with a sigmoid into a per-channel
    gate.

    Args:
        channels (int): The input (and output) channels of the
            ChannelAttention.
        reduce_ratio (int): Squeeze ratio in ChannelAttention, the
            intermediate channel will be ``int(channels/ratio)``.
            Defaults to 16.
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='ReLU').
    """

    def __init__(self,
                 channels: int,
                 reduce_ratio: int = 16,
                 act_cfg: dict = dict(type='ReLU')):
        super().__init__()

        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)

        mid_channels = int(channels / reduce_ratio)
        squeeze = ConvModule(
            in_channels=channels,
            out_channels=mid_channels,
            kernel_size=1,
            stride=1,
            conv_cfg=None,
            act_cfg=act_cfg)
        excite = ConvModule(
            in_channels=mid_channels,
            out_channels=channels,
            kernel_size=1,
            stride=1,
            conv_cfg=None,
            act_cfg=None)
        # shared bottleneck applied to both pooled descriptors
        self.fc = nn.Sequential(squeeze, excite)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return sigmoid(MLP(avgpool(x)) + MLP(maxpool(x)))."""
        gate = self.fc(self.avg_pool(x)) + self.fc(self.max_pool(x))
        return self.sigmoid(gate)
class SpatialAttention(BaseModule):
    """Spatial attention branch of CBAM.

    Collapses the channel dimension into mean and max maps, then predicts a
    single-channel spatial gate with a sigmoid-activated convolution.

    Args:
        kernel_size (int): The size of the convolution kernel in
            SpatialAttention. Defaults to 7.
    """

    def __init__(self, kernel_size: int = 7):
        super().__init__()

        # two input channels: channel-wise mean map and channel-wise max map
        self.conv = ConvModule(
            in_channels=2,
            out_channels=1,
            kernel_size=kernel_size,
            stride=1,
            padding=kernel_size // 2,
            conv_cfg=None,
            act_cfg=dict(type='Sigmoid'))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Produce an (N, 1, H, W) spatial gate for ``x``."""
        mean_map = torch.mean(x, dim=1, keepdim=True)
        max_map = torch.max(x, dim=1, keepdim=True)[0]
        return self.conv(torch.cat([mean_map, max_map], dim=1))
@MODELS.register_module()
class CBAM(BaseModule):
    """Convolutional Block Attention Module. arxiv link:
    https://arxiv.org/abs/1807.06521v2.

    Applies channel attention and then spatial attention, each acting as a
    multiplicative gate on the feature map.

    Args:
        in_channels (int): The input (and output) channels of the CBAM.
        reduce_ratio (int): Squeeze ratio in ChannelAttention, the
            intermediate channel will be ``int(channels/ratio)``.
            Defaults to 16.
        kernel_size (int): The size of the convolution kernel in
            SpatialAttention. Defaults to 7.
        act_cfg (dict): Config dict for activation layer in ChannelAttention
            Defaults to dict(type='ReLU').
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
    """

    def __init__(self,
                 in_channels: int,
                 reduce_ratio: int = 16,
                 kernel_size: int = 7,
                 act_cfg: dict = dict(type='ReLU'),
                 init_cfg: OptMultiConfig = None):
        super().__init__(init_cfg)
        self.channel_attention = ChannelAttention(
            channels=in_channels, reduce_ratio=reduce_ratio, act_cfg=act_cfg)
        self.spatial_attention = SpatialAttention(kernel_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Gate ``x`` with channel attention, then with spatial attention."""
        gated = x * self.channel_attention(x)
        return gated * self.spatial_attention(gated)
| 3,949 | 31.916667 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/necks/yolox_pafpn.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmdet.models.backbones.csp_darknet import CSPLayer
from mmdet.utils import ConfigType, OptMultiConfig
from mmyolo.registry import MODELS
from .base_yolo_neck import BaseYOLONeck
@MODELS.register_module()
class YOLOXPAFPN(BaseYOLONeck):
    """Path Aggregation Network used in YOLOX.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        num_csp_blocks (int): Number of bottlenecks in CSPLayer. Defaults to 3.
        use_depthwise (bool): Whether to use depthwise separable convolution.
            Defaults to False.
        freeze_all(bool): Whether to freeze the model. Defaults to False.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
    """

    def __init__(self,
                 in_channels: List[int],
                 out_channels: int,
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 num_csp_blocks: int = 3,
                 use_depthwise: bool = False,
                 freeze_all: bool = False,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 init_cfg: OptMultiConfig = None):
        # Depth scaling is applied once here, not per layer.
        self.num_csp_blocks = round(num_csp_blocks * deepen_factor)
        self.use_depthwise = use_depthwise

        super().__init__(
            in_channels=[
                int(channel * widen_factor) for channel in in_channels
            ],
            out_channels=int(out_channels * widen_factor),
            deepen_factor=deepen_factor,
            widen_factor=widen_factor,
            freeze_all=freeze_all,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            init_cfg=init_cfg)

    def build_reduce_layer(self, idx: int) -> nn.Module:
        """build reduce layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The reduce layer.
        """
        # Only the top (lowest-resolution) scale is channel-reduced before
        # the top-down path. Use the last index rather than a hard-coded 2
        # so the neck is not tied to exactly three input scales, consistent
        # with YOLOv5PAFPN / CSPNeXtPAFPN.
        if idx == len(self.in_channels) - 1:
            layer = ConvModule(
                self.in_channels[idx],
                self.in_channels[idx - 1],
                1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        else:
            layer = nn.Identity()

        return layer

    def build_upsample_layer(self, *args, **kwargs) -> nn.Module:
        """build upsample layer."""
        return nn.Upsample(scale_factor=2, mode='nearest')

    def build_top_down_layer(self, idx: int) -> nn.Module:
        """build top down layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The top down layer.
        """
        if idx == 1:
            # Bottom-most top-down stage: fuse only, no further reduction.
            return CSPLayer(
                self.in_channels[idx - 1] * 2,
                self.in_channels[idx - 1],
                num_blocks=self.num_csp_blocks,
                add_identity=False,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        else:
            # Higher stages append a 1x1 conv so the result matches the
            # channel count of the next (larger) scale for concatenation.
            return nn.Sequential(
                CSPLayer(
                    self.in_channels[idx - 1] * 2,
                    self.in_channels[idx - 1],
                    num_blocks=self.num_csp_blocks,
                    add_identity=False,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg),
                ConvModule(
                    self.in_channels[idx - 1],
                    self.in_channels[idx - 2],
                    kernel_size=1,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))

    def build_downsample_layer(self, idx: int) -> nn.Module:
        """build downsample layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The downsample layer.
        """
        # 3x3 stride-2 conv; optionally depthwise-separable.
        conv = DepthwiseSeparableConvModule \
            if self.use_depthwise else ConvModule
        return conv(
            self.in_channels[idx],
            self.in_channels[idx],
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def build_bottom_up_layer(self, idx: int) -> nn.Module:
        """build bottom up layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The bottom up layer.
        """
        return CSPLayer(
            self.in_channels[idx] * 2,
            self.in_channels[idx + 1],
            num_blocks=self.num_csp_blocks,
            add_identity=False,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def build_out_layer(self, idx: int) -> nn.Module:
        """build out layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The out layer.
        """
        # 1x1 conv that maps every scale to the common out_channels.
        return ConvModule(
            self.in_channels[idx],
            self.out_channels,
            1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
| 5,747 | 32.225434 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/necks/yolov8_pafpn.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Union
import torch.nn as nn
from mmdet.utils import ConfigType, OptMultiConfig
from mmyolo.registry import MODELS
from .. import CSPLayerWithTwoConv
from ..utils import make_divisible, make_round
from .yolov5_pafpn import YOLOv5PAFPN
@MODELS.register_module()
class YOLOv8PAFPN(YOLOv5PAFPN):
    """Path Aggregation Network used in YOLOv8.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale)
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        num_csp_blocks (int): Number of bottlenecks in CSPLayer. Defaults to 1.
        freeze_all(bool): Whether to freeze the model
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
    """

    def __init__(self,
                 in_channels: List[int],
                 out_channels: Union[List[int], int],
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 num_csp_blocks: int = 3,
                 freeze_all: bool = False,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 init_cfg: OptMultiConfig = None):
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            deepen_factor=deepen_factor,
            widen_factor=widen_factor,
            num_csp_blocks=num_csp_blocks,
            freeze_all=freeze_all,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            init_cfg=init_cfg)

    def build_reduce_layer(self, idx: int) -> nn.Module:
        """build reduce layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The reduce layer.
        """
        # Unlike YOLOv5, YOLOv8 feeds backbone features into the top-down
        # path without any channel reduction.
        return nn.Identity()

    def build_top_down_layer(self, idx: int) -> nn.Module:
        """build top down layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The top down layer.
        """
        # Input is the concat of the upsampled higher level and this level.
        num_in = make_divisible(
            self.in_channels[idx - 1] + self.in_channels[idx],
            self.widen_factor)
        num_out = make_divisible(self.out_channels[idx - 1],
                                 self.widen_factor)
        return CSPLayerWithTwoConv(
            num_in,
            num_out,
            num_blocks=make_round(self.num_csp_blocks, self.deepen_factor),
            add_identity=False,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def build_bottom_up_layer(self, idx: int) -> nn.Module:
        """build bottom up layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The bottom up layer.
        """
        # Input is the concat of the downsampled lower level and this level.
        num_in = make_divisible(
            self.out_channels[idx] + self.out_channels[idx + 1],
            self.widen_factor)
        num_out = make_divisible(self.out_channels[idx + 1],
                                 self.widen_factor)
        return CSPLayerWithTwoConv(
            num_in,
            num_out,
            num_blocks=make_round(self.num_csp_blocks, self.deepen_factor),
            add_identity=False,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
| 3,716 | 35.087379 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/necks/yolov6_pafpn.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.utils import ConfigType, OptMultiConfig
from mmyolo.registry import MODELS
from ..layers import BepC3StageBlock, RepStageBlock
from ..utils import make_round
from .base_yolo_neck import BaseYOLONeck
@MODELS.register_module()
class YOLOv6RepPAFPN(BaseYOLONeck):
    """Path Aggregation Network used in YOLOv6.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (List[int]): Number of output channels per scale.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        num_csp_blocks (int): Number of bottlenecks in CSPLayer.
            Defaults to 12.
        freeze_all(bool): Whether to freeze the model. Defaults to False.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='ReLU', inplace=True).
        block_cfg (dict): Config dict for the block used to build each
            layer. Defaults to dict(type='RepVGGBlock').
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
    """

    def __init__(self,
                 in_channels: List[int],
                 out_channels: List[int],
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 num_csp_blocks: int = 12,
                 freeze_all: bool = False,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='ReLU', inplace=True),
                 block_cfg: ConfigType = dict(type='RepVGGBlock'),
                 init_cfg: OptMultiConfig = None):
        # Stored unscaled; depth scaling is applied later via make_round,
        # width scaling via widen_factor in each build method.
        self.num_csp_blocks = num_csp_blocks
        self.block_cfg = block_cfg
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            deepen_factor=deepen_factor,
            widen_factor=widen_factor,
            freeze_all=freeze_all,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            init_cfg=init_cfg)

    def build_reduce_layer(self, idx: int) -> nn.Module:
        """build reduce layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The reduce layer.
        """
        # Only the top scale (idx == 2, i.e. the last of three inputs) is
        # channel-reduced with a 1x1 conv before the top-down path.
        if idx == 2:
            layer = ConvModule(
                in_channels=int(self.in_channels[idx] * self.widen_factor),
                out_channels=int(self.out_channels[idx - 1] *
                                 self.widen_factor),
                kernel_size=1,
                stride=1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        else:
            layer = nn.Identity()

        return layer

    def build_upsample_layer(self, idx: int) -> nn.Module:
        """build upsample layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The upsample layer.
        """
        # YOLOv6 upsamples with a learned 2x transposed conv rather than
        # the nearest-neighbour interpolation used by other YOLO necks.
        return nn.ConvTranspose2d(
            in_channels=int(self.out_channels[idx - 1] * self.widen_factor),
            out_channels=int(self.out_channels[idx - 1] * self.widen_factor),
            kernel_size=2,
            stride=2,
            bias=True)

    def build_top_down_layer(self, idx: int) -> nn.Module:
        """build top down layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The top down layer.
        """
        block_cfg = self.block_cfg.copy()

        # Fuses the concat of the upsampled higher level and this level.
        layer0 = RepStageBlock(
            in_channels=int(
                (self.out_channels[idx - 1] + self.in_channels[idx - 1]) *
                self.widen_factor),
            out_channels=int(self.out_channels[idx - 1] * self.widen_factor),
            num_blocks=make_round(self.num_csp_blocks, self.deepen_factor),
            block_cfg=block_cfg)

        if idx == 1:
            return layer0
        elif idx == 2:
            # The higher stage appends a 1x1 conv so the output channel
            # count matches the next (larger) scale for concatenation.
            layer1 = ConvModule(
                in_channels=int(self.out_channels[idx - 1] *
                                self.widen_factor),
                out_channels=int(self.out_channels[idx - 2] *
                                 self.widen_factor),
                kernel_size=1,
                stride=1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
            return nn.Sequential(layer0, layer1)

    def build_downsample_layer(self, idx: int) -> nn.Module:
        """build downsample layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The downsample layer.
        """
        # Plain 3x3 stride-2 conv halves the spatial resolution.
        return ConvModule(
            in_channels=int(self.out_channels[idx] * self.widen_factor),
            out_channels=int(self.out_channels[idx] * self.widen_factor),
            kernel_size=3,
            stride=2,
            padding=3 // 2,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def build_bottom_up_layer(self, idx: int) -> nn.Module:
        """build bottom up layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The bottom up layer.
        """
        block_cfg = self.block_cfg.copy()

        return RepStageBlock(
            in_channels=int(self.out_channels[idx] * 2 * self.widen_factor),
            out_channels=int(self.out_channels[idx + 1] * self.widen_factor),
            num_blocks=make_round(self.num_csp_blocks, self.deepen_factor),
            block_cfg=block_cfg)

    def build_out_layer(self, *args, **kwargs) -> nn.Module:
        """build out layer."""
        # The bottom-up outputs are used directly by the head.
        return nn.Identity()

    def init_weights(self):
        """Initialize weights; use PyTorch defaults when no init_cfg."""
        if self.init_cfg is None:
            """Initialize the parameters."""
            for m in self.modules():
                if isinstance(m, torch.nn.Conv2d):
                    # In order to be consistent with the source code,
                    # reset the Conv2d initialization parameters
                    m.reset_parameters()
        else:
            super().init_weights()
@MODELS.register_module()
class YOLOv6CSPRepPAFPN(YOLOv6RepPAFPN):
    """Path Aggregation Network used in YOLOv6.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (List[int]): Number of output channels per scale.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        hidden_ratio (float): Hidden ratio of the BepC3StageBlock.
            Defaults to 0.5.
        num_csp_blocks (int): Number of bottlenecks in CSPLayer.
            Defaults to 12.
        freeze_all(bool): Whether to freeze the model. Defaults to False.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='ReLU', inplace=True).
        block_cfg (dict): Config dict for the block used to build each
            layer. Defaults to dict(type='RepVGGBlock').
        block_act_cfg (dict): Config dict for activation layer used in each
            stage. Defaults to dict(type='SiLU', inplace=True).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
    """

    def __init__(self,
                 in_channels: List[int],
                 out_channels: List[int],
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 hidden_ratio: float = 0.5,
                 num_csp_blocks: int = 12,
                 freeze_all: bool = False,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='ReLU', inplace=True),
                 block_act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 block_cfg: ConfigType = dict(type='RepVGGBlock'),
                 init_cfg: OptMultiConfig = None):
        self.hidden_ratio = hidden_ratio
        self.block_act_cfg = block_act_cfg
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            deepen_factor=deepen_factor,
            widen_factor=widen_factor,
            num_csp_blocks=num_csp_blocks,
            freeze_all=freeze_all,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            block_cfg=block_cfg,
            init_cfg=init_cfg)

    def build_top_down_layer(self, idx: int) -> nn.Module:
        """build top down layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The top down layer.
        """
        block_cfg = self.block_cfg.copy()

        # Same wiring as the parent class, but fuses with BepC3StageBlock
        # instead of RepStageBlock.
        layer0 = BepC3StageBlock(
            in_channels=int(
                (self.out_channels[idx - 1] + self.in_channels[idx - 1]) *
                self.widen_factor),
            out_channels=int(self.out_channels[idx - 1] * self.widen_factor),
            num_blocks=make_round(self.num_csp_blocks, self.deepen_factor),
            block_cfg=block_cfg,
            hidden_ratio=self.hidden_ratio,
            norm_cfg=self.norm_cfg,
            act_cfg=self.block_act_cfg)

        if idx == 1:
            return layer0
        elif idx == 2:
            # The higher stage appends a 1x1 conv so the output channel
            # count matches the next (larger) scale for concatenation.
            layer1 = ConvModule(
                in_channels=int(self.out_channels[idx - 1] *
                                self.widen_factor),
                out_channels=int(self.out_channels[idx - 2] *
                                 self.widen_factor),
                kernel_size=1,
                stride=1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
            return nn.Sequential(layer0, layer1)

    def build_bottom_up_layer(self, idx: int) -> nn.Module:
        """build bottom up layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The bottom up layer.
        """
        block_cfg = self.block_cfg.copy()

        return BepC3StageBlock(
            in_channels=int(self.out_channels[idx] * 2 * self.widen_factor),
            out_channels=int(self.out_channels[idx + 1] * self.widen_factor),
            num_blocks=make_round(self.num_csp_blocks, self.deepen_factor),
            block_cfg=block_cfg,
            hidden_ratio=self.hidden_ratio,
            norm_cfg=self.norm_cfg,
            act_cfg=self.block_act_cfg)
| 10,763 | 36.636364 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/necks/yolov5_pafpn.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Union
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.models.backbones.csp_darknet import CSPLayer
from mmdet.utils import ConfigType, OptMultiConfig
from mmyolo.registry import MODELS
from ..utils import make_divisible, make_round
from .base_yolo_neck import BaseYOLONeck
@MODELS.register_module()
class YOLOv5PAFPN(BaseYOLONeck):
    """Path Aggregation Network used in YOLOv5.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale)
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        num_csp_blocks (int): Number of bottlenecks in CSPLayer. Defaults to 1.
        freeze_all(bool): Whether to freeze the model. Defaults to False.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
    """

    def __init__(self,
                 in_channels: List[int],
                 out_channels: Union[List[int], int],
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 num_csp_blocks: int = 1,
                 freeze_all: bool = False,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 init_cfg: OptMultiConfig = None):
        # Stored unscaled; depth scaling is applied later via make_round,
        # width scaling via make_divisible in each build method.
        self.num_csp_blocks = num_csp_blocks
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            deepen_factor=deepen_factor,
            widen_factor=widen_factor,
            freeze_all=freeze_all,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            init_cfg=init_cfg)

    def init_weights(self):
        """Initialize weights; use PyTorch defaults when no init_cfg."""
        if self.init_cfg is None:
            """Initialize the parameters."""
            for m in self.modules():
                if isinstance(m, torch.nn.Conv2d):
                    # In order to be consistent with the source code,
                    # reset the Conv2d initialization parameters
                    m.reset_parameters()
        else:
            super().init_weights()

    def build_reduce_layer(self, idx: int) -> nn.Module:
        """build reduce layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The reduce layer.
        """
        # Only the top (lowest-resolution) scale is channel-reduced with a
        # 1x1 conv before the top-down path.
        if idx == len(self.in_channels) - 1:
            layer = ConvModule(
                make_divisible(self.in_channels[idx], self.widen_factor),
                make_divisible(self.in_channels[idx - 1], self.widen_factor),
                1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        else:
            layer = nn.Identity()

        return layer

    def build_upsample_layer(self, *args, **kwargs) -> nn.Module:
        """build upsample layer."""
        return nn.Upsample(scale_factor=2, mode='nearest')

    def build_top_down_layer(self, idx: int):
        """build top down layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The top down layer.
        """
        if idx == 1:
            # Bottom-most top-down stage: fuse only, no further reduction.
            return CSPLayer(
                make_divisible(self.in_channels[idx - 1] * 2,
                               self.widen_factor),
                make_divisible(self.in_channels[idx - 1], self.widen_factor),
                num_blocks=make_round(self.num_csp_blocks, self.deepen_factor),
                add_identity=False,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        else:
            # Higher stages append a 1x1 conv so the output channel count
            # matches the next (larger) scale for concatenation.
            return nn.Sequential(
                CSPLayer(
                    make_divisible(self.in_channels[idx - 1] * 2,
                                   self.widen_factor),
                    make_divisible(self.in_channels[idx - 1],
                                   self.widen_factor),
                    num_blocks=make_round(self.num_csp_blocks,
                                          self.deepen_factor),
                    add_identity=False,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg),
                ConvModule(
                    make_divisible(self.in_channels[idx - 1],
                                   self.widen_factor),
                    make_divisible(self.in_channels[idx - 2],
                                   self.widen_factor),
                    kernel_size=1,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))

    def build_downsample_layer(self, idx: int) -> nn.Module:
        """build downsample layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The downsample layer.
        """
        # 3x3 stride-2 conv halves the spatial resolution.
        return ConvModule(
            make_divisible(self.in_channels[idx], self.widen_factor),
            make_divisible(self.in_channels[idx], self.widen_factor),
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def build_bottom_up_layer(self, idx: int) -> nn.Module:
        """build bottom up layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The bottom up layer.
        """
        return CSPLayer(
            make_divisible(self.in_channels[idx] * 2, self.widen_factor),
            make_divisible(self.in_channels[idx + 1], self.widen_factor),
            num_blocks=make_round(self.num_csp_blocks, self.deepen_factor),
            add_identity=False,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def build_out_layer(self, *args, **kwargs) -> nn.Module:
        """build out layer."""
        # The bottom-up outputs are used directly by the head.
        return nn.Identity()
| 6,273 | 35.476744 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/necks/cspnext_pafpn.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Sequence
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmdet.models.backbones.csp_darknet import CSPLayer
from mmdet.utils import ConfigType, OptMultiConfig
from mmyolo.registry import MODELS
from .base_yolo_neck import BaseYOLONeck
@MODELS.register_module()
class CSPNeXtPAFPN(BaseYOLONeck):
    """Path Aggregation Network with CSPNeXt blocks.

    Args:
        in_channels (Sequence[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale)
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        num_csp_blocks (int): Number of bottlenecks in CSPLayer.
            Defaults to 3.
        freeze_all (bool): Whether to freeze the model. Defaults to False.
        use_depthwise (bool): Whether to use depthwise separable convolution in
            blocks. Defaults to False.
        expand_ratio (float): Ratio to adjust the number of channels of the
            hidden layer. Defaults to 0.5.
        upsample_cfg (dict): Config dict for interpolate layer.
            Default: `dict(scale_factor=2, mode='nearest')`
        conv_cfg (dict, optional): Config dict for convolution layer.
            Default: None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN')
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='SiLU', inplace=True)
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Default: None.
    """

    def __init__(
        self,
        in_channels: Sequence[int],
        out_channels: int,
        deepen_factor: float = 1.0,
        widen_factor: float = 1.0,
        num_csp_blocks: int = 3,
        freeze_all: bool = False,
        use_depthwise: bool = False,
        expand_ratio: float = 0.5,
        upsample_cfg: ConfigType = dict(scale_factor=2, mode='nearest'),
        conv_cfg: dict = None,
        norm_cfg: ConfigType = dict(type='BN'),
        act_cfg: ConfigType = dict(type='SiLU', inplace=True),
        init_cfg: OptMultiConfig = dict(
            type='Kaiming',
            layer='Conv2d',
            a=math.sqrt(5),
            distribution='uniform',
            mode='fan_in',
            nonlinearity='leaky_relu')
    ) -> None:
        # Depth scaling is applied once here, not per layer.
        self.num_csp_blocks = round(num_csp_blocks * deepen_factor)
        # Conv class used throughout the neck (regular or depthwise-sep).
        self.conv = DepthwiseSeparableConvModule \
            if use_depthwise else ConvModule
        self.upsample_cfg = upsample_cfg
        self.expand_ratio = expand_ratio
        self.conv_cfg = conv_cfg

        super().__init__(
            in_channels=[
                int(channel * widen_factor) for channel in in_channels
            ],
            out_channels=int(out_channels * widen_factor),
            deepen_factor=deepen_factor,
            widen_factor=widen_factor,
            freeze_all=freeze_all,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            init_cfg=init_cfg)

    def build_reduce_layer(self, idx: int) -> nn.Module:
        """build reduce layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The reduce layer.
        """
        # Only the top (lowest-resolution) scale is channel-reduced with a
        # 1x1 conv before the top-down path.
        if idx == len(self.in_channels) - 1:
            layer = self.conv(
                self.in_channels[idx],
                self.in_channels[idx - 1],
                1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        else:
            layer = nn.Identity()

        return layer

    def build_upsample_layer(self, *args, **kwargs) -> nn.Module:
        """build upsample layer."""
        return nn.Upsample(**self.upsample_cfg)

    def build_top_down_layer(self, idx: int) -> nn.Module:
        """build top down layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The top down layer.
        """
        if idx == 1:
            # Bottom-most top-down stage: fuse only, no further reduction.
            return CSPLayer(
                self.in_channels[idx - 1] * 2,
                self.in_channels[idx - 1],
                num_blocks=self.num_csp_blocks,
                add_identity=False,
                use_cspnext_block=True,
                expand_ratio=self.expand_ratio,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        else:
            # Higher stages append a 1x1 conv so the output channel count
            # matches the next (larger) scale for concatenation.
            return nn.Sequential(
                CSPLayer(
                    self.in_channels[idx - 1] * 2,
                    self.in_channels[idx - 1],
                    num_blocks=self.num_csp_blocks,
                    add_identity=False,
                    use_cspnext_block=True,
                    expand_ratio=self.expand_ratio,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg),
                self.conv(
                    self.in_channels[idx - 1],
                    self.in_channels[idx - 2],
                    kernel_size=1,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))

    def build_downsample_layer(self, idx: int) -> nn.Module:
        """build downsample layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The downsample layer.
        """
        # 3x3 stride-2 conv halves the spatial resolution.
        return self.conv(
            self.in_channels[idx],
            self.in_channels[idx],
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def build_bottom_up_layer(self, idx: int) -> nn.Module:
        """build bottom up layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The bottom up layer.
        """
        return CSPLayer(
            self.in_channels[idx] * 2,
            self.in_channels[idx + 1],
            num_blocks=self.num_csp_blocks,
            add_identity=False,
            use_cspnext_block=True,
            expand_ratio=self.expand_ratio,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)

    def build_out_layer(self, idx: int) -> nn.Module:
        """build out layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The out layer.
        """
        # 3x3 conv that maps every scale to the common out_channels.
        return self.conv(
            self.in_channels[idx],
            self.out_channels,
            3,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
| 6,750 | 32.420792 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/necks/yolov7_pafpn.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.utils import ConfigType, OptMultiConfig
from mmyolo.registry import MODELS
from ..layers import MaxPoolAndStrideConvBlock, RepVGGBlock, SPPFCSPBlock
from .base_yolo_neck import BaseYOLONeck
@MODELS.register_module()
class YOLOv7PAFPN(BaseYOLONeck):
    """Path Aggregation Network used in YOLOv7.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale).
        block_cfg (dict): Config dict for block.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        spp_expand_ratio (float): Expand ratio of SPPCSPBlock.
            Defaults to 0.5.
        is_tiny_version (bool): Is tiny version of neck. If True,
            it means it is a yolov7 tiny model. Defaults to False.
        use_maxpool_in_downsample (bool): Whether maxpooling is
            used in downsample layers. Defaults to True.
        use_in_channels_in_downsample (bool): MaxPoolAndStrideConvBlock
            module input parameters. Defaults to False.
        use_repconv_outs (bool): Whether to use `repconv` in the output
            layer. Defaults to True.
        upsample_feats_cat_first (bool): Whether the output features are
            concat first after upsampling in the topdown module.
            Defaults to False for YOLOv7 (the base neck defaults to True).
        freeze_all(bool): Whether to freeze the model. Defaults to False.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
    """

    def __init__(self,
                 in_channels: List[int],
                 out_channels: List[int],
                 block_cfg: dict = dict(
                     type='ELANBlock',
                     middle_ratio=0.5,
                     block_ratio=0.25,
                     num_blocks=4,
                     num_convs_in_block=1),
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 spp_expand_ratio: float = 0.5,
                 is_tiny_version: bool = False,
                 use_maxpool_in_downsample: bool = True,
                 use_in_channels_in_downsample: bool = False,
                 use_repconv_outs: bool = True,
                 upsample_feats_cat_first: bool = False,
                 freeze_all: bool = False,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 init_cfg: OptMultiConfig = None):
        self.is_tiny_version = is_tiny_version
        self.use_maxpool_in_downsample = use_maxpool_in_downsample
        self.use_in_channels_in_downsample = use_in_channels_in_downsample
        self.spp_expand_ratio = spp_expand_ratio
        self.use_repconv_outs = use_repconv_outs
        self.block_cfg = block_cfg
        # Propagate the neck-wide norm/act configs to the blocks unless the
        # caller explicitly overrode them in block_cfg.
        self.block_cfg.setdefault('norm_cfg', norm_cfg)
        self.block_cfg.setdefault('act_cfg', act_cfg)

        super().__init__(
            in_channels=[
                int(channel * widen_factor) for channel in in_channels
            ],
            out_channels=[
                int(channel * widen_factor) for channel in out_channels
            ],
            deepen_factor=deepen_factor,
            widen_factor=widen_factor,
            upsample_feats_cat_first=upsample_feats_cat_first,
            freeze_all=freeze_all,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            init_cfg=init_cfg)

    def build_reduce_layer(self, idx: int) -> nn.Module:
        """build reduce layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The reduce layer.
        """
        # The top (lowest-resolution) scale goes through the SPP block;
        # every other scale is reduced with a plain 1x1 conv.
        if idx == len(self.in_channels) - 1:
            layer = SPPFCSPBlock(
                self.in_channels[idx],
                self.out_channels[idx],
                expand_ratio=self.spp_expand_ratio,
                is_tiny_version=self.is_tiny_version,
                kernel_sizes=5,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        else:
            layer = ConvModule(
                self.in_channels[idx],
                self.out_channels[idx],
                1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)

        return layer

    def build_upsample_layer(self, idx: int) -> nn.Module:
        """build upsample layer."""
        # 1x1 conv to the lower level's channel count, then 2x upsample.
        return nn.Sequential(
            ConvModule(
                self.out_channels[idx],
                self.out_channels[idx - 1],
                1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg),
            nn.Upsample(scale_factor=2, mode='nearest'))

    def build_top_down_layer(self, idx: int) -> nn.Module:
        """build top down layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The top down layer.
        """
        block_cfg = self.block_cfg.copy()
        block_cfg['in_channels'] = self.out_channels[idx - 1] * 2
        block_cfg['out_channels'] = self.out_channels[idx - 1]
        return MODELS.build(block_cfg)

    def build_downsample_layer(self, idx: int) -> nn.Module:
        """build downsample layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The downsample layer.
        """
        # The tiny version always downsamples with a plain stride-2 conv.
        if self.use_maxpool_in_downsample and not self.is_tiny_version:
            return MaxPoolAndStrideConvBlock(
                self.out_channels[idx],
                self.out_channels[idx + 1],
                use_in_channels_of_middle=self.use_in_channels_in_downsample,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        else:
            return ConvModule(
                self.out_channels[idx],
                self.out_channels[idx + 1],
                3,
                stride=2,
                padding=1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)

    def build_bottom_up_layer(self, idx: int) -> nn.Module:
        """build bottom up layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The bottom up layer.
        """
        block_cfg = self.block_cfg.copy()
        block_cfg['in_channels'] = self.out_channels[idx + 1] * 2
        block_cfg['out_channels'] = self.out_channels[idx + 1]
        return MODELS.build(block_cfg)

    def build_out_layer(self, idx: int) -> nn.Module:
        """build out layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The out layer.
        """
        if len(self.in_channels) == 4:
            # P6
            return nn.Identity()

        # Each output doubles the channel count, optionally through a
        # re-parameterizable RepVGG conv.
        out_channels = self.out_channels[idx] * 2
        if self.use_repconv_outs:
            return RepVGGBlock(
                self.out_channels[idx],
                out_channels,
                3,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        else:
            return ConvModule(
                self.out_channels[idx],
                out_channels,
                3,
                padding=1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
| 7,846 | 35.16129 | 77 | py |
mmyolo | mmyolo-main/mmyolo/models/necks/base_yolo_neck.py | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Union
import torch
import torch.nn as nn
from mmdet.utils import ConfigType, OptMultiConfig
from mmengine.model import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from mmyolo.registry import MODELS
@MODELS.register_module()
class BaseYOLONeck(BaseModule, metaclass=ABCMeta):
    """Base neck used in YOLO series.

    Subclasses implement the six ``build_*`` hooks; this class wires them
    into a top-down (FPN) pass followed by a bottom-up (PAN) pass.

    .. code:: text
     P5 neck model structure diagram
                        +--------+                     +-------+
                        |top_down|----------+--------->|  out  |---> output0
                        | layer1 |          |          | layer0|
                        +--------+          |          +-------+
     stride=8                ^              |
     idx=0  +------+    +--------+          |
     -----> |reduce|--->|   cat  |          |
            |layer0|    +--------+          |
            +------+         ^              v
                        +--------+    +-----------+
                        |upsample|    |downsample |
                        | layer1 |    |  layer0   |
                        +--------+    +-----------+
                             ^              |
                        +--------+          v
                        |top_down|    +-----------+
                        | layer2 |--->|    cat    |
                        +--------+    +-----------+
     stride=16               ^              v
     idx=1  +------+    +--------+    +-----------+    +-------+
     -----> |reduce|--->|   cat  |    | bottom_up |--->|  out  |---> output1
            |layer1|    +--------+    |   layer0  |    | layer1|
            +------+         ^        +-----------+    +-------+
                             |              v
                        +--------+    +-----------+
                        |upsample|    |downsample |
                        | layer2 |    |  layer1   |
     stride=32          +--------+    +-----------+
     idx=2  +------+         ^              v
     -----> |reduce|         |        +-----------+
            |layer2|---------+------->|    cat    |
            +------+                  +-----------+
                                            v
                                      +-----------+    +-------+
                                      | bottom_up |--->|  out  |---> output2
                                      |   layer1  |    | layer2|
                                      +-----------+    +-------+

    .. code:: text
     P6 neck model structure diagram
                        +--------+                     +-------+
                        |top_down|----------+--------->|  out  |---> output0
                        | layer1 |          |          | layer0|
                        +--------+          |          +-------+
     stride=8                ^              |
     idx=0  +------+    +--------+          |
     -----> |reduce|--->|   cat  |          |
            |layer0|    +--------+          |
            +------+         ^              v
                        +--------+    +-----------+
                        |upsample|    |downsample |
                        | layer1 |    |  layer0   |
                        +--------+    +-----------+
                             ^              |
                        +--------+          v
                        |top_down|    +-----------+
                        | layer2 |--->|    cat    |
                        +--------+    +-----------+
     stride=16               ^              v
     idx=1  +------+    +--------+    +-----------+    +-------+
     -----> |reduce|--->|   cat  |    | bottom_up |--->|  out  |---> output1
            |layer1|    +--------+    |   layer0  |    | layer1|
            +------+         ^        +-----------+    +-------+
                             |              v
                        +--------+    +-----------+
                        |upsample|    |downsample |
                        | layer2 |    |  layer1   |
                        +--------+    +-----------+
                             ^              |
                        +--------+          v
                        |top_down|    +-----------+
                        | layer3 |--->|    cat    |
                        +--------+    +-----------+
     stride=32               ^              v
     idx=2  +------+    +--------+    +-----------+    +-------+
     -----> |reduce|--->|   cat  |    | bottom_up |--->|  out  |---> output2
            |layer2|    +--------+    |   layer1  |    | layer2|
            +------+         ^        +-----------+    +-------+
                             |              v
                        +--------+    +-----------+
                        |upsample|    |downsample |
                        | layer3 |    |  layer2   |
                        +--------+    +-----------+
     stride=64               ^              v
     idx=3  +------+         |        +-----------+
     -----> |reduce|---------+------->|    cat    |
            |layer3|                  +-----------+
            +------+                        v
                                      +-----------+    +-------+
                                      | bottom_up |--->|  out  |---> output3
                                      |   layer2  |    | layer3|
                                      +-----------+    +-------+

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (int): Number of output channels (used at each scale)
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        upsample_feats_cat_first (bool): Whether the output features are
            concat first after upsampling in the topdown module.
            Defaults to True. Currently only YOLOv7 is false.
        freeze_all(bool): Whether to freeze the model. Defaults to False
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to None.
        act_cfg (dict): Config dict for activation layer.
            Defaults to None.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
    """
    def __init__(self,
                 in_channels: List[int],
                 out_channels: Union[int, List[int]],
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 upsample_feats_cat_first: bool = True,
                 freeze_all: bool = False,
                 norm_cfg: ConfigType = None,
                 act_cfg: ConfigType = None,
                 init_cfg: OptMultiConfig = None,
                 **kwargs):
        super().__init__(init_cfg)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.deepen_factor = deepen_factor
        self.widen_factor = widen_factor
        self.upsample_feats_cat_first = upsample_feats_cat_first
        self.freeze_all = freeze_all
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        # One reduce layer per input scale (idx 0 = shallowest).
        self.reduce_layers = nn.ModuleList()
        for idx in range(len(in_channels)):
            self.reduce_layers.append(self.build_reduce_layer(idx))
        # build top-down blocks
        # NOTE: built from deepest to shallowest (idx N-1 .. 1), so list
        # position 0 corresponds to the deepest level; forward() mirrors
        # this with the `len(in_channels) - 1 - idx` mapping.
        self.upsample_layers = nn.ModuleList()
        self.top_down_layers = nn.ModuleList()
        for idx in range(len(in_channels) - 1, 0, -1):
            self.upsample_layers.append(self.build_upsample_layer(idx))
            self.top_down_layers.append(self.build_top_down_layer(idx))
        # build bottom-up blocks
        self.downsample_layers = nn.ModuleList()
        self.bottom_up_layers = nn.ModuleList()
        for idx in range(len(in_channels) - 1):
            self.downsample_layers.append(self.build_downsample_layer(idx))
            self.bottom_up_layers.append(self.build_bottom_up_layer(idx))
        # Final per-level output projections.
        self.out_layers = nn.ModuleList()
        for idx in range(len(in_channels)):
            self.out_layers.append(self.build_out_layer(idx))
    @abstractmethod
    def build_reduce_layer(self, idx: int):
        """build reduce layer."""
        pass
    @abstractmethod
    def build_upsample_layer(self, idx: int):
        """build upsample layer."""
        pass
    @abstractmethod
    def build_top_down_layer(self, idx: int):
        """build top down layer."""
        pass
    @abstractmethod
    def build_downsample_layer(self, idx: int):
        """build downsample layer."""
        pass
    @abstractmethod
    def build_bottom_up_layer(self, idx: int):
        """build bottom up layer."""
        pass
    @abstractmethod
    def build_out_layer(self, idx: int):
        """build out layer."""
        pass
    def _freeze_all(self):
        """Freeze the model.

        Puts every BatchNorm into eval mode (fixes running stats) and
        disables gradients for all parameters.
        """
        for m in self.modules():
            if isinstance(m, _BatchNorm):
                m.eval()
            for param in m.parameters():
                param.requires_grad = False
    def train(self, mode=True):
        """Convert the model into training mode while keep the normalization
        layer freezed."""
        super().train(mode)
        if self.freeze_all:
            # Re-apply freezing: nn.Module.train() flips BN back to train
            # mode, so it must be undone after every call.
            self._freeze_all()
    def forward(self, inputs: List[torch.Tensor]) -> tuple:
        """Forward function.

        Args:
            inputs (List[torch.Tensor]): One feature map per scale,
                ordered shallow (large resolution) to deep.

        Returns:
            tuple: One output tensor per scale, same ordering as inputs.
        """
        assert len(inputs) == len(self.in_channels)
        # reduce layers
        reduce_outs = []
        for idx in range(len(self.in_channels)):
            reduce_outs.append(self.reduce_layers[idx](inputs[idx]))
        # top-down path
        inner_outs = [reduce_outs[-1]]
        for idx in range(len(self.in_channels) - 1, 0, -1):
            feat_high = inner_outs[0]
            feat_low = reduce_outs[idx - 1]
            # Layers were appended deepest-first, hence the index flip.
            upsample_feat = self.upsample_layers[len(self.in_channels) - 1 -
                                                idx](
                                                    feat_high)
            if self.upsample_feats_cat_first:
                top_down_layer_inputs = torch.cat([upsample_feat, feat_low], 1)
            else:
                # YOLOv7 concatenates in the opposite order.
                top_down_layer_inputs = torch.cat([feat_low, upsample_feat], 1)
            inner_out = self.top_down_layers[len(self.in_channels) - 1 - idx](
                top_down_layer_inputs)
            inner_outs.insert(0, inner_out)
        # bottom-up path
        outs = [inner_outs[0]]
        for idx in range(len(self.in_channels) - 1):
            feat_low = outs[-1]
            feat_high = inner_outs[idx + 1]
            downsample_feat = self.downsample_layers[idx](feat_low)
            out = self.bottom_up_layers[idx](
                torch.cat([downsample_feat, feat_high], 1))
            outs.append(out)
        # out_layers
        results = []
        for idx in range(len(self.in_channels)):
            results.append(self.out_layers[idx](outs[idx]))
        return tuple(results)
| 11,105 | 41.389313 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/necks/ppyoloe_csppan.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.utils import ConfigType, OptMultiConfig
from mmyolo.models.backbones.csp_resnet import CSPResLayer
from mmyolo.models.necks import BaseYOLONeck
from mmyolo.registry import MODELS
@MODELS.register_module()
class PPYOLOECSPPAFPN(BaseYOLONeck):
    """CSPPAN in PPYOLOE.

    Args:
        in_channels (List[int]): Number of input channels per scale.
        out_channels (List[int]): Number of output channels
            (used at each scale).
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        freeze_all(bool): Whether to freeze the model.
        num_csplayer (int): Number of `CSPResLayer` in per layer.
            Defaults to 1.
        num_blocks_per_layer (int): Number of blocks per `CSPResLayer`.
            Defaults to 3.
        block_cfg (dict): Config dict for block. Defaults to
            dict(type='PPYOLOEBasicBlock', shortcut=False, use_alpha=False)
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN', momentum=0.1, eps=1e-5).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        drop_block_cfg (dict, optional): Drop block config.
            Defaults to None. If you want to use Drop block after
            `CSPResLayer`, you can set this para as
            dict(type='mmdet.DropBlock', drop_prob=0.1,
            block_size=3, warm_iters=0).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
        use_spp (bool): Whether to use `SPP` in reduce layer.
            Defaults to False.
    """
    def __init__(self,
                 in_channels: List[int] = [256, 512, 1024],
                 out_channels: List[int] = [256, 512, 1024],
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 freeze_all: bool = False,
                 num_csplayer: int = 1,
                 num_blocks_per_layer: int = 3,
                 block_cfg: ConfigType = dict(
                     type='PPYOLOEBasicBlock', shortcut=False,
                     use_alpha=False),
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.1, eps=1e-5),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 drop_block_cfg: ConfigType = None,
                 init_cfg: OptMultiConfig = None,
                 use_spp: bool = False):
        self.block_cfg = block_cfg
        self.num_csplayer = num_csplayer
        # Depth scaling applies to the blocks inside each CSPResLayer.
        self.num_blocks_per_layer = round(num_blocks_per_layer * deepen_factor)
        # Only use spp in last reduce_layer, if use_spp=True.
        self.use_spp = use_spp
        self.drop_block_cfg = drop_block_cfg
        assert drop_block_cfg is None or isinstance(drop_block_cfg, dict)
        # Width scaling is applied here; the base class then sees the
        # already-scaled channel lists.
        super().__init__(
            in_channels=[
                int(channel * widen_factor) for channel in in_channels
            ],
            out_channels=[
                int(channel * widen_factor) for channel in out_channels
            ],
            deepen_factor=deepen_factor,
            widen_factor=widen_factor,
            freeze_all=freeze_all,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            init_cfg=init_cfg)
    def build_reduce_layer(self, idx: int):
        """build reduce layer.

        Only the deepest level gets a real reduce stage (optionally with
        SPP); shallower levels pass through unchanged.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The reduce layer.
        """
        if idx == len(self.in_channels) - 1:
            # fpn_stage
            in_channels = self.in_channels[idx]
            out_channels = self.out_channels[idx]
            layer = [
                CSPResLayer(
                    in_channels=in_channels if i == 0 else out_channels,
                    out_channels=out_channels,
                    num_block=self.num_blocks_per_layer,
                    block_cfg=self.block_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg,
                    attention_cfg=None,
                    use_spp=self.use_spp) for i in range(self.num_csplayer)
            ]
            if self.drop_block_cfg:
                layer.append(MODELS.build(self.drop_block_cfg))
            layer = nn.Sequential(*layer)
        else:
            layer = nn.Identity()
        return layer
    def build_upsample_layer(self, idx: int) -> nn.Module:
        """build upsample layer.

        A 1x1 conv halves the channels before 2x nearest upsampling.
        """
        # fpn_route
        in_channels = self.out_channels[idx]
        return nn.Sequential(
            ConvModule(
                in_channels=in_channels,
                out_channels=in_channels // 2,
                kernel_size=1,
                stride=1,
                padding=0,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg),
            nn.Upsample(scale_factor=2, mode='nearest'))
    def build_top_down_layer(self, idx: int) -> nn.Module:
        """build top down layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The top down layer.
        """
        # fpn_stage
        # Input = lower-level feature + upsampled (half-channel) feature.
        in_channels = self.in_channels[idx - 1] + self.out_channels[idx] // 2
        out_channels = self.out_channels[idx - 1]
        layer = [
            CSPResLayer(
                in_channels=in_channels if i == 0 else out_channels,
                out_channels=out_channels,
                num_block=self.num_blocks_per_layer,
                block_cfg=self.block_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg,
                attention_cfg=None,
                use_spp=False) for i in range(self.num_csplayer)
        ]
        if self.drop_block_cfg:
            layer.append(MODELS.build(self.drop_block_cfg))
        return nn.Sequential(*layer)
    def build_downsample_layer(self, idx: int) -> nn.Module:
        """build downsample layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The downsample layer.
        """
        # pan_route
        return ConvModule(
            in_channels=self.out_channels[idx],
            out_channels=self.out_channels[idx],
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
    def build_bottom_up_layer(self, idx: int) -> nn.Module:
        """build bottom up layer.

        Args:
            idx (int): layer idx.

        Returns:
            nn.Module: The bottom up layer.
        """
        # pan_stage
        in_channels = self.out_channels[idx + 1] + self.out_channels[idx]
        out_channels = self.out_channels[idx + 1]
        layer = [
            CSPResLayer(
                in_channels=in_channels if i == 0 else out_channels,
                out_channels=out_channels,
                num_block=self.num_blocks_per_layer,
                block_cfg=self.block_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg,
                attention_cfg=None,
                use_spp=False) for i in range(self.num_csplayer)
        ]
        if self.drop_block_cfg:
            layer.append(MODELS.build(self.drop_block_cfg))
        return nn.Sequential(*layer)
    def build_out_layer(self, *args, **kwargs) -> nn.Module:
        """build out layer.

        PPYOLOE has no extra output projection; features pass through.
        """
        return nn.Identity()
| 7,704 | 34.506912 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/layers/yolo_bricks.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule, MaxPool2d,
build_norm_layer)
from mmdet.models.layers.csp_layer import \
DarknetBottleneck as MMDET_DarknetBottleneck
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from mmengine.model import BaseModule
from mmengine.utils import digit_version
from torch import Tensor
from mmyolo.registry import MODELS
# ``nn.SiLU`` only exists from PyTorch 1.7.0 onwards. Register it directly
# when available, otherwise register a hand-written equivalent so configs
# using ``dict(type='SiLU')`` keep working on older PyTorch versions.
if digit_version(torch.__version__) >= digit_version('1.7.0'):
    MODELS.register_module(module=nn.SiLU, name='SiLU')
else:
    class SiLU(nn.Module):
        """Sigmoid Weighted Liner Unit."""
        def __init__(self, inplace=True):
            # ``inplace`` is accepted for API compatibility with nn.SiLU
            # only; the multiplication below is always out-of-place.
            super().__init__()
        def forward(self, inputs) -> Tensor:
            # x * sigmoid(x), a.k.a. swish.
            return inputs * torch.sigmoid(inputs)
    MODELS.register_module(module=SiLU, name='SiLU')
class SPPFBottleneck(BaseModule):
    """Spatial Pyramid Pooling - Fast (SPPF) layer.

    Shared by YOLOv5, YOLOX and PPYOLOE (originally by Glenn Jocher).
    The input is optionally reduced by a 1x1 conv, max-pooled, and the
    input plus all pooled maps are concatenated along channels before a
    final 1x1 projection.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The output channels of this Module.
        kernel_sizes (int, tuple[int]): Sequential or number of kernel
            sizes of pooling layers. Defaults to 5.
        use_conv_first (bool): Whether to use conv before pooling layer.
            In YOLOv5 and YOLOX, the para set to True.
            In PPYOLOE, the para set to False.
            Defaults to True.
        mid_channels_scale (float): Channel multiplier, multiply in_channels
            by this amount to get mid_channels. This parameter is valid only
            when use_conv_fist=True. Defaults to 0.5.
        conv_cfg (dict): Config dict for convolution layer. Defaults to None.
            which means using conv2d. Defaults to None.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
    """

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_sizes: Union[int, Sequence[int]] = 5,
                 use_conv_first: bool = True,
                 mid_channels_scale: float = 0.5,
                 conv_cfg: ConfigType = None,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 init_cfg: OptMultiConfig = None):
        super().__init__(init_cfg)
        if use_conv_first:
            mid_channels = int(in_channels * mid_channels_scale)
            self.conv1 = ConvModule(
                in_channels,
                mid_channels,
                1,
                stride=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
        else:
            mid_channels = in_channels
            self.conv1 = None
        self.kernel_sizes = kernel_sizes
        if isinstance(kernel_sizes, int):
            # One pooling op, applied three times in sequence -> 4 maps.
            self.poolings = nn.MaxPool2d(
                kernel_size=kernel_sizes, stride=1, padding=kernel_sizes // 2)
            num_cat = 4
        else:
            # One pooling op per kernel size, applied in parallel.
            self.poolings = nn.ModuleList([
                nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2)
                for ks in kernel_sizes
            ])
            num_cat = len(kernel_sizes) + 1
        self.conv2 = ConvModule(
            mid_channels * num_cat,
            out_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)

    def forward(self, x: Tensor) -> Tensor:
        """Apply the SPPF transform.

        Args:
            x (Tensor): The input tensor.
        """
        if self.conv1 is not None:
            x = self.conv1(x)
        if isinstance(self.kernel_sizes, int):
            # Sequential pooling: each map pools the previous one.
            pooled = [x]
            for _ in range(3):
                pooled.append(self.poolings(pooled[-1]))
        else:
            pooled = [x] + [pooling(x) for pooling in self.poolings]
        return self.conv2(torch.cat(pooled, dim=1))
@MODELS.register_module()
class RepVGGBlock(nn.Module):
    """RepVGGBlock is a basic rep-style block, including training and deploy
    status. This code is based on
    https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py.

    At training time the block has three parallel branches (3x3 conv+BN,
    1x1 conv+BN, and an optional identity BN); at deploy time the three
    branches are fused into a single 3x3 conv (``rbr_reparam``).

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple): Stride of the convolution. Default: 1
        padding (int, tuple): Padding added to all four sides of
            the input. Default: 1
        dilation (int or tuple): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        padding_mode (string, optional): Default: 'zeros'
        use_se (bool): Whether to use se. Default: False
        use_alpha (bool): Whether to use `alpha` parameter at 1x1 conv.
            In PPYOLOE+ model backbone, `use_alpha` will be set to True.
            Default: False.
        use_bn_first (bool): Whether to use bn layer before conv.
            In YOLOv6 and YOLOv7, this will be set to True.
            In PPYOLOE, this will be set to False.
            Default: True.
        deploy (bool): Whether in deploy mode. Default: False
    """
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: Union[int, Tuple[int]] = 3,
                 stride: Union[int, Tuple[int]] = 1,
                 padding: Union[int, Tuple[int]] = 1,
                 dilation: Union[int, Tuple[int]] = 1,
                 groups: Optional[int] = 1,
                 padding_mode: Optional[str] = 'zeros',
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='ReLU', inplace=True),
                 use_se: bool = False,
                 use_alpha: bool = False,
                 use_bn_first=True,
                 deploy: bool = False):
        super().__init__()
        self.deploy = deploy
        self.groups = groups
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Fusion math below assumes exactly 3x3 kernel with 'same' padding.
        assert kernel_size == 3
        assert padding == 1
        # Padding for the 1x1 branch so both branches stay spatially
        # aligned (evaluates to 0 for kernel_size=3, padding=1).
        padding_11 = padding - kernel_size // 2
        self.nonlinearity = MODELS.build(act_cfg)
        if use_se:
            raise NotImplementedError('se block not supported yet')
        else:
            self.se = nn.Identity()
        if use_alpha:
            # Learnable scalar weight on the 1x1 branch (PPYOLOE+).
            alpha = torch.ones([
                1,
            ], dtype=torch.float32, requires_grad=True)
            self.alpha = nn.Parameter(alpha, requires_grad=True)
        else:
            self.alpha = None
        if deploy:
            # Deploy mode: single fused conv, no separate branches.
            self.rbr_reparam = nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                dilation=dilation,
                groups=groups,
                bias=True,
                padding_mode=padding_mode)
        else:
            # Identity branch only exists when shapes allow a residual.
            if use_bn_first and (out_channels == in_channels) and stride == 1:
                self.rbr_identity = build_norm_layer(
                    norm_cfg, num_features=in_channels)[1]
            else:
                self.rbr_identity = None
            self.rbr_dense = ConvModule(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                groups=groups,
                bias=False,
                norm_cfg=norm_cfg,
                act_cfg=None)
            self.rbr_1x1 = ConvModule(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=1,
                stride=stride,
                padding=padding_11,
                groups=groups,
                bias=False,
                norm_cfg=norm_cfg,
                act_cfg=None)
    def forward(self, inputs: Tensor) -> Tensor:
        """Forward process.

        Args:
            inputs (Tensor): The input tensor.

        Returns:
            Tensor: The output tensor.
        """
        if hasattr(self, 'rbr_reparam'):
            # Deploy path: single fused conv.
            return self.nonlinearity(self.se(self.rbr_reparam(inputs)))
        if self.rbr_identity is None:
            id_out = 0
        else:
            id_out = self.rbr_identity(inputs)
        # self.alpha is either None or a 1-element Parameter, so its
        # truthiness is well defined here.
        if self.alpha:
            return self.nonlinearity(
                self.se(
                    self.rbr_dense(inputs) +
                    self.alpha * self.rbr_1x1(inputs) + id_out))
        else:
            return self.nonlinearity(
                self.se(
                    self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out))
    def get_equivalent_kernel_bias(self):
        """Derives the equivalent kernel and bias in a differentiable way.

        Each branch's conv+BN is first folded into a single conv; the 1x1
        kernel is zero-padded to 3x3 so all three can be summed.

        Returns:
            tuple: Equivalent kernel and bias
        """
        kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense)
        kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1)
        kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity)
        if self.alpha:
            return kernel3x3 + self.alpha * self._pad_1x1_to_3x3_tensor(
                kernel1x1) + kernelid, bias3x3 + self.alpha * bias1x1 + biasid
        else:
            return kernel3x3 + self._pad_1x1_to_3x3_tensor(
                kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid
    def _pad_1x1_to_3x3_tensor(self, kernel1x1):
        """Pad 1x1 tensor to 3x3.
        Args:
            kernel1x1 (Tensor): The input 1x1 kernel need to be padded.

        Returns:
            Tensor: 3x3 kernel after padded.
        """
        if kernel1x1 is None:
            return 0
        else:
            # Zero-pad one pixel on each spatial side: 1x1 -> 3x3.
            return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1])
    def _fuse_bn_tensor(self, branch: nn.Module) -> Tuple[np.ndarray, Tensor]:
        """Derives the equivalent kernel and bias of a specific branch layer.

        Args:
            branch (nn.Module): The layer that needs to be equivalently
                transformed, which can be nn.Sequential or nn.Batchnorm2d

        Returns:
            tuple: Equivalent kernel and bias
        """
        if branch is None:
            return 0, 0
        if isinstance(branch, ConvModule):
            kernel = branch.conv.weight
            running_mean = branch.bn.running_mean
            running_var = branch.bn.running_var
            gamma = branch.bn.weight
            beta = branch.bn.bias
            eps = branch.bn.eps
        else:
            # Identity branch: synthesize a (grouped) identity 3x3 kernel
            # so the BN can be expressed as a convolution.
            assert isinstance(branch, (nn.SyncBatchNorm, nn.BatchNorm2d))
            if not hasattr(self, 'id_tensor'):
                input_dim = self.in_channels // self.groups
                kernel_value = np.zeros((self.in_channels, input_dim, 3, 3),
                                        dtype=np.float32)
                for i in range(self.in_channels):
                    kernel_value[i, i % input_dim, 1, 1] = 1
                self.id_tensor = torch.from_numpy(kernel_value).to(
                    branch.weight.device)
            kernel = self.id_tensor
            running_mean = branch.running_mean
            running_var = branch.running_var
            gamma = branch.weight
            beta = branch.bias
            eps = branch.eps
        # Standard conv+BN folding: w' = w * gamma/std, b' = beta - mu*gamma/std.
        std = (running_var + eps).sqrt()
        t = (gamma / std).reshape(-1, 1, 1, 1)
        return kernel * t, beta - running_mean * gamma / std
    def switch_to_deploy(self):
        """Switch to deploy mode.

        Fuses the training-time branches into a single conv and deletes
        the originals; idempotent if already switched.
        """
        if hasattr(self, 'rbr_reparam'):
            return
        kernel, bias = self.get_equivalent_kernel_bias()
        self.rbr_reparam = nn.Conv2d(
            in_channels=self.rbr_dense.conv.in_channels,
            out_channels=self.rbr_dense.conv.out_channels,
            kernel_size=self.rbr_dense.conv.kernel_size,
            stride=self.rbr_dense.conv.stride,
            padding=self.rbr_dense.conv.padding,
            dilation=self.rbr_dense.conv.dilation,
            groups=self.rbr_dense.conv.groups,
            bias=True)
        self.rbr_reparam.weight.data = kernel
        self.rbr_reparam.bias.data = bias
        for para in self.parameters():
            para.detach_()
        self.__delattr__('rbr_dense')
        self.__delattr__('rbr_1x1')
        if hasattr(self, 'rbr_identity'):
            self.__delattr__('rbr_identity')
        if hasattr(self, 'id_tensor'):
            self.__delattr__('id_tensor')
        self.deploy = True
@MODELS.register_module()
class BepC3StageBlock(nn.Module):
    """Beer-mug RepC3 Block.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        num_blocks (int): Number of blocks. Defaults to 1
        hidden_ratio (float): Hidden channel expansion.
            Default: 0.5
        concat_all_layer (bool): Concat all layer when forward calculate.
            Default: True
        block_cfg (dict): Config dict for the block used to build each
            layer. Defaults to dict(type='RepVGGBlock').
        norm_cfg (ConfigType): Config dict for normalization layer.
            Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (ConfigType): Config dict for activation layer.
            Defaults to dict(type='ReLU', inplace=True).
    """
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 num_blocks: int = 1,
                 hidden_ratio: float = 0.5,
                 concat_all_layer: bool = True,
                 block_cfg: ConfigType = dict(type='RepVGGBlock'),
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='ReLU', inplace=True)):
        super().__init__()
        hidden_channels = int(out_channels * hidden_ratio)
        # Main branch input projection (feeds the rep-block stack).
        self.conv1 = ConvModule(
            in_channels,
            hidden_channels,
            kernel_size=1,
            stride=1,
            groups=1,
            bias=False,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        # Shortcut branch projection (only used when concat_all_layer).
        self.conv2 = ConvModule(
            in_channels,
            hidden_channels,
            kernel_size=1,
            stride=1,
            groups=1,
            bias=False,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self.conv3 = ConvModule(
            2 * hidden_channels,
            out_channels,
            kernel_size=1,
            stride=1,
            groups=1,
            bias=False,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self.block = RepStageBlock(
            in_channels=hidden_channels,
            out_channels=hidden_channels,
            num_blocks=num_blocks,
            block_cfg=block_cfg,
            bottle_block=BottleRep)
        self.concat_all_layer = concat_all_layer
        if not concat_all_layer:
            # NOTE(review): this replaces the 2*hidden->out conv3 built
            # above; the discarded module still consumed parameter-init RNG
            # draws, so removing the first construction would change
            # seeded-init reproducibility — confirm before refactoring.
            self.conv3 = ConvModule(
                hidden_channels,
                out_channels,
                kernel_size=1,
                stride=1,
                groups=1,
                bias=False,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
    def forward(self, x):
        # Either fuse main + shortcut branches, or use the main branch only.
        if self.concat_all_layer is True:
            return self.conv3(
                torch.cat((self.block(self.conv1(x)), self.conv2(x)), dim=1))
        else:
            return self.conv3(self.block(self.conv1(x)))
class BottleRep(nn.Module):
    """Bottle Rep Block: two stacked rep-style convs with an optional
    (optionally learnable-weighted) residual connection.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        block_cfg (dict): Config dict for the block used to build each
            layer. Defaults to dict(type='RepVGGBlock').
        adaptive_weight (bool): Add adaptive_weight when forward calculate.
            Defaults False.
    """

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 block_cfg: ConfigType = dict(type='RepVGGBlock'),
                 adaptive_weight: bool = False):
        super().__init__()
        first_cfg = block_cfg.copy()
        first_cfg.update(
            dict(in_channels=in_channels, out_channels=out_channels))
        second_cfg = block_cfg.copy()
        second_cfg.update(
            dict(in_channels=out_channels, out_channels=out_channels))
        self.conv1 = MODELS.build(first_cfg)
        self.conv2 = MODELS.build(second_cfg)
        # Residual add is only valid when shapes match.
        self.shortcut = in_channels == out_channels
        # Learnable residual weight, or the constant 1.0.
        if adaptive_weight:
            self.alpha = nn.Parameter(torch.ones(1))
        else:
            self.alpha = 1.0

    def forward(self, x: Tensor) -> Tensor:
        """Apply both convs, plus a (scaled) residual when enabled."""
        out = self.conv2(self.conv1(x))
        if not self.shortcut:
            return out
        return out + self.alpha * x
@MODELS.register_module()
class ConvWrapper(nn.Module):
    """Thin wrapper around :class:`ConvModule` using 'same' padding and a
    SiLU activation by default.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple): Stride of the convolution. Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): Conv bias. Default: True.
        norm_cfg (ConfigType): Config dict for normalization layer.
            Defaults to None.
        act_cfg (ConfigType): Config dict for activation layer.
            Defaults to dict(type='SiLU').
    """

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 kernel_size: int = 3,
                 stride: int = 1,
                 groups: int = 1,
                 bias: bool = True,
                 norm_cfg: ConfigType = None,
                 act_cfg: ConfigType = dict(type='SiLU')):
        super().__init__()
        # 'same' padding for odd kernel sizes.
        same_padding = kernel_size // 2
        self.block = ConvModule(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=same_padding,
            groups=groups,
            bias=bias,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)

    def forward(self, x: Tensor) -> Tensor:
        """Run the wrapped conv block."""
        return self.block(x)
@MODELS.register_module()
class EffectiveSELayer(nn.Module):
    """Effective Squeeze-Excitation (eSE) attention.

    From `CenterMask : Real-Time Anchor-Free Instance Segmentation`
    arxiv (https://arxiv.org/abs/1911.06667)
    This code referenced to
    https://github.com/youngwanLEE/CenterMask/blob/72147e8aae673fcaf4103ee90a6a6b73863e7fa1/maskrcnn_benchmark/modeling/backbone/vovnet.py#L108-L121  # noqa

    Args:
        channels (int): The input and output channels of this Module.
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='HSigmoid').
    """

    def __init__(self,
                 channels: int,
                 act_cfg: ConfigType = dict(type='HSigmoid')):
        super().__init__()
        assert isinstance(act_cfg, dict)
        # Single fc (1x1 conv) without activation; gating comes after.
        self.fc = ConvModule(channels, channels, 1, act_cfg=None)
        act_cfg_ = act_cfg.copy()  # type: ignore
        self.activate = MODELS.build(act_cfg_)

    def forward(self, x: Tensor) -> Tensor:
        """Re-weight ``x`` channel-wise with a gated global average.

        Args:
            x (Tensor): The input tensor.
        """
        squeezed = x.mean((2, 3), keepdim=True)
        gate = self.activate(self.fc(squeezed))
        return x * gate
class PPYOLOESELayer(nn.Module):
    """Squeeze-and-Excitation attention module for PPYOLOE.

    Differences from ``SELayer`` in mmdet:

    1. For fast speed and avoiding double inference in ppyoloe,
       the caller performs ``F.adaptive_avg_pool2d`` and passes the
       pooled feature in.
    2. The fc weight gets a dedicated normal init.
    3. A 1x1 ConvModule is applied *after* the channel re-weighting.

    Args:
        feat_channels (int): The input (and output) channels of the SE layer.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN', momentum=0.1, eps=1e-5).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
    """

    def __init__(self,
                 feat_channels: int,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.1, eps=1e-5),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True)):
        super().__init__()
        self.fc = nn.Conv2d(feat_channels, feat_channels, 1)
        self.sig = nn.Sigmoid()
        self.conv = ConvModule(
            feat_channels,
            feat_channels,
            1,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self._init_weights()

    def _init_weights(self):
        """Normal(0, 0.001) init for the squeeze fc weight."""
        nn.init.normal_(self.fc.weight, mean=0, std=0.001)

    def forward(self, feat: Tensor, avg_feat: Tensor) -> Tensor:
        """Re-weight ``feat`` by a gate computed from ``avg_feat``.

        Args:
            feat (Tensor): The input tensor.
            avg_feat (Tensor): Average pooling feature tensor.
        """
        gate = self.sig(self.fc(avg_feat))
        return self.conv(feat * gate)
@MODELS.register_module()
class ELANBlock(BaseModule):
    """Efficient layer aggregation networks for YOLOv7.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The out channels of this Module.
        middle_ratio (float): The scaling ratio of the middle layer
            based on the in_channels.
        block_ratio (float): The scaling ratio of the block layer
            based on the in_channels.
        num_blocks (int): The number of blocks in the main branch.
            Defaults to 2.
        num_convs_in_block (int): The number of convs pre block.
            Defaults to 1.
        conv_cfg (dict): Config dict for convolution layer. Defaults to None.
            which means using conv2d. Defaults to None.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
    """
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 middle_ratio: float,
                 block_ratio: float,
                 num_blocks: int = 2,
                 num_convs_in_block: int = 1,
                 conv_cfg: OptConfigType = None,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 init_cfg: OptMultiConfig = None):
        super().__init__(init_cfg=init_cfg)
        assert num_blocks >= 1
        assert num_convs_in_block >= 1
        middle_channels = int(in_channels * middle_ratio)
        block_channels = int(in_channels * block_ratio)
        # Final concat = every block output + the main and short branches.
        final_conv_in_channels = int(
            num_blocks * block_channels) + 2 * middle_channels
        self.main_conv = ConvModule(
            in_channels,
            middle_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self.short_conv = ConvModule(
            in_channels,
            middle_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self.blocks = nn.ModuleList()
        for _ in range(num_blocks):
            if num_convs_in_block == 1:
                internal_block = ConvModule(
                    middle_channels,
                    block_channels,
                    3,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg)
            else:
                internal_block = []
                for _ in range(num_convs_in_block):
                    internal_block.append(
                        ConvModule(
                            middle_channels,
                            block_channels,
                            3,
                            padding=1,
                            conv_cfg=conv_cfg,
                            norm_cfg=norm_cfg,
                            act_cfg=act_cfg))
                    # After the first conv of a block, subsequent convs
                    # consume block_channels.
                    middle_channels = block_channels
                internal_block = nn.Sequential(*internal_block)
            # From the second block onward, every block's first conv
            # consumes block_channels (only block 0 sees middle_channels).
            middle_channels = block_channels
            self.blocks.append(internal_block)
        self.final_conv = ConvModule(
            final_conv_in_channels,
            out_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
    def forward(self, x: Tensor) -> Tensor:
        """Forward process

        Args:
            x (Tensor): The input tensor.
        """
        x_short = self.short_conv(x)
        x_main = self.main_conv(x)
        block_outs = []
        x_block = x_main
        # Blocks are chained; every intermediate output is kept for concat.
        for block in self.blocks:
            x_block = block(x_block)
            block_outs.append(x_block)
        # Concat order: deepest block output first, then main, then short.
        x_final = torch.cat((*block_outs[::-1], x_main, x_short), dim=1)
        return self.final_conv(x_final)
@MODELS.register_module()
class EELANBlock(BaseModule):
    """Expand efficient layer aggregation networks for YOLOv7.

    Runs several parallel :class:`ELANBlock` instances (all built with the
    same ``kwargs``) over the input and sums their outputs element-wise.

    Args:
        num_elan_block (int): The number of ELANBlock.
    """

    def __init__(self, num_elan_block: int, **kwargs):
        super().__init__()
        assert num_elan_block >= 1
        self.e_elan_blocks = nn.ModuleList(
            ELANBlock(**kwargs) for _ in range(num_elan_block))

    def forward(self, x: Tensor) -> Tensor:
        """Sum the outputs of all parallel ELAN blocks."""
        return sum(block(x) for block in self.e_elan_blocks)
class MaxPoolAndStrideConvBlock(BaseModule):
    """Max pooling and stride conv layer for YOLOv7.

    Downsamples the input along two parallel branches (max pooling and a
    stride-2 conv) and concatenates the results.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The out channels of this Module.
        maxpool_kernel_sizes (int): kernel sizes of pooling layers.
            Defaults to 2.
        use_in_channels_of_middle (bool): Whether to calculate middle channels
            based on in_channels. Defaults to False.
        conv_cfg (dict): Config dict for convolution layer. Defaults to None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
    """

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 maxpool_kernel_sizes: int = 2,
                 use_in_channels_of_middle: bool = False,
                 conv_cfg: OptConfigType = None,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 init_cfg: OptMultiConfig = None):
        super().__init__(init_cfg=init_cfg)
        half_out_channels = out_channels // 2
        middle_channels = (
            in_channels if use_in_channels_of_middle else half_out_channels)
        self.maxpool_branches = nn.Sequential(
            MaxPool2d(
                kernel_size=maxpool_kernel_sizes,
                stride=maxpool_kernel_sizes),
            ConvModule(
                in_channels,
                half_out_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg))
        self.stride_conv_branches = nn.Sequential(
            ConvModule(
                in_channels,
                middle_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg),
            ConvModule(
                middle_channels,
                half_out_channels,
                3,
                stride=2,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg))

    def forward(self, x: Tensor) -> Tensor:
        """Concatenate the stride-conv and max-pool branch outputs.

        Args:
            x (Tensor): The input tensor.
        """
        return torch.cat(
            [self.stride_conv_branches(x), self.maxpool_branches(x)], dim=1)
@MODELS.register_module()
class TinyDownSampleBlock(BaseModule):
    """Down sample layer for YOLOv7-tiny.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The out channels of this Module.
        middle_ratio (float): The scaling ratio of the middle layer
            based on the in_channels. Defaults to 1.0.
        kernel_sizes (int, tuple[int]): Sequential or number of kernel
            sizes of pooling layers. Defaults to 3.
        conv_cfg (dict): Config dict for convolution layer. Defaults to None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='LeakyReLU', negative_slope=0.1).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
    """

    def __init__(
            self,
            in_channels: int,
            out_channels: int,
            middle_ratio: float = 1.0,
            kernel_sizes: Union[int, Sequence[int]] = 3,
            conv_cfg: OptConfigType = None,
            norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001),
            act_cfg: ConfigType = dict(type='LeakyReLU', negative_slope=0.1),
            init_cfg: OptMultiConfig = None):
        super().__init__(init_cfg)
        middle_channels = int(in_channels * middle_ratio)
        self.short_conv = ConvModule(
            in_channels,
            middle_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self.main_convs = nn.ModuleList()
        for conv_idx in range(3):
            if conv_idx == 0:
                # First conv reduces the channel count with a 1x1 kernel.
                self.main_convs.append(
                    ConvModule(
                        in_channels,
                        middle_channels,
                        1,
                        conv_cfg=conv_cfg,
                        norm_cfg=norm_cfg,
                        act_cfg=act_cfg))
            else:
                # Remaining convs keep channels with ``kernel_sizes`` kernels.
                self.main_convs.append(
                    ConvModule(
                        middle_channels,
                        middle_channels,
                        kernel_sizes,
                        padding=(kernel_sizes - 1) // 2,
                        conv_cfg=conv_cfg,
                        norm_cfg=norm_cfg,
                        act_cfg=act_cfg))
        self.final_conv = ConvModule(
            middle_channels * 4,
            out_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)

    def forward(self, x) -> Tensor:
        """Cascade the main convs, then fuse their outputs (deepest first)
        with the shortcut branch."""
        short_out = self.short_conv(x)
        feat = x
        main_outs = []
        for conv in self.main_convs:
            feat = conv(feat)
            main_outs.append(feat)
        return self.final_conv(
            torch.cat([*main_outs[::-1], short_out], dim=1))
@MODELS.register_module()
class SPPFCSPBlock(BaseModule):
    """Spatial pyramid pooling - Fast (SPPF) layer with CSP for
    YOLOv7.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The output channels of this Module.
        expand_ratio (float): Expand ratio of SPPCSPBlock.
            Defaults to 0.5.
        kernel_sizes (int, tuple[int]): Sequential or number of kernel
            sizes of pooling layers. Defaults to 5.
        is_tiny_version (bool): Is tiny version of SPPFCSPBlock. If True,
            it means it is a yolov7 tiny model. Defaults to False.
        conv_cfg (dict): Config dict for convolution layer. Defaults to None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
    """

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 expand_ratio: float = 0.5,
                 kernel_sizes: Union[int, Sequence[int]] = 5,
                 is_tiny_version: bool = False,
                 conv_cfg: OptConfigType = None,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 init_cfg: OptMultiConfig = None):
        super().__init__(init_cfg=init_cfg)
        self.is_tiny_version = is_tiny_version
        hidden_channels = int(2 * out_channels * expand_ratio)
        # The tiny variant uses a single 1x1 conv on the main path; the
        # full variant stacks 1x1 -> 3x3 -> 1x1 convs.
        if is_tiny_version:
            self.main_layers = ConvModule(
                in_channels,
                hidden_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
        else:
            self.main_layers = nn.Sequential(
                ConvModule(
                    in_channels,
                    hidden_channels,
                    1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg),
                ConvModule(
                    hidden_channels,
                    hidden_channels,
                    3,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg),
                ConvModule(
                    hidden_channels,
                    hidden_channels,
                    1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg),
            )
        self.kernel_sizes = kernel_sizes
        if isinstance(kernel_sizes, int):
            # One shared pooling layer applied repeatedly (SPPF style).
            self.poolings = nn.MaxPool2d(
                kernel_size=kernel_sizes, stride=1, padding=kernel_sizes // 2)
        else:
            # One independent pooling layer per kernel size (SPP style).
            self.poolings = nn.ModuleList([
                nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2)
                for ks in kernel_sizes
            ])
        if is_tiny_version:
            self.fuse_layers = ConvModule(
                4 * hidden_channels,
                hidden_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
        else:
            self.fuse_layers = nn.Sequential(
                ConvModule(
                    4 * hidden_channels,
                    hidden_channels,
                    1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg),
                ConvModule(
                    hidden_channels,
                    hidden_channels,
                    3,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=act_cfg))
        self.short_layer = ConvModule(
            in_channels,
            hidden_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self.final_conv = ConvModule(
            2 * hidden_channels,
            out_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)

    def forward(self, x) -> Tensor:
        """Forward process.

        Args:
            x (Tensor): The input tensor.
        """
        main_feat = self.main_layers(x)
        if isinstance(self.kernel_sizes, int):
            # Cascaded pooling: each pass reuses the single pooling layer.
            pool1 = self.poolings(main_feat)
            pool2 = self.poolings(pool1)
            pooled = [main_feat, pool1, pool2, self.poolings(pool2)]
        else:
            pooled = [main_feat] + [pool(main_feat) for pool in self.poolings]
        if self.is_tiny_version:
            # The tiny variant concatenates in reverse order.
            pooled = pooled[::-1]
        fused = self.fuse_layers(torch.cat(pooled, 1))
        short_feat = self.short_layer(x)
        return self.final_conv(torch.cat((fused, short_feat), dim=1))
class ImplicitA(nn.Module):
    """Implicit add layer in YOLOv7.

    Adds a learnable per-channel bias to the input feature map.

    Args:
        in_channels (int): The input channels of this Module.
        mean (float): Mean value of implicit module. Defaults to 0.
        std (float): Std value of implicit module. Defaults to 0.02
    """

    def __init__(self, in_channels: int, mean: float = 0., std: float = .02):
        super().__init__()
        bias = torch.zeros(1, in_channels, 1, 1)
        self.implicit = nn.Parameter(bias)
        nn.init.normal_(self.implicit, mean=mean, std=std)

    def forward(self, x):
        """Add the learned implicit bias to ``x``.

        Args:
            x (Tensor): The input tensor.
        """
        return x + self.implicit
class ImplicitM(nn.Module):
    """Implicit multiplier layer in YOLOv7.

    Scales the input feature map by a learnable per-channel factor.

    Args:
        in_channels (int): The input channels of this Module.
        mean (float): Mean value of implicit module. Defaults to 1.
        std (float): Std value of implicit module. Defaults to 0.02.
    """

    def __init__(self, in_channels: int, mean: float = 1., std: float = .02):
        super().__init__()
        scale = torch.ones(1, in_channels, 1, 1)
        self.implicit = nn.Parameter(scale)
        nn.init.normal_(self.implicit, mean=mean, std=std)

    def forward(self, x):
        """Scale ``x`` by the learned implicit factor.

        Args:
            x (Tensor): The input tensor.
        """
        return x * self.implicit
@MODELS.register_module()
class PPYOLOEBasicBlock(nn.Module):
    """PPYOLOE Backbone BasicBlock.

    A 3x3 ConvModule followed by a RepVGG block, with an optional residual
    connection.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The output channels of this Module.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN', momentum=0.1, eps=1e-5).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        shortcut (bool): Whether to add inputs and outputs together
            at the end of this layer. Defaults to True.
        use_alpha (bool): Whether to use `alpha` parameter at 1x1 conv.
    """

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.1, eps=1e-5),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 shortcut: bool = True,
                 use_alpha: bool = False):
        super().__init__()
        assert act_cfg is None or isinstance(act_cfg, dict)
        self.conv1 = ConvModule(
            in_channels,
            out_channels,
            3,
            stride=1,
            padding=1,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self.conv2 = RepVGGBlock(
            out_channels,
            out_channels,
            use_alpha=use_alpha,
            act_cfg=act_cfg,
            norm_cfg=norm_cfg,
            use_bn_first=False)
        self.shortcut = shortcut

    def forward(self, x: Tensor) -> Tensor:
        """Forward process.

        Args:
            x (Tensor): The input tensor.

        Returns:
            Tensor: The output tensor.
        """
        out = self.conv2(self.conv1(x))
        return x + out if self.shortcut else out
class CSPResLayer(nn.Module):
    """PPYOLOE Backbone Stage.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The output channels of this Module.
        num_block (int): Number of blocks in this stage.
        block_cfg (dict): Config dict for block. Default config is
            suitable for PPYOLOE+ backbone. And in PPYOLOE neck,
            block_cfg is set to dict(type='PPYOLOEBasicBlock',
            shortcut=False, use_alpha=False). Defaults to
            dict(type='PPYOLOEBasicBlock', shortcut=True, use_alpha=True).
        stride (int): Stride of the convolution. In backbone, the stride
            must be set to 2. In neck, the stride must be set to 1.
            Defaults to 1.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN', momentum=0.1, eps=1e-5).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        attention_cfg (dict, optional): Config dict for `EffectiveSELayer`.
            Defaults to dict(type='EffectiveSELayer',
            act_cfg=dict(type='HSigmoid')).
        use_spp (bool): Whether to use `SPPFBottleneck` layer.
            Defaults to False.
    """

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 num_block: int,
                 block_cfg: ConfigType = dict(
                     type='PPYOLOEBasicBlock', shortcut=True, use_alpha=True),
                 stride: int = 1,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.1, eps=1e-5),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 attention_cfg: OptMultiConfig = dict(
                     type='EffectiveSELayer', act_cfg=dict(type='HSigmoid')),
                 use_spp: bool = False):
        super().__init__()
        self.num_block = num_block
        self.block_cfg = block_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.use_spp = use_spp
        assert attention_cfg is None or isinstance(attention_cfg, dict)
        if stride == 2:
            # Backbone stage: a 3x3 stride-2 conv halves the resolution
            # before the CSP split.
            conv1_in_channels = conv2_in_channels = conv3_in_channels = (
                in_channels + out_channels) // 2
            blocks_channels = conv1_in_channels // 2
            self.conv_down = ConvModule(
                in_channels,
                conv1_in_channels,
                3,
                stride=2,
                padding=1,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
        else:
            # Neck stage: no downsampling conv.
            conv1_in_channels = conv2_in_channels = in_channels
            conv3_in_channels = out_channels
            blocks_channels = out_channels // 2
            self.conv_down = None
        self.conv1 = ConvModule(
            conv1_in_channels,
            blocks_channels,
            1,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self.conv2 = ConvModule(
            conv2_in_channels,
            blocks_channels,
            1,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self.blocks = self.build_blocks_layer(blocks_channels)
        self.conv3 = ConvModule(
            conv3_in_channels,
            out_channels,
            1,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        if attention_cfg:
            # Copy before mutating so the caller's config dict is untouched.
            attention_cfg = attention_cfg.copy()
            attention_cfg['channels'] = blocks_channels * 2
            self.attn = MODELS.build(attention_cfg)
        else:
            self.attn = None

    def build_blocks_layer(self, blocks_channels: int) -> nn.Module:
        """Build blocks layer.

        Args:
            blocks_channels: The channels of this Module.
        """
        blocks = nn.Sequential()
        block_cfg = self.block_cfg.copy()
        block_cfg.update(
            dict(in_channels=blocks_channels, out_channels=blocks_channels))
        block_cfg.setdefault('norm_cfg', self.norm_cfg)
        block_cfg.setdefault('act_cfg', self.act_cfg)
        for i in range(self.num_block):
            blocks.add_module(str(i), MODELS.build(block_cfg))
            # Insert SPP exactly once, right after the middle block.
            if i == (self.num_block - 1) // 2 and self.use_spp:
                blocks.add_module(
                    'spp',
                    SPPFBottleneck(
                        blocks_channels,
                        blocks_channels,
                        kernel_sizes=[5, 9, 13],
                        use_conv_first=False,
                        conv_cfg=None,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
        return blocks

    def forward(self, x: Tensor) -> Tensor:
        """Forward process.

        Args:
            x (Tensor): The input tensor.
        """
        if self.conv_down is not None:
            x = self.conv_down(x)
        y1 = self.conv1(x)
        y2 = self.blocks(self.conv2(x))
        # ``dim`` is the PyTorch-idiomatic keyword (the original used the
        # numpy-compat ``axis`` alias, inconsistent with the rest of the
        # file); the result is identical.
        y = torch.cat([y1, y2], dim=1)
        if self.attn is not None:
            y = self.attn(y)
        y = self.conv3(y)
        return y
@MODELS.register_module()
class RepStageBlock(nn.Module):
    """RepStageBlock is a stage block with rep-style basic block.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The output channels of this Module.
        num_blocks (int, tuple[int]): Number of blocks.  Defaults to 1.
        bottle_block (nn.Module): Basic unit of RepStage.
            Defaults to RepVGGBlock.
        block_cfg (ConfigType): Config of RepStage.
            Defaults to 'RepVGGBlock'.
    """

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 num_blocks: int = 1,
                 bottle_block: nn.Module = RepVGGBlock,
                 block_cfg: ConfigType = dict(type='RepVGGBlock')):
        super().__init__()
        # Copy before mutating so the caller's config dict is untouched.
        block_cfg = block_cfg.copy()
        block_cfg.update(
            dict(in_channels=in_channels, out_channels=out_channels))
        self.conv1 = MODELS.build(block_cfg)
        # Subsequent blocks keep the channel count at ``out_channels``.
        block_cfg.update(
            dict(in_channels=out_channels, out_channels=out_channels))
        self.block = None
        if num_blocks > 1:
            self.block = nn.Sequential(*(MODELS.build(block_cfg)
                                         for _ in range(num_blocks - 1)))
        # When the basic unit is BottleRep (YOLOv6-style), rebuild conv1 and
        # the block sequence: each BottleRep contains two inner blocks, so
        # the effective depth is halved.
        if bottle_block == BottleRep:
            self.conv1 = BottleRep(
                in_channels,
                out_channels,
                block_cfg=block_cfg,
                adaptive_weight=True)
            num_blocks = num_blocks // 2
            self.block = None
            if num_blocks > 1:
                self.block = nn.Sequential(*(BottleRep(
                    out_channels,
                    out_channels,
                    block_cfg=block_cfg,
                    adaptive_weight=True) for _ in range(num_blocks - 1)))

    def forward(self, x: Tensor) -> Tensor:
        """Forward process.

        Args:
            x (Tensor): The input tensor.

        Returns:
            Tensor: The output tensor.
        """
        x = self.conv1(x)
        if self.block is not None:
            x = self.block(x)
        return x
class DarknetBottleneck(MMDET_DarknetBottleneck):
    """The basic bottleneck block used in Darknet.

    Each ResBlock consists of two ConvModules and the input is added to the
    final output. Each ConvModule is composed of Conv, BN, and LeakyReLU.
    The first convLayer has filter size of k1Xk1 and the second one has the
    filter size of k2Xk2.

    Note:
        This DarknetBottleneck is little different from MMDet's, we can
        change the kernel size and padding for each conv.

    Args:
        in_channels (int): The input channels of this Module.
        out_channels (int): The output channels of this Module.
        expansion (float): The kernel size for hidden channel.
            Defaults to 0.5.
        kernel_size (Sequence[int]): The kernel size of the convolution.
            Defaults to (1, 3).
        padding (Sequence[int]): The padding size of the convolution.
            Defaults to (0, 1).
        add_identity (bool): Whether to add identity to the out.
            Defaults to True
        use_depthwise (bool): Whether to use depthwise separable convolution.
            Defaults to False
        conv_cfg (dict): Config dict for convolution layer. Default: None,
            which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='Swish').
    """

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 expansion: float = 0.5,
                 kernel_size: Sequence[int] = (1, 3),
                 padding: Sequence[int] = (0, 1),
                 add_identity: bool = True,
                 use_depthwise: bool = False,
                 conv_cfg: OptConfigType = None,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 init_cfg: OptMultiConfig = None) -> None:
        super().__init__(in_channels, out_channels, init_cfg=init_cfg)
        assert isinstance(kernel_size, Sequence) and len(kernel_size) == 2
        hidden_channels = int(out_channels * expansion)
        second_conv_cls = (
            DepthwiseSeparableConvModule if use_depthwise else ConvModule)
        # Override the convs built by the MMDet parent so the kernel sizes
        # and paddings are configurable.
        self.conv1 = ConvModule(
            in_channels,
            hidden_channels,
            kernel_size[0],
            padding=padding[0],
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self.conv2 = second_conv_cls(
            hidden_channels,
            out_channels,
            kernel_size[1],
            stride=1,
            padding=padding[1],
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        # Identity is only valid when the shapes match.
        self.add_identity = add_identity and in_channels == out_channels
class CSPLayerWithTwoConv(BaseModule):
    """Cross Stage Partial Layer with 2 convolutions.

    Args:
        in_channels (int): The input channels of the CSP layer.
        out_channels (int): The output channels of the CSP layer.
        expand_ratio (float): Ratio to adjust the number of channels of the
            hidden layer. Defaults to 0.5.
        num_blocks (int): Number of blocks. Defaults to 1
        add_identity (bool): Whether to add identity in blocks.
            Defaults to True.
        conv_cfg (dict, optional): Config dict for convolution layer.
            Defaults to None, which means using conv2d.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        init_cfg (:obj:`ConfigDict` or dict or list[dict] or
            list[:obj:`ConfigDict`], optional): Initialization config dict.
            Defaults to None.
    """

    def __init__(
            self,
            in_channels: int,
            out_channels: int,
            expand_ratio: float = 0.5,
            num_blocks: int = 1,
            add_identity: bool = True,  # shortcut
            conv_cfg: OptConfigType = None,
            norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001),
            act_cfg: ConfigType = dict(type='SiLU', inplace=True),
            init_cfg: OptMultiConfig = None) -> None:
        super().__init__(init_cfg=init_cfg)
        self.mid_channels = int(out_channels * expand_ratio)
        # The main conv produces two mid-channel halves that are split in
        # ``forward``.
        self.main_conv = ConvModule(
            in_channels,
            2 * self.mid_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        # Fuses both halves plus the output of every bottleneck.
        self.final_conv = ConvModule(
            (2 + num_blocks) * self.mid_channels,
            out_channels,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        self.blocks = nn.ModuleList(
            DarknetBottleneck(
                self.mid_channels,
                self.mid_channels,
                expansion=1,
                kernel_size=(3, 3),
                padding=(1, 1),
                add_identity=add_identity,
                use_depthwise=False,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg) for _ in range(num_blocks))

    def forward(self, x: Tensor) -> Tensor:
        """Forward process."""
        feats = list(
            self.main_conv(x).split((self.mid_channels, self.mid_channels),
                                    1))
        for block in self.blocks:
            feats.append(block(feats[-1]))
        return self.final_conv(torch.cat(feats, 1))
| 54,506 | 35.073461 | 156 | py |
mmyolo | mmyolo-main/mmyolo/models/layers/ema.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Optional
import torch
import torch.nn as nn
from mmdet.models.layers import ExpMomentumEMA as MMDET_ExpMomentumEMA
from torch import Tensor
from mmyolo.registry import MODELS
@MODELS.register_module()
class ExpMomentumEMA(MMDET_ExpMomentumEMA):
    """Exponential moving average (EMA) with exponential momentum strategy,
    which is used in YOLO.

    Args:
        model (nn.Module): The model to be averaged.
        momentum (float): The momentum used for updating ema parameter.
            Ema's parameters are updated with the formula:
            `averaged_param = (1-momentum) * averaged_param + momentum *
            source_param`. Defaults to 0.0002.
        gamma (int): Use a larger momentum early in training and gradually
            annealing to a smaller value to update the ema model smoothly. The
            momentum is calculated as
            `(1 - momentum) * exp(-(1 + steps) / gamma) + momentum`.
            Defaults to 2000.
        interval (int): Interval between two updates. Defaults to 1.
        device (torch.device, optional): If provided, the averaged model will
            be stored on the :attr:`device`. Defaults to None.
        update_buffers (bool): if True, it will compute running averages for
            both the parameters and the buffers of the model. Defaults to
            False.
    """

    def __init__(self,
                 model: nn.Module,
                 momentum: float = 0.0002,
                 gamma: int = 2000,
                 interval: int = 1,
                 device: Optional[torch.device] = None,
                 update_buffers: bool = False):
        super().__init__(
            model=model,
            momentum=momentum,
            interval=interval,
            device=device,
            update_buffers=update_buffers)
        assert gamma > 0, f'gamma must be greater than 0, but got {gamma}'
        self.gamma = gamma
        # Note: There is no need to re-fetch every update,
        # as most models do not change their structure
        # during the training process.
        self.src_parameters = (
            model.state_dict()
            if self.update_buffers else dict(model.named_parameters()))
        if not self.update_buffers:
            # Bug fix: ``model.buffers()`` returns a one-shot generator.
            # Storing it directly meant the buffer-sync loop in
            # ``update_parameters`` ran only on the very first call and the
            # EMA model's buffers (e.g. BN running stats) were never updated
            # again. Materializing to a list keeps live references to the
            # buffer tensors, which are mutated in place by the source model.
            self.src_buffers = list(model.buffers())

    def avg_func(self, averaged_param: Tensor, source_param: Tensor,
                 steps: int):
        """Compute the moving average of the parameters using the exponential
        momentum strategy.

        Args:
            averaged_param (Tensor): The averaged parameters.
            source_param (Tensor): The source parameters.
            steps (int): The number of times the parameters have been
                updated.
        """
        # Momentum starts near 1 and decays exponentially towards
        # ``self.momentum`` with time constant ``gamma``.
        momentum = (1 - self.momentum) * math.exp(
            -float(1 + steps) / self.gamma) + self.momentum
        averaged_param.lerp_(source_param, momentum)

    def update_parameters(self, model: nn.Module):
        """Update the parameters after each training step.

        Args:
            model (nn.Module): The model of the parameter needs to be updated.
        """
        if self.steps == 0:
            # First call: copy the source parameters into the average.
            for k, p_avg in self.avg_parameters.items():
                p_avg.data.copy_(self.src_parameters[k].data)
        elif self.steps % self.interval == 0:
            for k, p_avg in self.avg_parameters.items():
                # Only floating-point tensors can be averaged.
                if p_avg.dtype.is_floating_point:
                    self.avg_func(p_avg.data, self.src_parameters[k].data,
                                  self.steps)
        if not self.update_buffers:
            # If not update the buffers,
            # keep the buffers in sync with the source model.
            for b_avg, b_src in zip(self.module.buffers(), self.src_buffers):
                b_avg.data.copy_(b_src.data)
        self.steps += 1
| 3,886 | 39.072165 | 78 | py |
mmyolo | mmyolo-main/mmyolo/models/dense_heads/rtmdet_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Sequence, Tuple
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, is_norm
from mmdet.models.task_modules.samplers import PseudoSampler
from mmdet.structures.bbox import distance2bbox
from mmdet.utils import (ConfigType, InstanceList, OptConfigType,
OptInstanceList, OptMultiConfig, reduce_mean)
from mmengine.model import (BaseModule, bias_init_with_prob, constant_init,
normal_init)
from torch import Tensor
from mmyolo.registry import MODELS, TASK_UTILS
from ..utils import gt_instances_preprocess
from .yolov5_head import YOLOv5Head
@MODELS.register_module()
class RTMDetSepBNHeadModule(BaseModule):
    """Detection Head of RTMDet.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        num_base_priors (int): The number of priors (points) at a point
            on the feature grid.  Defaults to 1.
        feat_channels (int): Number of hidden channels. Used in child classes.
            Defaults to 256
        stacked_convs (int): Number of stacking convs of the head.
            Defaults to 2.
        featmap_strides (Sequence[int]): Downsample factor of each feature map.
             Defaults to (8, 16, 32).
        share_conv (bool): Whether to share conv layers between stages.
            Defaults to True.
        pred_kernel_size (int): Kernel size of ``nn.Conv2d``. Defaults to 1.
        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
            convolution layer. Defaults to None.
        norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization
            layer. Defaults to ``dict(type='BN')``.
        act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.
            Default: dict(type='SiLU', inplace=True).
        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
            list[dict], optional): Initialization config dict.
            Defaults to None.
    """

    def __init__(
        self,
        num_classes: int,
        in_channels: int,
        widen_factor: float = 1.0,
        num_base_priors: int = 1,
        feat_channels: int = 256,
        stacked_convs: int = 2,
        featmap_strides: Sequence[int] = [8, 16, 32],
        share_conv: bool = True,
        pred_kernel_size: int = 1,
        conv_cfg: OptConfigType = None,
        norm_cfg: ConfigType = dict(type='BN'),
        act_cfg: ConfigType = dict(type='SiLU', inplace=True),
        init_cfg: OptMultiConfig = None,
    ):
        super().__init__(init_cfg=init_cfg)
        self.share_conv = share_conv
        self.num_classes = num_classes
        self.pred_kernel_size = pred_kernel_size
        # Hidden and input widths are scaled by ``widen_factor``.
        self.feat_channels = int(feat_channels * widen_factor)
        self.stacked_convs = stacked_convs
        self.num_base_priors = num_base_priors

        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.featmap_strides = featmap_strides

        self.in_channels = int(in_channels * widen_factor)

        self._init_layers()

    def _init_layers(self):
        """Initialize layers of the head.

        Builds, per feature level: a stack of cls convs, a stack of reg
        convs, and 1-conv prediction heads for classification and box
        regression.
        """
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()

        self.rtm_cls = nn.ModuleList()
        self.rtm_reg = nn.ModuleList()
        for n in range(len(self.featmap_strides)):
            cls_convs = nn.ModuleList()
            reg_convs = nn.ModuleList()
            for i in range(self.stacked_convs):
                # Only the first stacked conv consumes the input width.
                chn = self.in_channels if i == 0 else self.feat_channels
                cls_convs.append(
                    ConvModule(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
                reg_convs.append(
                    ConvModule(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
            self.cls_convs.append(cls_convs)
            self.reg_convs.append(reg_convs)

            self.rtm_cls.append(
                nn.Conv2d(
                    self.feat_channels,
                    self.num_base_priors * self.num_classes,
                    self.pred_kernel_size,
                    padding=self.pred_kernel_size // 2))
            self.rtm_reg.append(
                nn.Conv2d(
                    self.feat_channels,
                    self.num_base_priors * 4,
                    self.pred_kernel_size,
                    padding=self.pred_kernel_size // 2))

        if self.share_conv:
            # Alias only the ``.conv`` submodules across levels so the conv
            # weights are shared while each level keeps its own norm layer.
            for n in range(len(self.featmap_strides)):
                for i in range(self.stacked_convs):
                    self.cls_convs[n][i].conv = self.cls_convs[0][i].conv
                    self.reg_convs[n][i].conv = self.reg_convs[0][i].conv

    def init_weights(self) -> None:
        """Initialize weights of the head."""
        # Use prior in model initialization to improve stability
        super().init_weights()
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                normal_init(m, mean=0, std=0.01)
            if is_norm(m):
                constant_init(m, 1)
        # Bias computed from a prior probability of 0.01 so initial class
        # scores are strongly negative.
        bias_cls = bias_init_with_prob(0.01)
        for rtm_cls, rtm_reg in zip(self.rtm_cls, self.rtm_reg):
            normal_init(rtm_cls, std=0.01, bias=bias_cls)
            normal_init(rtm_reg, std=0.01)

    def forward(self, feats: Tuple[Tensor, ...]) -> tuple:
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple: Usually a tuple of classification scores and bbox prediction
            - cls_scores (list[Tensor]): Classification scores for all scale
              levels, each is a 4D-tensor, the channels number is
              num_base_priors * num_classes.
            - bbox_preds (list[Tensor]): Box energies / deltas for all scale
              levels, each is a 4D-tensor, the channels number is
              num_base_priors * 4.
        """
        cls_scores = []
        bbox_preds = []
        for idx, x in enumerate(feats):
            cls_feat = x
            reg_feat = x

            for cls_layer in self.cls_convs[idx]:
                cls_feat = cls_layer(cls_feat)
            cls_score = self.rtm_cls[idx](cls_feat)

            for reg_layer in self.reg_convs[idx]:
                reg_feat = reg_layer(reg_feat)

            reg_dist = self.rtm_reg[idx](reg_feat)
            cls_scores.append(cls_score)
            bbox_preds.append(reg_dist)
        return tuple(cls_scores), tuple(bbox_preds)
@MODELS.register_module()
class RTMDetHead(YOLOv5Head):
"""RTMDet head.
Args:
head_module(ConfigType): Base module used for RTMDetHead
prior_generator: Points generator feature maps in
2D points-based detectors.
bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder.
loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.
loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss.
train_cfg (:obj:`ConfigDict` or dict, optional): Training config of
anchor head. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of
anchor head. Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
head_module: ConfigType,
prior_generator: ConfigType = dict(
type='mmdet.MlvlPointGenerator',
offset=0,
strides=[8, 16, 32]),
bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'),
loss_cls: ConfigType = dict(
type='mmdet.QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox: ConfigType = dict(
type='mmdet.GIoULoss', loss_weight=2.0),
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
head_module=head_module,
prior_generator=prior_generator,
bbox_coder=bbox_coder,
loss_cls=loss_cls,
loss_bbox=loss_bbox,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg)
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
if self.use_sigmoid_cls:
self.cls_out_channels = self.num_classes
else:
self.cls_out_channels = self.num_classes + 1
# rtmdet doesn't need loss_obj
self.loss_obj = None
def special_init(self):
"""Since YOLO series algorithms will inherit from YOLOv5Head, but
different algorithms have special initialization process.
The special_init function is designed to deal with this situation.
"""
if self.train_cfg:
self.assigner = TASK_UTILS.build(self.train_cfg.assigner)
if self.train_cfg.get('sampler', None) is not None:
self.sampler = TASK_UTILS.build(
self.train_cfg.sampler, default_args=dict(context=self))
else:
self.sampler = PseudoSampler(context=self)
self.featmap_sizes_train = None
self.flatten_priors_train = None
    def forward(self, x: Tuple[Tensor]) -> Tuple[List]:
        """Forward features from the upstream network.

        Args:
            x (Tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            Tuple[List]: A tuple of multi-level classification scores and
            bbox predictions produced by ``self.head_module``
            (RTMDet has no objectness branch).
        """
        return self.head_module(x)
    def loss_by_feat(
            self,
            cls_scores: List[Tensor],
            bbox_preds: List[Tensor],
            batch_gt_instances: InstanceList,
            batch_img_metas: List[dict],
            batch_gt_instances_ignore: OptInstanceList = None) -> dict:
        """Compute classification and bbox regression losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level.
                Has shape (N, num_anchors * num_classes, H, W).
            bbox_preds (list[Tensor]): Decoded box for each scale
                level with shape (N, num_anchors * 4, H, W) in
                [tl_x, tl_y, br_x, br_y] format.
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image,
                e.g., image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
                Batch of gt_instances_ignore. It includes ``bboxes``
                attribute data that is ignored during training and testing.
                Defaults to None.

        Returns:
            dict[str, Tensor]: A dictionary of loss components
            (``loss_cls`` and ``loss_bbox``).
        """
        num_imgs = len(batch_img_metas)
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.prior_generator.num_levels
        # Pad per-image GT into one dense (bs, max_gt, 5) tensor:
        # column 0 is the label, columns 1-4 the xyxy box.
        gt_info = gt_instances_preprocess(batch_gt_instances, num_imgs)
        gt_labels = gt_info[:, :, :1]
        gt_bboxes = gt_info[:, :, 1:]  # xyxy
        # Padded (all-zero) boxes are masked out of assignment.
        pad_bbox_flag = (gt_bboxes.sum(-1, keepdim=True) > 0).float()
        device = cls_scores[0].device
        # If the shape does not equal, generate new one
        # (priors are cached across iterations as an optimization).
        if featmap_sizes != self.featmap_sizes_train:
            self.featmap_sizes_train = featmap_sizes
            mlvl_priors_with_stride = self.prior_generator.grid_priors(
                featmap_sizes, device=device, with_stride=True)
            self.flatten_priors_train = torch.cat(
                mlvl_priors_with_stride, dim=0)
        # Flatten all levels to (bs, total_priors, C) for the assigner.
        flatten_cls_scores = torch.cat([
            cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,
                                                  self.cls_out_channels)
            for cls_score in cls_scores
        ], 1).contiguous()
        flatten_bboxes = torch.cat([
            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
            for bbox_pred in bbox_preds
        ], 1)
        # Scale distance predictions by the per-prior stride (last column
        # of the with_stride priors), then decode to absolute xyxy boxes.
        flatten_bboxes = flatten_bboxes * self.flatten_priors_train[..., -1,
                                                                    None]
        flatten_bboxes = distance2bbox(self.flatten_priors_train[..., :2],
                                       flatten_bboxes)
        assigned_result = self.assigner(flatten_bboxes.detach(),
                                        flatten_cls_scores.detach(),
                                        self.flatten_priors_train, gt_labels,
                                        gt_bboxes, pad_bbox_flag)
        labels = assigned_result['assigned_labels'].reshape(-1)
        label_weights = assigned_result['assigned_labels_weights'].reshape(-1)
        bbox_targets = assigned_result['assigned_bboxes'].reshape(-1, 4)
        assign_metrics = assigned_result['assign_metrics'].reshape(-1)
        cls_preds = flatten_cls_scores.reshape(-1, self.num_classes)
        bbox_preds = flatten_bboxes.reshape(-1, 4)
        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
        bg_class_ind = self.num_classes
        pos_inds = ((labels >= 0)
                    & (labels < bg_class_ind)).nonzero().squeeze(1)
        # Average factor is synced across GPUs and clamped so an image
        # batch with no positives cannot divide by zero.
        avg_factor = reduce_mean(assign_metrics.sum()).clamp_(min=1).item()
        # QFL-style loss: targets are (label, soft quality score) pairs.
        loss_cls = self.loss_cls(
            cls_preds, (labels, assign_metrics),
            label_weights,
            avg_factor=avg_factor)
        if len(pos_inds) > 0:
            loss_bbox = self.loss_bbox(
                bbox_preds[pos_inds],
                bbox_targets[pos_inds],
                weight=assign_metrics[pos_inds],
                avg_factor=avg_factor)
        else:
            # Zero loss that still participates in autograd.
            loss_bbox = bbox_preds.sum() * 0
        return dict(loss_cls=loss_cls, loss_bbox=loss_bbox)
| 15,054 | 39.799458 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/dense_heads/yolov8_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import List, Sequence, Tuple, Union
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.models.utils import multi_apply
from mmdet.utils import (ConfigType, OptConfigType, OptInstanceList,
OptMultiConfig)
from mmengine.dist import get_dist_info
from mmengine.model import BaseModule
from mmengine.structures import InstanceData
from torch import Tensor
from mmyolo.registry import MODELS, TASK_UTILS
from ..utils import gt_instances_preprocess, make_divisible
from .yolov5_head import YOLOv5Head
@MODELS.register_module()
class YOLOv8HeadModule(BaseModule):
    """YOLOv8HeadModule head module used in `YOLOv8`.

    Builds a decoupled prediction head per feature level: a classification
    branch and a DFL-style regression branch that predicts a distribution
    over ``reg_max`` discrete distances per box side.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (Union[int, Sequence]): Number of channels in the input
            feature map.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        num_base_priors (int): The number of priors (points) at a point
            on the feature grid.
        featmap_strides (Sequence[int]): Downsample factor of each feature map.
            Defaults to [8, 16, 32].
        reg_max (int): Max value of integral set :math: ``{0, ..., reg_max-1}``
            in QFL setting. Defaults to 16.
        norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization
            layer. Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
            list[dict], optional): Initialization config dict.
            Defaults to None.
    """
    def __init__(self,
                 num_classes: int,
                 in_channels: Union[int, Sequence],
                 widen_factor: float = 1.0,
                 num_base_priors: int = 1,
                 featmap_strides: Sequence[int] = (8, 16, 32),
                 reg_max: int = 16,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 init_cfg: OptMultiConfig = None):
        super().__init__(init_cfg=init_cfg)
        self.num_classes = num_classes
        self.featmap_strides = featmap_strides
        self.num_levels = len(self.featmap_strides)
        self.num_base_priors = num_base_priors
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.in_channels = in_channels
        self.reg_max = reg_max
        # Scale every input channel count by ``widen_factor`` (rounded to a
        # friendly multiple by ``make_divisible``).
        in_channels = []
        for channel in self.in_channels:
            channel = make_divisible(channel, widen_factor)
            in_channels.append(channel)
        self.in_channels = in_channels
        self._init_layers()
    def init_weights(self, prior_prob=0.01):
        """Initialize the weight and bias of the YOLOv8 head.

        NOTE(review): ``prior_prob`` is accepted but unused here; the cls
        bias is set from an object-density prior instead — confirm intent.
        """
        super().init_weights()
        for reg_pred, cls_pred, stride in zip(self.reg_preds, self.cls_preds,
                                              self.featmap_strides):
            reg_pred[-1].bias.data[:] = 1.0  # box
            # cls (.01 objects, 80 classes, 640 img)
            cls_pred[-1].bias.data[:self.num_classes] = math.log(
                5 / self.num_classes / (640 / stride)**2)
    def _init_layers(self):
        """initialize conv layers in YOLOv8 head."""
        # Init decouple head: separate cls / reg prediction towers.
        self.cls_preds = nn.ModuleList()
        self.reg_preds = nn.ModuleList()
        # Branch widths are derived from the finest level's channel count.
        reg_out_channels = max(
            (16, self.in_channels[0] // 4, self.reg_max * 4))
        cls_out_channels = max(self.in_channels[0], self.num_classes)
        for i in range(self.num_levels):
            # Regression branch: two 3x3 convs then a 1x1 conv emitting
            # ``4 * reg_max`` distribution logits per location.
            self.reg_preds.append(
                nn.Sequential(
                    ConvModule(
                        in_channels=self.in_channels[i],
                        out_channels=reg_out_channels,
                        kernel_size=3,
                        stride=1,
                        padding=1,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg),
                    ConvModule(
                        in_channels=reg_out_channels,
                        out_channels=reg_out_channels,
                        kernel_size=3,
                        stride=1,
                        padding=1,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg),
                    nn.Conv2d(
                        in_channels=reg_out_channels,
                        out_channels=4 * self.reg_max,
                        kernel_size=1)))
            # Classification branch: two 3x3 convs then a 1x1 conv emitting
            # one logit per class.
            self.cls_preds.append(
                nn.Sequential(
                    ConvModule(
                        in_channels=self.in_channels[i],
                        out_channels=cls_out_channels,
                        kernel_size=3,
                        stride=1,
                        padding=1,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg),
                    ConvModule(
                        in_channels=cls_out_channels,
                        out_channels=cls_out_channels,
                        kernel_size=3,
                        stride=1,
                        padding=1,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg),
                    nn.Conv2d(
                        in_channels=cls_out_channels,
                        out_channels=self.num_classes,
                        kernel_size=1)))
        # Fixed projection [0, 1, ..., reg_max-1] used to turn the DFL
        # distribution into an expected distance; not a learnable parameter
        # and excluded from the state dict (persistent=False).
        proj = torch.arange(self.reg_max, dtype=torch.float)
        self.register_buffer('proj', proj, persistent=False)
    def forward(self, x: Tuple[Tensor]) -> Tuple[List]:
        """Forward features from the upstream network.

        Args:
            x (Tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            Tuple[List]: A tuple of multi-level classification scores, bbox
            predictions
        """
        assert len(x) == self.num_levels
        return multi_apply(self.forward_single, x, self.cls_preds,
                           self.reg_preds)
    def forward_single(self, x: torch.Tensor, cls_pred: nn.ModuleList,
                       reg_pred: nn.ModuleList) -> Tuple:
        """Forward feature of a single scale level.

        Returns cls logits and decoded distances; during training also
        returns the raw distribution logits needed for the DFL loss.
        """
        b, _, h, w = x.shape
        cls_logit = cls_pred(x)
        bbox_dist_preds = reg_pred(x)
        if self.reg_max > 1:
            # (b, 4*reg_max, h, w) -> (b, h*w, 4, reg_max)
            bbox_dist_preds = bbox_dist_preds.reshape(
                [-1, 4, self.reg_max, h * w]).permute(0, 3, 1, 2)
            # TODO: The get_flops script cannot handle the situation of
            # matmul, and needs to be fixed later
            # bbox_preds = bbox_dist_preds.softmax(3).matmul(self.proj)
            # Expected value of the per-side distance distribution.
            bbox_preds = bbox_dist_preds.softmax(3).matmul(
                self.proj.view([-1, 1])).squeeze(-1)
            bbox_preds = bbox_preds.transpose(1, 2).reshape(b, -1, h, w)
        else:
            bbox_preds = bbox_dist_preds
        if self.training:
            return cls_logit, bbox_preds, bbox_dist_preds
        else:
            return cls_logit, bbox_preds
@MODELS.register_module()
class YOLOv8Head(YOLOv5Head):
    """YOLOv8Head head used in `YOLOv8`.

    Args:
        head_module(:obj:`ConfigDict` or dict): Base module used for YOLOv8Head
        prior_generator(dict): Points generator feature maps
            in 2D points-based detectors.
        bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder.
        loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.
        loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss.
        loss_dfl (:obj:`ConfigDict` or dict): Config of Distribution Focal
            Loss.
        train_cfg (:obj:`ConfigDict` or dict, optional): Training config of
            anchor head. Defaults to None.
        test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of
            anchor head. Defaults to None.
        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
            list[dict], optional): Initialization config dict.
            Defaults to None.
    """
    def __init__(self,
                 head_module: ConfigType,
                 prior_generator: ConfigType = dict(
                     type='mmdet.MlvlPointGenerator',
                     offset=0.5,
                     strides=[8, 16, 32]),
                 bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'),
                 loss_cls: ConfigType = dict(
                     type='mmdet.CrossEntropyLoss',
                     use_sigmoid=True,
                     reduction='none',
                     loss_weight=0.5),
                 loss_bbox: ConfigType = dict(
                     type='IoULoss',
                     iou_mode='ciou',
                     bbox_format='xyxy',
                     reduction='sum',
                     loss_weight=7.5,
                     return_iou=False),
                 loss_dfl=dict(
                     type='mmdet.DistributionFocalLoss',
                     reduction='mean',
                     loss_weight=1.5 / 4),
                 train_cfg: OptConfigType = None,
                 test_cfg: OptConfigType = None,
                 init_cfg: OptMultiConfig = None):
        super().__init__(
            head_module=head_module,
            prior_generator=prior_generator,
            bbox_coder=bbox_coder,
            loss_cls=loss_cls,
            loss_bbox=loss_bbox,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            init_cfg=init_cfg)
        self.loss_dfl = MODELS.build(loss_dfl)
        # YOLOv8 doesn't need loss_obj
        self.loss_obj = None
    def special_init(self):
        """Since YOLO series algorithms will inherit from YOLOv5Head, but
        different algorithms have special initialization process.
        The special_init function is designed to deal with this situation.
        """
        if self.train_cfg:
            self.assigner = TASK_UTILS.build(self.train_cfg.assigner)
        # Add common attributes to reduce calculation
        # (priors are cached and only rebuilt when feature sizes change).
        self.featmap_sizes_train = None
        self.num_level_priors = None
        self.flatten_priors_train = None
        self.stride_tensor = None
    def loss_by_feat(
            self,
            cls_scores: Sequence[Tensor],
            bbox_preds: Sequence[Tensor],
            bbox_dist_preds: Sequence[Tensor],
            batch_gt_instances: Sequence[InstanceData],
            batch_img_metas: Sequence[dict],
            batch_gt_instances_ignore: OptInstanceList = None) -> dict:
        """Calculate the loss based on the features extracted by the detection
        head.

        Args:
            cls_scores (Sequence[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_priors * num_classes.
            bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_priors * 4.
            bbox_dist_preds (Sequence[Tensor]): Box distribution logits for
                each scale level with shape (bs, H*W, 4, reg_max).
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
                Batch of gt_instances_ignore. It includes ``bboxes`` attribute
                data that is ignored during training and testing.
                Defaults to None.

        Returns:
            dict[str, Tensor]: A dictionary of losses.
        """
        num_imgs = len(batch_img_metas)
        current_featmap_sizes = [
            cls_score.shape[2:] for cls_score in cls_scores
        ]
        # If the shape does not equal, generate new one
        if current_featmap_sizes != self.featmap_sizes_train:
            self.featmap_sizes_train = current_featmap_sizes
            mlvl_priors_with_stride = self.prior_generator.grid_priors(
                self.featmap_sizes_train,
                dtype=cls_scores[0].dtype,
                device=cls_scores[0].device,
                with_stride=True)
            self.num_level_priors = [len(n) for n in mlvl_priors_with_stride]
            self.flatten_priors_train = torch.cat(
                mlvl_priors_with_stride, dim=0)
            # Column 2 of with_stride priors is the per-prior stride.
            self.stride_tensor = self.flatten_priors_train[..., [2]]
        # gt info: dense (bs, max_gt, 5) tensor, label then xyxy box;
        # all-zero rows are padding and are masked below.
        gt_info = gt_instances_preprocess(batch_gt_instances, num_imgs)
        gt_labels = gt_info[:, :, :1]
        gt_bboxes = gt_info[:, :, 1:]  # xyxy
        pad_bbox_flag = (gt_bboxes.sum(-1, keepdim=True) > 0).float()
        # pred info: flatten all levels to (bs, total_priors, C).
        flatten_cls_preds = [
            cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,
                                                 self.num_classes)
            for cls_pred in cls_scores
        ]
        flatten_pred_bboxes = [
            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
            for bbox_pred in bbox_preds
        ]
        # (bs, n, 4 * reg_max)
        flatten_pred_dists = [
            bbox_pred_org.reshape(num_imgs, -1, self.head_module.reg_max * 4)
            for bbox_pred_org in bbox_dist_preds
        ]
        flatten_dist_preds = torch.cat(flatten_pred_dists, dim=1)
        flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1)
        flatten_pred_bboxes = torch.cat(flatten_pred_bboxes, dim=1)
        # Decode stride-relative distances to absolute xyxy boxes.
        flatten_pred_bboxes = self.bbox_coder.decode(
            self.flatten_priors_train[..., :2], flatten_pred_bboxes,
            self.stride_tensor[..., 0])
        # Assignment runs on detached predictions so it does not receive
        # gradients.
        assigned_result = self.assigner(
            (flatten_pred_bboxes.detach()).type(gt_bboxes.dtype),
            flatten_cls_preds.detach().sigmoid(), self.flatten_priors_train,
            gt_labels, gt_bboxes, pad_bbox_flag)
        assigned_bboxes = assigned_result['assigned_bboxes']
        assigned_scores = assigned_result['assigned_scores']
        fg_mask_pre_prior = assigned_result['fg_mask_pre_prior']
        # Clamped so an all-negative batch cannot divide by zero.
        assigned_scores_sum = assigned_scores.sum().clamp(min=1)
        loss_cls = self.loss_cls(flatten_cls_preds, assigned_scores).sum()
        loss_cls /= assigned_scores_sum
        # rescale bbox: bbox losses are computed in stride-normalized space.
        assigned_bboxes /= self.stride_tensor
        flatten_pred_bboxes /= self.stride_tensor
        # select positive samples mask
        num_pos = fg_mask_pre_prior.sum()
        if num_pos > 0:
            # when num_pos > 0, assigned_scores_sum will >0, so the loss_bbox
            # will not report an error
            # iou loss
            prior_bbox_mask = fg_mask_pre_prior.unsqueeze(-1).repeat([1, 1, 4])
            pred_bboxes_pos = torch.masked_select(
                flatten_pred_bboxes, prior_bbox_mask).reshape([-1, 4])
            assigned_bboxes_pos = torch.masked_select(
                assigned_bboxes, prior_bbox_mask).reshape([-1, 4])
            # Per-positive weight is the summed assigned score.
            bbox_weight = torch.masked_select(
                assigned_scores.sum(-1), fg_mask_pre_prior).unsqueeze(-1)
            loss_bbox = self.loss_bbox(
                pred_bboxes_pos, assigned_bboxes_pos,
                weight=bbox_weight) / assigned_scores_sum
            # dfl loss: targets are per-side distances in [0, reg_max-1].
            pred_dist_pos = flatten_dist_preds[fg_mask_pre_prior]
            assigned_ltrb = self.bbox_coder.encode(
                self.flatten_priors_train[..., :2] / self.stride_tensor,
                assigned_bboxes,
                max_dis=self.head_module.reg_max - 1,
                eps=0.01)
            assigned_ltrb_pos = torch.masked_select(
                assigned_ltrb, prior_bbox_mask).reshape([-1, 4])
            loss_dfl = self.loss_dfl(
                pred_dist_pos.reshape(-1, self.head_module.reg_max),
                assigned_ltrb_pos.reshape(-1),
                weight=bbox_weight.expand(-1, 4).reshape(-1),
                avg_factor=assigned_scores_sum)
        else:
            # Zero losses that still participate in autograd.
            loss_bbox = flatten_pred_bboxes.sum() * 0
            loss_dfl = flatten_pred_bboxes.sum() * 0
        _, world_size = get_dist_info()
        # Scale by batch size and world size so the reduction matches the
        # official implementation under distributed training.
        return dict(
            loss_cls=loss_cls * num_imgs * world_size,
            loss_bbox=loss_bbox * num_imgs * world_size,
            loss_dfl=loss_dfl * num_imgs * world_size)
| 16,795 | 41.307305 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/dense_heads/yolov6_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Sequence, Tuple, Union
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.models.utils import multi_apply
from mmdet.utils import (ConfigType, OptConfigType, OptInstanceList,
OptMultiConfig)
from mmengine import MessageHub
from mmengine.dist import get_dist_info
from mmengine.model import BaseModule, bias_init_with_prob
from mmengine.structures import InstanceData
from torch import Tensor
from mmyolo.registry import MODELS, TASK_UTILS
from ..utils import gt_instances_preprocess
from .yolov5_head import YOLOv5Head
@MODELS.register_module()
class YOLOv6HeadModule(BaseModule):
    """YOLOv6Head head module used in `YOLOv6.

    <https://arxiv.org/pdf/2209.02976>`_.

    Builds a decoupled head per feature level: a shared 1x1 stem, then
    separate classification and regression towers.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (Union[int, Sequence]): Number of channels in the input
            feature map.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        num_base_priors: (int): The number of priors (points) at a point
            on the feature grid.
        featmap_strides (Sequence[int]): Downsample factor of each feature map.
            Defaults to [8, 16, 32].
        norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization
            layer. Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
            list[dict], optional): Initialization config dict.
            Defaults to None.
    """
    def __init__(self,
                 num_classes: int,
                 in_channels: Union[int, Sequence],
                 widen_factor: float = 1.0,
                 num_base_priors: int = 1,
                 featmap_strides: Sequence[int] = (8, 16, 32),
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 init_cfg: OptMultiConfig = None):
        super().__init__(init_cfg=init_cfg)
        self.num_classes = num_classes
        self.featmap_strides = featmap_strides
        self.num_levels = len(self.featmap_strides)
        self.num_base_priors = num_base_priors
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        # A scalar ``in_channels`` is broadcast to every level; a sequence
        # gives per-level widths. Both are scaled by ``widen_factor``.
        if isinstance(in_channels, int):
            self.in_channels = [int(in_channels * widen_factor)
                                ] * self.num_levels
        else:
            self.in_channels = [int(i * widen_factor) for i in in_channels]
        self._init_layers()
    def _init_layers(self):
        """initialize conv layers in YOLOv6 head."""
        # Init decouple head: per-level stem + cls/reg towers + 1x1 heads.
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        self.cls_preds = nn.ModuleList()
        self.reg_preds = nn.ModuleList()
        self.stems = nn.ModuleList()
        for i in range(self.num_levels):
            self.stems.append(
                ConvModule(
                    in_channels=self.in_channels[i],
                    out_channels=self.in_channels[i],
                    kernel_size=1,
                    stride=1,
                    padding=1 // 2,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))
            self.cls_convs.append(
                ConvModule(
                    in_channels=self.in_channels[i],
                    out_channels=self.in_channels[i],
                    kernel_size=3,
                    stride=1,
                    padding=3 // 2,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))
            self.reg_convs.append(
                ConvModule(
                    in_channels=self.in_channels[i],
                    out_channels=self.in_channels[i],
                    kernel_size=3,
                    stride=1,
                    padding=3 // 2,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))
            self.cls_preds.append(
                nn.Conv2d(
                    in_channels=self.in_channels[i],
                    out_channels=self.num_base_priors * self.num_classes,
                    kernel_size=1))
            self.reg_preds.append(
                nn.Conv2d(
                    in_channels=self.in_channels[i],
                    out_channels=self.num_base_priors * 4,
                    kernel_size=1))
    def init_weights(self):
        """Initialize prediction-layer weights with a low-confidence prior."""
        super().init_weights()
        bias_init = bias_init_with_prob(0.01)
        for conv in self.cls_preds:
            conv.bias.data.fill_(bias_init)
            conv.weight.data.fill_(0.)
        for conv in self.reg_preds:
            conv.bias.data.fill_(1.0)
            conv.weight.data.fill_(0.)
    def forward(self, x: Tuple[Tensor]) -> Tuple[List]:
        """Forward features from the upstream network.

        Args:
            x (Tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            Tuple[List]: A tuple of multi-level classification scores, bbox
            predictions.
        """
        assert len(x) == self.num_levels
        return multi_apply(self.forward_single, x, self.stems, self.cls_convs,
                           self.cls_preds, self.reg_convs, self.reg_preds)
    def forward_single(self, x: Tensor, stem: nn.Module, cls_conv: nn.Module,
                       cls_pred: nn.Module, reg_conv: nn.Module,
                       reg_pred: nn.Module) -> Tuple[Tensor, Tensor]:
        """Forward feature of a single scale level."""
        # Shared stem output feeds both branches.
        y = stem(x)
        cls_x = y
        reg_x = y
        cls_feat = cls_conv(cls_x)
        reg_feat = reg_conv(reg_x)
        cls_score = cls_pred(cls_feat)
        bbox_pred = reg_pred(reg_feat)
        return cls_score, bbox_pred
@MODELS.register_module()
class YOLOv6Head(YOLOv5Head):
    """YOLOv6Head head used in `YOLOv6 <https://arxiv.org/pdf/2209.02976>`_.

    Args:
        head_module(ConfigType): Base module used for YOLOv6Head
        prior_generator(dict): Points generator feature maps
            in 2D points-based detectors.
        bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder.
        loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.
        loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss.
        train_cfg (:obj:`ConfigDict` or dict, optional): Training config of
            anchor head. Defaults to None.
        test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of
            anchor head. Defaults to None.
        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
            list[dict], optional): Initialization config dict.
            Defaults to None.
    """
    def __init__(self,
                 head_module: ConfigType,
                 prior_generator: ConfigType = dict(
                     type='mmdet.MlvlPointGenerator',
                     offset=0.5,
                     strides=[8, 16, 32]),
                 bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'),
                 loss_cls: ConfigType = dict(
                     type='mmdet.VarifocalLoss',
                     use_sigmoid=True,
                     alpha=0.75,
                     gamma=2.0,
                     iou_weighted=True,
                     reduction='sum',
                     loss_weight=1.0),
                 loss_bbox: ConfigType = dict(
                     type='IoULoss',
                     iou_mode='giou',
                     bbox_format='xyxy',
                     reduction='mean',
                     loss_weight=2.5,
                     return_iou=False),
                 train_cfg: OptConfigType = None,
                 test_cfg: OptConfigType = None,
                 init_cfg: OptMultiConfig = None):
        super().__init__(
            head_module=head_module,
            prior_generator=prior_generator,
            bbox_coder=bbox_coder,
            loss_cls=loss_cls,
            loss_bbox=loss_bbox,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            init_cfg=init_cfg)
        # yolov6 doesn't need loss_obj
        self.loss_obj = None
    def special_init(self):
        """Since YOLO series algorithms will inherit from YOLOv5Head, but
        different algorithms have special initialization process.
        The special_init function is designed to deal with this situation.
        """
        if self.train_cfg:
            # YOLOv6 warms up with a simpler assigner for the first
            # ``initial_epoch`` epochs, then switches (see loss_by_feat).
            self.initial_epoch = self.train_cfg['initial_epoch']
            self.initial_assigner = TASK_UTILS.build(
                self.train_cfg.initial_assigner)
            self.assigner = TASK_UTILS.build(self.train_cfg.assigner)
        # Add common attributes to reduce calculation
        self.featmap_sizes_train = None
        self.num_level_priors = None
        self.flatten_priors_train = None
        self.stride_tensor = None
    def loss_by_feat(
            self,
            cls_scores: Sequence[Tensor],
            bbox_preds: Sequence[Tensor],
            batch_gt_instances: Sequence[InstanceData],
            batch_img_metas: Sequence[dict],
            batch_gt_instances_ignore: OptInstanceList = None) -> dict:
        """Calculate the loss based on the features extracted by the detection
        head.

        Args:
            cls_scores (Sequence[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_priors * num_classes.
            bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_priors * 4.
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
                Batch of gt_instances_ignore. It includes ``bboxes`` attribute
                data that is ignored during training and testing.
                Defaults to None.

        Returns:
            dict[str, Tensor]: A dictionary of losses.
        """
        # get epoch information from message hub (set by the training loop)
        message_hub = MessageHub.get_current_instance()
        current_epoch = message_hub.get_info('epoch')
        num_imgs = len(batch_img_metas)
        if batch_gt_instances_ignore is None:
            batch_gt_instances_ignore = [None] * num_imgs
        current_featmap_sizes = [
            cls_score.shape[2:] for cls_score in cls_scores
        ]
        # If the shape does not equal, generate new one
        if current_featmap_sizes != self.featmap_sizes_train:
            self.featmap_sizes_train = current_featmap_sizes
            mlvl_priors_with_stride = self.prior_generator.grid_priors(
                self.featmap_sizes_train,
                dtype=cls_scores[0].dtype,
                device=cls_scores[0].device,
                with_stride=True)
            self.num_level_priors = [len(n) for n in mlvl_priors_with_stride]
            self.flatten_priors_train = torch.cat(
                mlvl_priors_with_stride, dim=0)
            # Column 2 of with_stride priors is the per-prior stride.
            self.stride_tensor = self.flatten_priors_train[..., [2]]
        # gt info: dense (bs, max_gt, 5) tensor, label then xyxy box;
        # all-zero rows are padding and are masked below.
        gt_info = gt_instances_preprocess(batch_gt_instances, num_imgs)
        gt_labels = gt_info[:, :, :1]
        gt_bboxes = gt_info[:, :, 1:]  # xyxy
        pad_bbox_flag = (gt_bboxes.sum(-1, keepdim=True) > 0).float()
        # pred info: flatten all levels to (bs, total_priors, C).
        flatten_cls_preds = [
            cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,
                                                 self.num_classes)
            for cls_pred in cls_scores
        ]
        flatten_pred_bboxes = [
            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
            for bbox_pred in bbox_preds
        ]
        flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1)
        flatten_pred_bboxes = torch.cat(flatten_pred_bboxes, dim=1)
        # Decode stride-relative distances to absolute xyxy boxes.
        flatten_pred_bboxes = self.bbox_coder.decode(
            self.flatten_priors_train[..., :2], flatten_pred_bboxes,
            self.stride_tensor[:, 0])
        pred_scores = torch.sigmoid(flatten_cls_preds)
        # Warm-up: use the simpler (prediction-free) assigner before
        # ``initial_epoch``, the prediction-aware one afterwards.
        if current_epoch < self.initial_epoch:
            assigned_result = self.initial_assigner(
                flatten_pred_bboxes.detach(), self.flatten_priors_train,
                self.num_level_priors, gt_labels, gt_bboxes, pad_bbox_flag)
        else:
            assigned_result = self.assigner(flatten_pred_bboxes.detach(),
                                            pred_scores.detach(),
                                            self.flatten_priors_train,
                                            gt_labels, gt_bboxes,
                                            pad_bbox_flag)
        assigned_bboxes = assigned_result['assigned_bboxes']
        assigned_scores = assigned_result['assigned_scores']
        fg_mask_pre_prior = assigned_result['fg_mask_pre_prior']
        # cls loss, computed in fp32 even under AMP for numerical stability
        with torch.cuda.amp.autocast(enabled=False):
            loss_cls = self.loss_cls(flatten_cls_preds, assigned_scores)
        # rescale bbox: bbox losses are computed in stride-normalized space.
        assigned_bboxes /= self.stride_tensor
        flatten_pred_bboxes /= self.stride_tensor
        # TODO: Add all_reduce makes training more stable
        assigned_scores_sum = assigned_scores.sum()
        if assigned_scores_sum > 0:
            loss_cls /= assigned_scores_sum
        # select positive samples mask
        num_pos = fg_mask_pre_prior.sum()
        if num_pos > 0:
            # when num_pos > 0, assigned_scores_sum will >0, so the loss_bbox
            # will not report an error
            # iou loss
            prior_bbox_mask = fg_mask_pre_prior.unsqueeze(-1).repeat([1, 1, 4])
            pred_bboxes_pos = torch.masked_select(
                flatten_pred_bboxes, prior_bbox_mask).reshape([-1, 4])
            assigned_bboxes_pos = torch.masked_select(
                assigned_bboxes, prior_bbox_mask).reshape([-1, 4])
            # Per-positive weight is the summed assigned score.
            bbox_weight = torch.masked_select(
                assigned_scores.sum(-1), fg_mask_pre_prior).unsqueeze(-1)
            loss_bbox = self.loss_bbox(
                pred_bboxes_pos,
                assigned_bboxes_pos,
                weight=bbox_weight,
                avg_factor=assigned_scores_sum)
        else:
            # Zero loss that still participates in autograd.
            loss_bbox = flatten_pred_bboxes.sum() * 0
        _, world_size = get_dist_info()
        # Scale by world size so the reduction matches the official
        # implementation under distributed training.
        return dict(
            loss_cls=loss_cls * world_size, loss_bbox=loss_bbox * world_size)
| 15,037 | 39.643243 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/dense_heads/yolox_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Sequence, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmdet.models.task_modules.samplers import PseudoSampler
from mmdet.models.utils import multi_apply
from mmdet.structures.bbox import bbox_xyxy_to_cxcywh
from mmdet.utils import (ConfigType, OptConfigType, OptInstanceList,
OptMultiConfig, reduce_mean)
from mmengine.model import BaseModule, bias_init_with_prob
from mmengine.structures import InstanceData
from torch import Tensor
from mmyolo.registry import MODELS, TASK_UTILS
from .yolov5_head import YOLOv5Head
@MODELS.register_module()
class YOLOXHeadModule(BaseModule):
    """YOLOXHead head module used in `YOLOX.

    `<https://arxiv.org/abs/2107.08430>`_

    Builds, per feature level, stacked cls/reg conv towers plus three 1x1
    predictors: classification, box regression and objectness.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (Union[int, Sequence]): Number of channels in the input
            feature map.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        num_base_priors (int): The number of priors (points) at a point
            on the feature grid
        stacked_convs (int): Number of stacking convs of the head.
            Defaults to 2.
        featmap_strides (Sequence[int]): Downsample factor of each feature map.
            Defaults to [8, 16, 32].
        use_depthwise (bool): Whether to depthwise separable convolution in
            blocks. Defaults to False.
        dcn_on_last_conv (bool): If true, use dcn in the last layer of
            towers. Defaults to False.
        conv_bias (bool or str): If specified as `auto`, it will be decided by
            the norm_cfg. Bias of conv will be set as True if `norm_cfg` is
            None, otherwise False. Defaults to "auto".
        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
            convolution layer. Defaults to None.
        norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization
            layer. Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
            list[dict], optional): Initialization config dict.
            Defaults to None.
    """
    def __init__(
        self,
        num_classes: int,
        in_channels: Union[int, Sequence],
        widen_factor: float = 1.0,
        num_base_priors: int = 1,
        feat_channels: int = 256,
        stacked_convs: int = 2,
        featmap_strides: Sequence[int] = [8, 16, 32],
        use_depthwise: bool = False,
        dcn_on_last_conv: bool = False,
        conv_bias: Union[bool, str] = 'auto',
        conv_cfg: OptConfigType = None,
        norm_cfg: ConfigType = dict(type='BN', momentum=0.03, eps=0.001),
        act_cfg: ConfigType = dict(type='SiLU', inplace=True),
        init_cfg: OptMultiConfig = None,
    ):
        super().__init__(init_cfg=init_cfg)
        self.num_classes = num_classes
        # Internal tower width, scaled by the model's width multiplier.
        self.feat_channels = int(feat_channels * widen_factor)
        self.stacked_convs = stacked_convs
        self.use_depthwise = use_depthwise
        self.dcn_on_last_conv = dcn_on_last_conv
        assert conv_bias == 'auto' or isinstance(conv_bias, bool)
        self.conv_bias = conv_bias
        self.num_base_priors = num_base_priors
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.featmap_strides = featmap_strides
        if isinstance(in_channels, int):
            in_channels = int(in_channels * widen_factor)
        self.in_channels = in_channels
        self._init_layers()
    def _init_layers(self):
        """Initialize heads for all level feature maps."""
        self.multi_level_cls_convs = nn.ModuleList()
        self.multi_level_reg_convs = nn.ModuleList()
        self.multi_level_conv_cls = nn.ModuleList()
        self.multi_level_conv_reg = nn.ModuleList()
        self.multi_level_conv_obj = nn.ModuleList()
        for _ in self.featmap_strides:
            self.multi_level_cls_convs.append(self._build_stacked_convs())
            self.multi_level_reg_convs.append(self._build_stacked_convs())
            conv_cls, conv_reg, conv_obj = self._build_predictor()
            self.multi_level_conv_cls.append(conv_cls)
            self.multi_level_conv_reg.append(conv_reg)
            self.multi_level_conv_obj.append(conv_obj)
    def _build_stacked_convs(self) -> nn.Sequential:
        """Initialize conv layers of a single level head."""
        conv = DepthwiseSeparableConvModule \
            if self.use_depthwise else ConvModule
        stacked_convs = []
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            # Optionally swap the last tower conv for deformable conv v2.
            if self.dcn_on_last_conv and i == self.stacked_convs - 1:
                conv_cfg = dict(type='DCNv2')
            else:
                conv_cfg = self.conv_cfg
            stacked_convs.append(
                conv(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg,
                    bias=self.conv_bias))
        return nn.Sequential(*stacked_convs)
    def _build_predictor(self) -> Tuple[nn.Module, nn.Module, nn.Module]:
        """Initialize predictor layers of a single level head."""
        conv_cls = nn.Conv2d(self.feat_channels, self.num_classes, 1)
        conv_reg = nn.Conv2d(self.feat_channels, 4, 1)
        conv_obj = nn.Conv2d(self.feat_channels, 1, 1)
        return conv_cls, conv_reg, conv_obj
    def init_weights(self):
        """Initialize weights of the head."""
        # Use prior in model initialization to improve stability
        super().init_weights()
        bias_init = bias_init_with_prob(0.01)
        for conv_cls, conv_obj in zip(self.multi_level_conv_cls,
                                      self.multi_level_conv_obj):
            conv_cls.bias.data.fill_(bias_init)
            conv_obj.bias.data.fill_(bias_init)
    def forward(self, x: Tuple[Tensor]) -> Tuple[List]:
        """Forward features from the upstream network.

        Args:
            x (Tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            Tuple[List]: A tuple of multi-level classification scores, bbox
            predictions, and objectnesses.
        """
        return multi_apply(self.forward_single, x, self.multi_level_cls_convs,
                           self.multi_level_reg_convs,
                           self.multi_level_conv_cls,
                           self.multi_level_conv_reg,
                           self.multi_level_conv_obj)
    def forward_single(self, x: Tensor, cls_convs: nn.Module,
                       reg_convs: nn.Module, conv_cls: nn.Module,
                       conv_reg: nn.Module,
                       conv_obj: nn.Module) -> Tuple[Tensor, Tensor, Tensor]:
        """Forward feature of a single scale level."""
        cls_feat = cls_convs(x)
        reg_feat = reg_convs(x)
        cls_score = conv_cls(cls_feat)
        bbox_pred = conv_reg(reg_feat)
        # Objectness shares the regression tower's features.
        objectness = conv_obj(reg_feat)
        return cls_score, bbox_pred, objectness
@MODELS.register_module()
class YOLOXHead(YOLOv5Head):
    """YOLOXHead head used in `YOLOX <https://arxiv.org/abs/2107.08430>`_.
    Args:
        head_module(ConfigType): Base module used for YOLOXHead
        prior_generator: Points generator feature maps in
            2D points-based detectors.
        loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.
        loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss.
        loss_obj (:obj:`ConfigDict` or dict): Config of objectness loss.
        loss_bbox_aux (:obj:`ConfigDict` or dict): Config of bbox aux loss.
        train_cfg (:obj:`ConfigDict` or dict, optional): Training config of
            anchor head. Defaults to None.
        test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of
            anchor head. Defaults to None.
        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
            list[dict], optional): Initialization config dict.
            Defaults to None.
    """
    def __init__(self,
                 head_module: ConfigType,
                 prior_generator: ConfigType = dict(
                     type='mmdet.MlvlPointGenerator',
                     offset=0,
                     strides=[8, 16, 32]),
                 bbox_coder: ConfigType = dict(type='YOLOXBBoxCoder'),
                 loss_cls: ConfigType = dict(
                     type='mmdet.CrossEntropyLoss',
                     use_sigmoid=True,
                     reduction='sum',
                     loss_weight=1.0),
                 loss_bbox: ConfigType = dict(
                     type='mmdet.IoULoss',
                     mode='square',
                     eps=1e-16,
                     reduction='sum',
                     loss_weight=5.0),
                 loss_obj: ConfigType = dict(
                     type='mmdet.CrossEntropyLoss',
                     use_sigmoid=True,
                     reduction='sum',
                     loss_weight=1.0),
                 loss_bbox_aux: ConfigType = dict(
                     type='mmdet.L1Loss', reduction='sum', loss_weight=1.0),
                 train_cfg: OptConfigType = None,
                 test_cfg: OptConfigType = None,
                 init_cfg: OptMultiConfig = None):
        # NOTE(review): the aux L1 loss starts disabled; presumably it is
        # switched on late in training by an external hook — confirm.
        self.use_bbox_aux = False
        # Stored as a config dict here; replaced by the built nn.Module in
        # ``special_init``.
        self.loss_bbox_aux = loss_bbox_aux
        super().__init__(
            head_module=head_module,
            prior_generator=prior_generator,
            bbox_coder=bbox_coder,
            loss_cls=loss_cls,
            loss_bbox=loss_bbox,
            loss_obj=loss_obj,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            init_cfg=init_cfg)
    def special_init(self):
        """Since YOLO series algorithms will inherit from YOLOv5Head, but
        different algorithms have special initialization process.
        The special_init function is designed to deal with this situation.
        """
        # Rebind the stored config dict to the actual built loss module.
        self.loss_bbox_aux: nn.Module = MODELS.build(self.loss_bbox_aux)
        if self.train_cfg:
            self.assigner = TASK_UTILS.build(self.train_cfg.assigner)
            # YOLOX does not support sampling
            self.sampler = PseudoSampler()
    def forward(self, x: Tuple[Tensor]) -> Tuple[List]:
        """Forward the head module on multi-level features."""
        return self.head_module(x)
    def loss_by_feat(
            self,
            cls_scores: Sequence[Tensor],
            bbox_preds: Sequence[Tensor],
            objectnesses: Sequence[Tensor],
            batch_gt_instances: Tensor,
            batch_img_metas: Sequence[dict],
            batch_gt_instances_ignore: OptInstanceList = None) -> dict:
        """Calculate the loss based on the features extracted by the detection
        head.
        Args:
            cls_scores (Sequence[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_priors * num_classes.
            bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_priors * 4.
            objectnesses (Sequence[Tensor]): Score factor for
                all scale level, each is a 4D-tensor, has shape
                (batch_size, 1, H, W).
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
                Batch of gt_instances_ignore. It includes ``bboxes`` attribute
                data that is ignored during training and testing.
                Defaults to None.
        Returns:
            dict[str, Tensor]: A dictionary of losses.
        """
        num_imgs = len(batch_img_metas)
        if batch_gt_instances_ignore is None:
            batch_gt_instances_ignore = [None] * num_imgs
        # Split the batched (all_gts, 6) tensor into per-image InstanceData.
        batch_gt_instances = self.gt_instances_preprocess(
            batch_gt_instances, len(batch_img_metas))
        featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
        mlvl_priors = self.prior_generator.grid_priors(
            featmap_sizes,
            dtype=cls_scores[0].dtype,
            device=cls_scores[0].device,
            with_stride=True)
        # Flatten every level to (num_imgs, num_priors_level, C) then concat.
        flatten_cls_preds = [
            cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,
                                                 self.num_classes)
            for cls_pred in cls_scores
        ]
        flatten_bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
            for bbox_pred in bbox_preds
        ]
        flatten_objectness = [
            objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1)
            for objectness in objectnesses
        ]
        flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1)
        flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
        flatten_objectness = torch.cat(flatten_objectness, dim=1)
        flatten_priors = torch.cat(mlvl_priors)
        # Decode with the per-prior stride (column 2 of the strided priors).
        flatten_bboxes = self.bbox_coder.decode(flatten_priors[..., :2],
                                                flatten_bbox_preds,
                                                flatten_priors[..., 2])
        # Per-image target assignment; predictions are detached so the
        # assignment itself carries no gradient.
        (pos_masks, cls_targets, obj_targets, bbox_targets, bbox_aux_target,
         num_fg_imgs) = multi_apply(
             self._get_targets_single,
             flatten_priors.unsqueeze(0).repeat(num_imgs, 1, 1),
             flatten_cls_preds.detach(), flatten_bboxes.detach(),
             flatten_objectness.detach(), batch_gt_instances, batch_img_metas,
             batch_gt_instances_ignore)
        # The experimental results show that 'reduce_mean' can improve
        # performance on the COCO dataset.
        num_pos = torch.tensor(
            sum(num_fg_imgs),
            dtype=torch.float,
            device=flatten_cls_preds.device)
        num_total_samples = max(reduce_mean(num_pos), 1.0)
        pos_masks = torch.cat(pos_masks, 0)
        cls_targets = torch.cat(cls_targets, 0)
        obj_targets = torch.cat(obj_targets, 0)
        bbox_targets = torch.cat(bbox_targets, 0)
        if self.use_bbox_aux:
            bbox_aux_target = torch.cat(bbox_aux_target, 0)
        # Objectness is supervised on all priors; cls/bbox only on positives.
        loss_obj = self.loss_obj(flatten_objectness.view(-1, 1),
                                 obj_targets) / num_total_samples
        if num_pos > 0:
            loss_cls = self.loss_cls(
                flatten_cls_preds.view(-1, self.num_classes)[pos_masks],
                cls_targets) / num_total_samples
            loss_bbox = self.loss_bbox(
                flatten_bboxes.view(-1, 4)[pos_masks],
                bbox_targets) / num_total_samples
        else:
            # Avoid cls and reg branch not participating in the gradient
            # propagation when there is no ground-truth in the images.
            # For more details, please refer to
            # https://github.com/open-mmlab/mmdetection/issues/7298
            loss_cls = flatten_cls_preds.sum() * 0
            loss_bbox = flatten_bboxes.sum() * 0
        loss_dict = dict(
            loss_cls=loss_cls, loss_bbox=loss_bbox, loss_obj=loss_obj)
        if self.use_bbox_aux:
            if num_pos > 0:
                loss_bbox_aux = self.loss_bbox_aux(
                    flatten_bbox_preds.view(-1, 4)[pos_masks],
                    bbox_aux_target) / num_total_samples
            else:
                # Avoid cls and reg branch not participating in the gradient
                # propagation when there is no ground-truth in the images.
                # For more details, please refer to
                # https://github.com/open-mmlab/mmdetection/issues/7298
                loss_bbox_aux = flatten_bbox_preds.sum() * 0
            loss_dict.update(loss_bbox_aux=loss_bbox_aux)
        return loss_dict
    @torch.no_grad()
    def _get_targets_single(
            self,
            priors: Tensor,
            cls_preds: Tensor,
            decoded_bboxes: Tensor,
            objectness: Tensor,
            gt_instances: InstanceData,
            img_meta: dict,
            gt_instances_ignore: Optional[InstanceData] = None) -> tuple:
        """Compute classification, regression, and objectness targets for
        priors in a single image.
        Args:
            priors (Tensor): All priors of one image, a 2D-Tensor with shape
                [num_priors, 4] in [cx, xy, stride_w, stride_y] format.
            cls_preds (Tensor): Classification predictions of one image,
                a 2D-Tensor with shape [num_priors, num_classes]
            decoded_bboxes (Tensor): Decoded bboxes predictions of one image,
                a 2D-Tensor with shape [num_priors, 4] in [tl_x, tl_y,
                br_x, br_y] format.
            objectness (Tensor): Objectness predictions of one image,
                a 1D-Tensor with shape [num_priors]
            gt_instances (:obj:`InstanceData`): Ground truth of instance
                annotations. It should includes ``bboxes`` and ``labels``
                attributes.
            img_meta (dict): Meta information for current image.
            gt_instances_ignore (:obj:`InstanceData`, optional): Instances
                to be ignored during training. It includes ``bboxes`` attribute
                data that is ignored during training and testing.
                Defaults to None.
        Returns:
            tuple:
                foreground_mask (list[Tensor]): Binary mask of foreground
                targets.
                cls_target (list[Tensor]): Classification targets of an image.
                obj_target (list[Tensor]): Objectness targets of an image.
                bbox_target (list[Tensor]): BBox targets of an image.
                bbox_aux_target (int): BBox aux targets of an image.
                num_pos_per_img (int): Number of positive samples in an image.
        """
        num_priors = priors.size(0)
        num_gts = len(gt_instances)
        # No target
        if num_gts == 0:
            cls_target = cls_preds.new_zeros((0, self.num_classes))
            bbox_target = cls_preds.new_zeros((0, 4))
            bbox_aux_target = cls_preds.new_zeros((0, 4))
            obj_target = cls_preds.new_zeros((num_priors, 1))
            foreground_mask = cls_preds.new_zeros(num_priors).bool()
            return (foreground_mask, cls_target, obj_target, bbox_target,
                    bbox_aux_target, 0)
        # YOLOX uses center priors with 0.5 offset to assign targets,
        # but use center priors without offset to regress bboxes.
        offset_priors = torch.cat(
            [priors[:, :2] + priors[:, 2:] * 0.5, priors[:, 2:]], dim=-1)
        # Joint cls*obj score for the assigner; ``sqrt_`` mutates ``scores``
        # in place.
        scores = cls_preds.sigmoid() * objectness.unsqueeze(1).sigmoid()
        pred_instances = InstanceData(
            bboxes=decoded_bboxes, scores=scores.sqrt_(), priors=offset_priors)
        assign_result = self.assigner.assign(
            pred_instances=pred_instances,
            gt_instances=gt_instances,
            gt_instances_ignore=gt_instances_ignore)
        sampling_result = self.sampler.sample(assign_result, pred_instances,
                                              gt_instances)
        pos_inds = sampling_result.pos_inds
        num_pos_per_img = pos_inds.size(0)
        pos_ious = assign_result.max_overlaps[pos_inds]
        # IOU aware classification score
        cls_target = F.one_hot(sampling_result.pos_gt_labels,
                               self.num_classes) * pos_ious.unsqueeze(-1)
        obj_target = torch.zeros_like(objectness).unsqueeze(-1)
        obj_target[pos_inds] = 1
        bbox_target = sampling_result.pos_gt_bboxes
        bbox_aux_target = cls_preds.new_zeros((num_pos_per_img, 4))
        if self.use_bbox_aux:
            bbox_aux_target = self._get_bbox_aux_target(
                bbox_aux_target, bbox_target, priors[pos_inds])
        foreground_mask = torch.zeros_like(objectness).to(torch.bool)
        foreground_mask[pos_inds] = 1
        return (foreground_mask, cls_target, obj_target, bbox_target,
                bbox_aux_target, num_pos_per_img)
    def _get_bbox_aux_target(self,
                             bbox_aux_target: Tensor,
                             gt_bboxes: Tensor,
                             priors: Tensor,
                             eps: float = 1e-8) -> Tensor:
        """Convert gt bboxes to center offset and log width height."""
        gt_cxcywh = bbox_xyxy_to_cxcywh(gt_bboxes)
        # Center offsets are normalized by the prior stride; sizes are
        # log-encoded relative to the stride (eps guards log(0)).
        bbox_aux_target[:, :2] = (gt_cxcywh[:, :2] -
                                  priors[:, :2]) / priors[:, 2:]
        bbox_aux_target[:,
                        2:] = torch.log(gt_cxcywh[:, 2:] / priors[:, 2:] + eps)
        return bbox_aux_target
    @staticmethod
    def gt_instances_preprocess(batch_gt_instances: Tensor,
                                batch_size: int) -> List[InstanceData]:
        """Split batch_gt_instances with batch size.
        Args:
            batch_gt_instances (Tensor): Ground truth
                a 2D-Tensor for whole batch, shape [all_gt_bboxes, 6]
            batch_size (int): Batch size.
        Returns:
            List: batch gt instances data, shape [batch_size, InstanceData]
        """
        # faster version
        # Column layout used below: col 0 = image index, col 1 = label,
        # cols 2: = bbox coordinates.
        batch_instance_list = []
        for i in range(batch_size):
            batch_gt_instance_ = InstanceData()
            single_batch_instance = \
                batch_gt_instances[batch_gt_instances[:, 0] == i, :]
            batch_gt_instance_.bboxes = single_batch_instance[:, 2:]
            batch_gt_instance_.labels = single_batch_instance[:, 1]
            batch_instance_list.append(batch_gt_instance_)
        return batch_instance_list
| 22,508 | 42.706796 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/dense_heads/rtmdet_ins_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, is_norm
from mmcv.ops import batched_nms
from mmdet.models.utils import filter_scores_and_topk
from mmdet.structures.bbox import get_box_tensor, get_box_wh, scale_boxes
from mmdet.utils import (ConfigType, InstanceList, OptConfigType,
OptInstanceList, OptMultiConfig)
from mmengine import ConfigDict
from mmengine.model import (BaseModule, bias_init_with_prob, constant_init,
normal_init)
from mmengine.structures import InstanceData
from torch import Tensor
from mmyolo.registry import MODELS
from .rtmdet_head import RTMDetHead, RTMDetSepBNHeadModule
class MaskFeatModule(BaseModule):
    """Mask feature head used in RTMDet-Ins. Copy from mmdet.

    Fuses the multi-level FPN features into one map at the finest
    resolution, refines it with a small conv tower, and projects it to
    ``num_prototypes`` mask prototype channels.

    Args:
        in_channels (int): Number of channels in the input feature map.
        feat_channels (int): Number of hidden channels of the mask feature
            map branch.
        stacked_convs (int): Number of convs in mask feature branch.
        num_levels (int): The starting feature map level from RPN that
            will be used to predict the mask feature map.
        num_prototypes (int): Number of output channel of the mask feature
            map branch. This is the channel count of the mask
            feature map that to be dynamically convolved with the predicted
            kernel.
        act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.
            Default: dict(type='ReLU', inplace=True)
        norm_cfg (dict): Config dict for normalization layer. Default: None.
    """
    def __init__(
        self,
        in_channels: int,
        feat_channels: int = 256,
        stacked_convs: int = 4,
        num_levels: int = 3,
        num_prototypes: int = 8,
        act_cfg: ConfigType = dict(type='ReLU', inplace=True),
        norm_cfg: ConfigType = dict(type='BN')
    ) -> None:
        super().__init__(init_cfg=None)
        self.num_levels = num_levels
        # 1x1 conv squeezing the channel-concatenated levels back down.
        self.fusion_conv = nn.Conv2d(num_levels * in_channels, in_channels, 1)
        self.stacked_convs = nn.Sequential(*[
            ConvModule(
                in_channels if idx == 0 else feat_channels,
                feat_channels,
                3,
                padding=1,
                act_cfg=act_cfg,
                norm_cfg=norm_cfg) for idx in range(stacked_convs)
        ])
        self.projection = nn.Conv2d(
            feat_channels, num_prototypes, kernel_size=1)
    def forward(self, features: Tuple[Tensor, ...]) -> Tensor:
        """Fuse multi-level features and predict mask prototype features."""
        target_size = features[0].shape[-2:]
        # Upsample every deeper level to the finest resolution, then fuse.
        resized = [features[0]] + [
            F.interpolate(feat, size=target_size, mode='bilinear')
            for feat in features[1:self.num_levels]
        ]
        fused = self.fusion_conv(torch.cat(resized, dim=1))
        return self.projection(self.stacked_convs(fused))
@MODELS.register_module()
class RTMDetInsSepBNHeadModule(RTMDetSepBNHeadModule):
    """Detection and Instance Segmentation Head of RTMDet.
    Args:
        num_classes (int): Number of categories excluding the background
            category.
        num_prototypes (int): Number of mask prototype features extracted
            from the mask head. Defaults to 8.
        dyconv_channels (int): Channel of the dynamic conv layers.
            Defaults to 8.
        num_dyconvs (int): Number of the dynamic convolution layers.
            Defaults to 3.
        use_sigmoid_cls (bool): Use sigmoid for class prediction.
            Defaults to True.
    """
    def __init__(self,
                 num_classes: int,
                 *args,
                 num_prototypes: int = 8,
                 dyconv_channels: int = 8,
                 num_dyconvs: int = 3,
                 use_sigmoid_cls: bool = True,
                 **kwargs):
        self.num_prototypes = num_prototypes
        self.num_dyconvs = num_dyconvs
        self.dyconv_channels = dyconv_channels
        self.use_sigmoid_cls = use_sigmoid_cls
        # Sigmoid classification needs no extra background channel.
        if self.use_sigmoid_cls:
            self.cls_out_channels = num_classes
        else:
            self.cls_out_channels = num_classes + 1
        super().__init__(num_classes=num_classes, *args, **kwargs)
    def _init_layers(self):
        """Initialize layers of the head."""
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        self.kernel_convs = nn.ModuleList()
        self.rtm_cls = nn.ModuleList()
        self.rtm_reg = nn.ModuleList()
        self.rtm_kernel = nn.ModuleList()
        self.rtm_obj = nn.ModuleList()
        # calculate num dynamic parameters
        weight_nums, bias_nums = [], []
        for i in range(self.num_dyconvs):
            if i == 0:
                # First dynamic conv consumes the prototype channels plus 2
                # relative-coordinate channels (see
                # RTMDetInsSepBNHead._mask_predict_by_feat).
                weight_nums.append(
                    (self.num_prototypes + 2) * self.dyconv_channels)
                bias_nums.append(self.dyconv_channels)
            elif i == self.num_dyconvs - 1:
                # Last dynamic conv outputs a single-channel mask logit.
                weight_nums.append(self.dyconv_channels)
                bias_nums.append(1)
            else:
                weight_nums.append(self.dyconv_channels * self.dyconv_channels)
                bias_nums.append(self.dyconv_channels)
        self.weight_nums = weight_nums
        self.bias_nums = bias_nums
        self.num_gen_params = sum(weight_nums) + sum(bias_nums)
        pred_pad_size = self.pred_kernel_size // 2
        for n in range(len(self.featmap_strides)):
            cls_convs = nn.ModuleList()
            reg_convs = nn.ModuleList()
            kernel_convs = nn.ModuleList()
            for i in range(self.stacked_convs):
                chn = self.in_channels if i == 0 else self.feat_channels
                cls_convs.append(
                    ConvModule(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
                reg_convs.append(
                    ConvModule(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
                kernel_convs.append(
                    ConvModule(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        act_cfg=self.act_cfg))
            self.cls_convs.append(cls_convs)
            # NOTE(review): ``reg_convs`` built above is discarded and the cls
            # tower is appended again, so cls and reg share one tower per
            # level. This mirrors upstream mmdet, but looks unintentional —
            # confirm before changing (it affects checkpoint layout).
            self.reg_convs.append(cls_convs)
            self.kernel_convs.append(kernel_convs)
            self.rtm_cls.append(
                nn.Conv2d(
                    self.feat_channels,
                    self.num_base_priors * self.cls_out_channels,
                    self.pred_kernel_size,
                    padding=pred_pad_size))
            self.rtm_reg.append(
                nn.Conv2d(
                    self.feat_channels,
                    self.num_base_priors * 4,
                    self.pred_kernel_size,
                    padding=pred_pad_size))
            self.rtm_kernel.append(
                nn.Conv2d(
                    self.feat_channels,
                    self.num_gen_params,
                    self.pred_kernel_size,
                    padding=pred_pad_size))
        if self.share_conv:
            # Share the conv weights (only the ``.conv`` submodules, not the
            # norm layers) across all feature levels.
            for n in range(len(self.featmap_strides)):
                for i in range(self.stacked_convs):
                    self.cls_convs[n][i].conv = self.cls_convs[0][i].conv
                    self.reg_convs[n][i].conv = self.reg_convs[0][i].conv
        self.mask_head = MaskFeatModule(
            in_channels=self.in_channels,
            feat_channels=self.feat_channels,
            stacked_convs=4,
            num_levels=len(self.featmap_strides),
            num_prototypes=self.num_prototypes,
            act_cfg=self.act_cfg,
            norm_cfg=self.norm_cfg)
    def init_weights(self) -> None:
        """Initialize weights of the head."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                normal_init(m, mean=0, std=0.01)
            if is_norm(m):
                constant_init(m, 1)
        bias_cls = bias_init_with_prob(0.01)
        # rtm_kernel layers keep the generic normal init from the loop above;
        # only cls/reg biases get special values here.
        for rtm_cls, rtm_reg, rtm_kernel in zip(self.rtm_cls, self.rtm_reg,
                                                self.rtm_kernel):
            normal_init(rtm_cls, std=0.01, bias=bias_cls)
            normal_init(rtm_reg, std=0.01, bias=1)
    def forward(self, feats: Tuple[Tensor, ...]) -> tuple:
        """Forward features from the upstream network.
        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.
        Returns:
            tuple: Usually a tuple of classification scores and bbox prediction
            - cls_scores (list[Tensor]): Classification scores for all scale
              levels, each is a 4D-tensor, the channels number is
              num_base_priors * num_classes.
            - bbox_preds (list[Tensor]): Box energies / deltas for all scale
              levels, each is a 4D-tensor, the channels number is
              num_base_priors * 4.
            - kernel_preds (list[Tensor]): Dynamic conv kernels for all scale
              levels, each is a 4D-tensor, the channels number is
              num_gen_params.
            - mask_feat (Tensor): Mask prototype features.
              Has shape (batch_size, num_prototypes, H, W).
        """
        mask_feat = self.mask_head(feats)
        cls_scores = []
        bbox_preds = []
        kernel_preds = []
        for idx, (x, stride) in enumerate(zip(feats, self.featmap_strides)):
            cls_feat = x
            reg_feat = x
            kernel_feat = x
            for cls_layer in self.cls_convs[idx]:
                cls_feat = cls_layer(cls_feat)
            cls_score = self.rtm_cls[idx](cls_feat)
            for kernel_layer in self.kernel_convs[idx]:
                kernel_feat = kernel_layer(kernel_feat)
            kernel_pred = self.rtm_kernel[idx](kernel_feat)
            for reg_layer in self.reg_convs[idx]:
                reg_feat = reg_layer(reg_feat)
            reg_dist = self.rtm_reg[idx](reg_feat)
            cls_scores.append(cls_score)
            bbox_preds.append(reg_dist)
            kernel_preds.append(kernel_pred)
        return tuple(cls_scores), tuple(bbox_preds), tuple(
            kernel_preds), mask_feat
@MODELS.register_module()
class RTMDetInsSepBNHead(RTMDetHead):
"""RTMDet Instance Segmentation head.
Args:
head_module(ConfigType): Base module used for RTMDetInsSepBNHead
prior_generator: Points generator feature maps in
2D points-based detectors.
bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder.
loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.
loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss.
loss_mask (:obj:`ConfigDict` or dict): Config of mask loss.
train_cfg (:obj:`ConfigDict` or dict, optional): Training config of
anchor head. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of
anchor head. Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
def __init__(self,
head_module: ConfigType,
prior_generator: ConfigType = dict(
type='mmdet.MlvlPointGenerator',
offset=0,
strides=[8, 16, 32]),
bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'),
loss_cls: ConfigType = dict(
type='mmdet.QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox: ConfigType = dict(
type='mmdet.GIoULoss', loss_weight=2.0),
loss_mask=dict(
type='mmdet.DiceLoss',
loss_weight=2.0,
eps=5e-6,
reduction='mean'),
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
init_cfg: OptMultiConfig = None):
super().__init__(
head_module=head_module,
prior_generator=prior_generator,
bbox_coder=bbox_coder,
loss_cls=loss_cls,
loss_bbox=loss_bbox,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg)
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
if isinstance(self.head_module, RTMDetInsSepBNHeadModule):
assert self.use_sigmoid_cls == self.head_module.use_sigmoid_cls
self.loss_mask = MODELS.build(loss_mask)
def predict_by_feat(self,
cls_scores: List[Tensor],
bbox_preds: List[Tensor],
kernel_preds: List[Tensor],
mask_feats: Tensor,
score_factors: Optional[List[Tensor]] = None,
batch_img_metas: Optional[List[dict]] = None,
cfg: Optional[ConfigDict] = None,
rescale: bool = True,
with_nms: bool = True) -> List[InstanceData]:
"""Transform a batch of output features extracted from the head into
bbox results.
Note: When score_factors is not None, the cls_scores are
usually multiplied by it then obtain the real score used in NMS.
Args:
cls_scores (list[Tensor]): Classification scores for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * num_classes, H, W).
bbox_preds (list[Tensor]): Box energies / deltas for all
scale levels, each is a 4D-tensor, has shape
(batch_size, num_priors * 4, H, W).
kernel_preds (list[Tensor]): Kernel predictions of dynamic
convs for all scale levels, each is a 4D-tensor, has shape
(batch_size, num_params, H, W).
mask_feats (Tensor): Mask prototype features extracted from the
mask head, has shape (batch_size, num_prototypes, H, W).
score_factors (list[Tensor], optional): Score factor for
all scale level, each is a 4D-tensor, has shape
(batch_size, num_priors * 1, H, W). Defaults to None.
batch_img_metas (list[dict], Optional): Batch image meta info.
Defaults to None.
cfg (ConfigDict, optional): Test / postprocessing
configuration, if None, test_cfg would be used.
Defaults to None.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
list[:obj:`InstanceData`]: Object detection and instance
segmentation results of each image after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, h, w).
"""
cfg = self.test_cfg if cfg is None else cfg
cfg = copy.deepcopy(cfg)
multi_label = cfg.multi_label
multi_label &= self.num_classes > 1
cfg.multi_label = multi_label
num_imgs = len(batch_img_metas)
featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
# If the shape does not change, use the previous mlvl_priors
if featmap_sizes != self.featmap_sizes:
self.mlvl_priors = self.prior_generator.grid_priors(
featmap_sizes,
dtype=cls_scores[0].dtype,
device=cls_scores[0].device,
with_stride=True)
self.featmap_sizes = featmap_sizes
flatten_priors = torch.cat(self.mlvl_priors)
mlvl_strides = [
flatten_priors.new_full(
(featmap_size.numel() * self.num_base_priors, ), stride) for
featmap_size, stride in zip(featmap_sizes, self.featmap_strides)
]
flatten_stride = torch.cat(mlvl_strides)
# flatten cls_scores, bbox_preds
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,
self.num_classes)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
for bbox_pred in bbox_preds
]
flatten_kernel_preds = [
kernel_pred.permute(0, 2, 3,
1).reshape(num_imgs, -1,
self.head_module.num_gen_params)
for kernel_pred in kernel_preds
]
flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()
flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
flatten_decoded_bboxes = self.bbox_coder.decode(
flatten_priors[..., :2].unsqueeze(0), flatten_bbox_preds,
flatten_stride)
flatten_kernel_preds = torch.cat(flatten_kernel_preds, dim=1)
results_list = []
for (bboxes, scores, kernel_pred, mask_feat,
img_meta) in zip(flatten_decoded_bboxes, flatten_cls_scores,
flatten_kernel_preds, mask_feats,
batch_img_metas):
ori_shape = img_meta['ori_shape']
scale_factor = img_meta['scale_factor']
if 'pad_param' in img_meta:
pad_param = img_meta['pad_param']
else:
pad_param = None
score_thr = cfg.get('score_thr', -1)
if scores.shape[0] == 0:
empty_results = InstanceData()
empty_results.bboxes = bboxes
empty_results.scores = scores[:, 0]
empty_results.labels = scores[:, 0].int()
h, w = ori_shape[:2] if rescale else img_meta['img_shape'][:2]
empty_results.masks = torch.zeros(
size=(0, h, w), dtype=torch.bool, device=bboxes.device)
results_list.append(empty_results)
continue
nms_pre = cfg.get('nms_pre', 100000)
if cfg.multi_label is False:
scores, labels = scores.max(1, keepdim=True)
scores, _, keep_idxs, results = filter_scores_and_topk(
scores,
score_thr,
nms_pre,
results=dict(
labels=labels[:, 0],
kernel_pred=kernel_pred,
priors=flatten_priors))
labels = results['labels']
kernel_pred = results['kernel_pred']
priors = results['priors']
else:
out = filter_scores_and_topk(
scores,
score_thr,
nms_pre,
results=dict(
kernel_pred=kernel_pred, priors=flatten_priors))
scores, labels, keep_idxs, filtered_results = out
kernel_pred = filtered_results['kernel_pred']
priors = filtered_results['priors']
results = InstanceData(
scores=scores,
labels=labels,
bboxes=bboxes[keep_idxs],
kernels=kernel_pred,
priors=priors)
if rescale:
if pad_param is not None:
results.bboxes -= results.bboxes.new_tensor([
pad_param[2], pad_param[0], pad_param[2], pad_param[0]
])
results.bboxes /= results.bboxes.new_tensor(
scale_factor).repeat((1, 2))
if cfg.get('yolox_style', False):
# do not need max_per_img
cfg.max_per_img = len(results)
results = self._bbox_mask_post_process(
results=results,
mask_feat=mask_feat,
cfg=cfg,
rescale_bbox=False,
rescale_mask=rescale,
with_nms=with_nms,
pad_param=pad_param,
img_meta=img_meta)
results.bboxes[:, 0::2].clamp_(0, ori_shape[1])
results.bboxes[:, 1::2].clamp_(0, ori_shape[0])
results_list.append(results)
return results_list
def _bbox_mask_post_process(
self,
results: InstanceData,
mask_feat: Tensor,
cfg: ConfigDict,
rescale_bbox: bool = False,
rescale_mask: bool = True,
with_nms: bool = True,
pad_param: Optional[np.ndarray] = None,
img_meta: Optional[dict] = None) -> InstanceData:
"""bbox and mask post-processing method.
The boxes would be rescaled to the original image scale and do
the nms operation. Usually `with_nms` is False is used for aug test.
Args:
results (:obj:`InstaceData`): Detection instance results,
each item has shape (num_bboxes, ).
mask_feat (Tensor): Mask prototype features extracted from the
mask head, has shape (batch_size, num_prototypes, H, W).
cfg (ConfigDict): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale_bbox (bool): If True, return boxes in original image space.
Default to False.
rescale_mask (bool): If True, return masks in original image space.
Default to True.
with_nms (bool): If True, do nms before return boxes.
Default to True.
img_meta (dict, optional): Image meta info. Defaults to None.
Returns:
:obj:`InstanceData`: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, h, w).
"""
if rescale_bbox:
assert img_meta.get('scale_factor') is not None
scale_factor = [1 / s for s in img_meta['scale_factor']]
results.bboxes = scale_boxes(results.bboxes, scale_factor)
if hasattr(results, 'score_factors'):
# TODO: Add sqrt operation in order to be consistent with
# the paper.
score_factors = results.pop('score_factors')
results.scores = results.scores * score_factors
# filter small size bboxes
if cfg.get('min_bbox_size', -1) >= 0:
w, h = get_box_wh(results.bboxes)
valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size)
if not valid_mask.all():
results = results[valid_mask]
# TODO: deal with `with_nms` and `nms_cfg=None` in test_cfg
assert with_nms, 'with_nms must be True for RTMDet-Ins'
if results.bboxes.numel() > 0:
bboxes = get_box_tensor(results.bboxes)
det_bboxes, keep_idxs = batched_nms(bboxes, results.scores,
results.labels, cfg.nms)
results = results[keep_idxs]
# some nms would reweight the score, such as softnms
results.scores = det_bboxes[:, -1]
results = results[:cfg.max_per_img]
# process masks
mask_logits = self._mask_predict_by_feat(mask_feat,
results.kernels,
results.priors)
stride = self.prior_generator.strides[0][0]
mask_logits = F.interpolate(
mask_logits.unsqueeze(0), scale_factor=stride, mode='bilinear')
if rescale_mask:
# TODO: When use mmdet.Resize or mmdet.Pad, will meet bug
# Use img_meta to crop and resize
ori_h, ori_w = img_meta['ori_shape'][:2]
if isinstance(pad_param, np.ndarray):
pad_param = pad_param.astype(np.int32)
crop_y1, crop_y2 = pad_param[
0], mask_logits.shape[-2] - pad_param[1]
crop_x1, crop_x2 = pad_param[
2], mask_logits.shape[-1] - pad_param[3]
mask_logits = mask_logits[..., crop_y1:crop_y2,
crop_x1:crop_x2]
mask_logits = F.interpolate(
mask_logits,
size=[ori_h, ori_w],
mode='bilinear',
align_corners=False)
masks = mask_logits.sigmoid().squeeze(0)
masks = masks > cfg.mask_thr_binary
results.masks = masks
else:
h, w = img_meta['ori_shape'][:2] if rescale_mask else img_meta[
'img_shape'][:2]
results.masks = torch.zeros(
size=(results.bboxes.shape[0], h, w),
dtype=torch.bool,
device=results.bboxes.device)
return results
def _mask_predict_by_feat(self, mask_feat: Tensor, kernels: Tensor,
priors: Tensor) -> Tensor:
"""Generate mask logits from mask features with dynamic convs.
Args:
mask_feat (Tensor): Mask prototype features.
Has shape (num_prototypes, H, W).
kernels (Tensor): Kernel parameters for each instance.
Has shape (num_instance, num_params)
priors (Tensor): Center priors for each instance.
Has shape (num_instance, 4).
Returns:
Tensor: Instance segmentation masks for each instance.
Has shape (num_instance, H, W).
"""
num_inst = kernels.shape[0]
h, w = mask_feat.size()[-2:]
if num_inst < 1:
return torch.empty(
size=(num_inst, h, w),
dtype=mask_feat.dtype,
device=mask_feat.device)
if len(mask_feat.shape) < 4:
mask_feat.unsqueeze(0)
coord = self.prior_generator.single_level_grid_priors(
(h, w), level_idx=0, device=mask_feat.device).reshape(1, -1, 2)
num_inst = priors.shape[0]
points = priors[:, :2].reshape(-1, 1, 2)
strides = priors[:, 2:].reshape(-1, 1, 2)
relative_coord = (points - coord).permute(0, 2, 1) / (
strides[..., 0].reshape(-1, 1, 1) * 8)
relative_coord = relative_coord.reshape(num_inst, 2, h, w)
mask_feat = torch.cat(
[relative_coord,
mask_feat.repeat(num_inst, 1, 1, 1)], dim=1)
weights, biases = self.parse_dynamic_params(kernels)
n_layers = len(weights)
x = mask_feat.reshape(1, -1, h, w)
for i, (weight, bias) in enumerate(zip(weights, biases)):
x = F.conv2d(
x, weight, bias=bias, stride=1, padding=0, groups=num_inst)
if i < n_layers - 1:
x = F.relu(x)
x = x.reshape(num_inst, h, w)
return x
def parse_dynamic_params(self, flatten_kernels: Tensor) -> tuple:
"""split kernel head prediction to conv weight and bias."""
n_inst = flatten_kernels.size(0)
n_layers = len(self.head_module.weight_nums)
params_splits = list(
torch.split_with_sizes(
flatten_kernels,
self.head_module.weight_nums + self.head_module.bias_nums,
dim=1))
weight_splits = params_splits[:n_layers]
bias_splits = params_splits[n_layers:]
for i in range(n_layers):
if i < n_layers - 1:
weight_splits[i] = weight_splits[i].reshape(
n_inst * self.head_module.dyconv_channels, -1, 1, 1)
bias_splits[i] = bias_splits[i].reshape(
n_inst * self.head_module.dyconv_channels)
else:
weight_splits[i] = weight_splits[i].reshape(n_inst, -1, 1, 1)
bias_splits[i] = bias_splits[i].reshape(n_inst)
return weight_splits, bias_splits
    def loss_by_feat(
            self,
            cls_scores: List[Tensor],
            bbox_preds: List[Tensor],
            batch_gt_instances: InstanceList,
            batch_img_metas: List[dict],
            batch_gt_instances_ignore: OptInstanceList = None) -> dict:
        """Calculate the loss from head outputs.

        Deliberately unimplemented: subclasses that support training must
        override this method.
        """
        raise NotImplementedError
| 30,484 | 40.990358 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/dense_heads/yolov5_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import math
from typing import List, Optional, Sequence, Tuple, Union
import torch
import torch.nn as nn
from mmdet.models.dense_heads.base_dense_head import BaseDenseHead
from mmdet.models.utils import filter_scores_and_topk, multi_apply
from mmdet.structures.bbox import bbox_overlaps
from mmdet.utils import (ConfigType, OptConfigType, OptInstanceList,
OptMultiConfig)
from mmengine.config import ConfigDict
from mmengine.dist import get_dist_info
from mmengine.logging import print_log
from mmengine.model import BaseModule
from mmengine.structures import InstanceData
from torch import Tensor
from mmyolo.registry import MODELS, TASK_UTILS
from ..utils import make_divisible
def get_prior_xy_info(index: int, num_base_priors: int,
                      featmap_sizes: Sequence[int]) -> Tuple[int, int, int]:
    """Get prior index and xy index in feature map by flatten index.

    Args:
        index (int): Flattened position, laid out as
            ``(grid_y * featmap_w + grid_x) * num_base_priors + prior``.
            A ``Tensor`` of indices also works, elementwise.
        num_base_priors (int): Number of priors per feature-map cell.
        featmap_sizes (Sequence[int]): Feature map size as ``(h, w)``;
            only the width is used.

    Returns:
        Tuple[int, int, int]: ``(prior_index, grid_x, grid_y)``.
    """
    # Fix: ``featmap_sizes`` was annotated as ``int`` but is unpacked into
    # (h, w) below; annotate it as a sequence to match actual usage.
    _, featmap_w = featmap_sizes
    priors = index % num_base_priors
    xy_index = index // num_base_priors
    grid_y = xy_index // featmap_w
    grid_x = xy_index % featmap_w
    return priors, grid_x, grid_y
@MODELS.register_module()
class YOLOv5HeadModule(BaseModule):
    """YOLOv5Head head module used in `YOLOv5`.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (Union[int, Sequence]): Number of channels in the input
            feature map.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        num_base_priors (int): The number of priors (points) at a point
            on the feature grid.
        featmap_strides (Sequence[int]): Downsample factor of each feature map.
            Defaults to (8, 16, 32).
        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
            list[dict], optional): Initialization config dict.
            Defaults to None.
    """

    def __init__(self,
                 num_classes: int,
                 in_channels: Union[int, Sequence],
                 widen_factor: float = 1.0,
                 num_base_priors: int = 3,
                 featmap_strides: Sequence[int] = (8, 16, 32),
                 init_cfg: OptMultiConfig = None):
        super().__init__(init_cfg=init_cfg)
        self.num_classes = num_classes
        self.widen_factor = widen_factor
        self.featmap_strides = featmap_strides
        # Per-prior prediction vector: 4 bbox values + 1 objectness + classes.
        self.num_out_attrib = 5 + self.num_classes
        self.num_levels = len(self.featmap_strides)
        self.num_base_priors = num_base_priors
        if isinstance(in_channels, int):
            # A single int is width-scaled and broadcast to every level.
            self.in_channels = [make_divisible(in_channels, widen_factor)
                                ] * self.num_levels
        else:
            self.in_channels = [
                make_divisible(i, widen_factor) for i in in_channels
            ]
        self._init_layers()

    def _init_layers(self):
        """initialize conv layers in YOLOv5 head."""
        # One 1x1 prediction conv per feature level.
        self.convs_pred = nn.ModuleList()
        for i in range(self.num_levels):
            conv_pred = nn.Conv2d(self.in_channels[i],
                                  self.num_base_priors * self.num_out_attrib,
                                  1)
            self.convs_pred.append(conv_pred)

    def init_weights(self):
        """Initialize the bias of YOLOv5 head."""
        super().init_weights()
        for mi, s in zip(self.convs_pred, self.featmap_strides):  # from
            b = mi.bias.data.view(self.num_base_priors, -1)
            # obj (8 objects per 640 image)
            b.data[:, 4] += math.log(8 / (640 / s)**2)
            # cls bias prior ~ 0.6 / num_classes; 0.999999 avoids division
            # by zero when num_classes == 1.
            b.data[:, 5:] += math.log(0.6 / (self.num_classes - 0.999999))
            mi.bias.data = b.view(-1)

    def forward(self, x: Tuple[Tensor]) -> Tuple[List]:
        """Forward features from the upstream network.

        Args:
            x (Tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            Tuple[List]: A tuple of multi-level classification scores, bbox
            predictions, and objectnesses.
        """
        assert len(x) == self.num_levels
        return multi_apply(self.forward_single, x, self.convs_pred)

    def forward_single(self, x: Tensor,
                       convs: nn.Module) -> Tuple[Tensor, Tensor, Tensor]:
        """Forward feature of a single scale level."""
        pred_map = convs(x)
        bs, _, ny, nx = pred_map.shape
        # (bs, priors, 5 + num_classes, ny, nx) -> three flattened maps.
        pred_map = pred_map.view(bs, self.num_base_priors, self.num_out_attrib,
                                 ny, nx)
        cls_score = pred_map[:, :, 5:, ...].reshape(bs, -1, ny, nx)
        bbox_pred = pred_map[:, :, :4, ...].reshape(bs, -1, ny, nx)
        objectness = pred_map[:, :, 4:5, ...].reshape(bs, -1, ny, nx)
        return cls_score, bbox_pred, objectness
@MODELS.register_module()
class YOLOv5Head(BaseDenseHead):
"""YOLOv5Head head used in `YOLOv5`.
Args:
head_module(ConfigType): Base module used for YOLOv5Head
prior_generator(dict): Points generator feature maps in
2D points-based detectors.
bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder.
loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.
loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss.
loss_obj (:obj:`ConfigDict` or dict): Config of objectness loss.
prior_match_thr (float): Defaults to 4.0.
ignore_iof_thr (float): Defaults to -1.0.
obj_level_weights (List[float]): Defaults to [4.0, 1.0, 0.4].
train_cfg (:obj:`ConfigDict` or dict, optional): Training config of
anchor head. Defaults to None.
test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of
anchor head. Defaults to None.
init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
list[dict], optional): Initialization config dict.
Defaults to None.
"""
    def __init__(self,
                 head_module: ConfigType,
                 prior_generator: ConfigType = dict(
                     type='mmdet.YOLOAnchorGenerator',
                     base_sizes=[[(10, 13), (16, 30), (33, 23)],
                                 [(30, 61), (62, 45), (59, 119)],
                                 [(116, 90), (156, 198), (373, 326)]],
                     strides=[8, 16, 32]),
                 bbox_coder: ConfigType = dict(type='YOLOv5BBoxCoder'),
                 loss_cls: ConfigType = dict(
                     type='mmdet.CrossEntropyLoss',
                     use_sigmoid=True,
                     reduction='mean',
                     loss_weight=0.5),
                 loss_bbox: ConfigType = dict(
                     type='IoULoss',
                     iou_mode='ciou',
                     bbox_format='xywh',
                     eps=1e-7,
                     reduction='mean',
                     loss_weight=0.05,
                     return_iou=True),
                 loss_obj: ConfigType = dict(
                     type='mmdet.CrossEntropyLoss',
                     use_sigmoid=True,
                     reduction='mean',
                     loss_weight=1.0),
                 prior_match_thr: float = 4.0,
                 near_neighbor_thr: float = 0.5,
                 ignore_iof_thr: float = -1.0,
                 obj_level_weights: List[float] = [4.0, 1.0, 0.4],
                 train_cfg: OptConfigType = None,
                 test_cfg: OptConfigType = None,
                 init_cfg: OptMultiConfig = None):
        super().__init__(init_cfg=init_cfg)
        # Build sub-modules from their registry configs.
        self.head_module = MODELS.build(head_module)
        self.num_classes = self.head_module.num_classes
        self.featmap_strides = self.head_module.featmap_strides
        self.num_levels = len(self.featmap_strides)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.loss_cls: nn.Module = MODELS.build(loss_cls)
        self.loss_bbox: nn.Module = MODELS.build(loss_bbox)
        self.loss_obj: nn.Module = MODELS.build(loss_obj)
        self.prior_generator = TASK_UTILS.build(prior_generator)
        self.bbox_coder = TASK_UTILS.build(bbox_coder)
        self.num_base_priors = self.prior_generator.num_base_priors[0]
        # Cached featmap sizes; refreshed in predict_by_feat when the input
        # resolution changes, so priors are only regenerated when needed.
        self.featmap_sizes = [torch.empty(1)] * self.num_levels
        self.prior_match_thr = prior_match_thr
        self.near_neighbor_thr = near_neighbor_thr
        self.obj_level_weights = obj_level_weights
        self.ignore_iof_thr = ignore_iof_thr
        # Hook for subclass-specific initialization (see special_init).
        self.special_init()
    def special_init(self):
        """Since YOLO series algorithms will inherit from YOLOv5Head, but
        different algorithms have special initialization process.

        The special_init function is designed to deal with this situation.
        """
        assert len(self.obj_level_weights) == len(
            self.featmap_strides) == self.num_levels
        if self.prior_match_thr != 4.0:
            print_log(
                "!!!Now, you've changed the prior_match_thr "
                'parameter to something other than 4.0. Please make sure '
                'that you have modified both the regression formula in '
                'bbox_coder and before loss_box computation, '
                'otherwise the accuracy may be degraded!!!')
        if self.num_classes == 1:
            print_log('!!!You are using `YOLOv5Head` with num_classes == 1.'
                      ' The loss_cls will be 0. This is a normal phenomenon.')
        # Anchor sizes expressed in feature-map (grid) units, i.e. divided
        # by the stride of their level.
        priors_base_sizes = torch.tensor(
            self.prior_generator.base_sizes, dtype=torch.float)
        featmap_strides = torch.tensor(
            self.featmap_strides, dtype=torch.float)[:, None, None]
        self.register_buffer(
            'priors_base_sizes',
            priors_base_sizes / featmap_strides,
            persistent=False)
        # Offsets used in loss_by_feat step 3 to also assign up to 4
        # neighbouring grid cells of a gt center as positive samples.
        grid_offset = torch.tensor([
            [0, 0],  # center
            [1, 0],  # left
            [0, 1],  # up
            [-1, 0],  # right
            [0, -1],  # bottom
        ]).float()
        self.register_buffer(
            'grid_offset', grid_offset[:, None], persistent=False)
        # Column vector (num_base_priors, 1) of prior indices, appended to
        # targets in _convert_gt_to_norm_format.
        prior_inds = torch.arange(self.num_base_priors).float().view(
            self.num_base_priors, 1)
        self.register_buffer('prior_inds', prior_inds, persistent=False)
    def forward(self, x: Tuple[Tensor]) -> Tuple[List]:
        """Forward features from the upstream network.

        Args:
            x (Tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            Tuple[List]: A tuple of multi-level classification scores, bbox
            predictions, and objectnesses.
        """
        # The head is a thin wrapper; all prediction convs live in
        # ``self.head_module``.
        return self.head_module(x)
    def predict_by_feat(self,
                        cls_scores: List[Tensor],
                        bbox_preds: List[Tensor],
                        objectnesses: Optional[List[Tensor]] = None,
                        batch_img_metas: Optional[List[dict]] = None,
                        cfg: Optional[ConfigDict] = None,
                        rescale: bool = True,
                        with_nms: bool = True) -> List[InstanceData]:
        """Transform a batch of output features extracted by the head into
        bbox results.

        Args:
            cls_scores (list[Tensor]): Classification scores for all
                scale levels, each is a 4D-tensor, has shape
                (batch_size, num_priors * num_classes, H, W).
            bbox_preds (list[Tensor]): Box energies / deltas for all
                scale levels, each is a 4D-tensor, has shape
                (batch_size, num_priors * 4, H, W).
            objectnesses (list[Tensor], Optional): Score factor for
                all scale level, each is a 4D-tensor, has shape
                (batch_size, 1, H, W).
            batch_img_metas (list[dict], Optional): Batch image meta info.
                Defaults to None.
            cfg (ConfigDict, optional): Test / postprocessing
                configuration, if None, test_cfg would be used.
                Defaults to None.
            rescale (bool): If True, return boxes in original image space.
                Defaults to True.
            with_nms (bool): If True, do nms before return boxes.
                Defaults to True.

        Returns:
            list[:obj:`InstanceData`]: Object detection results of each image
            after the post process. Each item usually contains following keys.

            - scores (Tensor): Classification scores, has a shape
              (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 4),
              the last dimension 4 arrange as (x1, y1, x2, y2).
        """
        assert len(cls_scores) == len(bbox_preds)
        if objectnesses is None:
            with_objectnesses = False
        else:
            with_objectnesses = True
            assert len(cls_scores) == len(objectnesses)
        cfg = self.test_cfg if cfg is None else cfg
        # Deep-copy so per-call mutations (multi_label, max_per_img below)
        # do not leak into self.test_cfg.
        cfg = copy.deepcopy(cfg)
        multi_label = cfg.multi_label
        multi_label &= self.num_classes > 1
        cfg.multi_label = multi_label
        num_imgs = len(batch_img_metas)
        featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
        # If the shape does not change, use the previous mlvl_priors
        if featmap_sizes != self.featmap_sizes:
            self.mlvl_priors = self.prior_generator.grid_priors(
                featmap_sizes,
                dtype=cls_scores[0].dtype,
                device=cls_scores[0].device)
            self.featmap_sizes = featmap_sizes
        flatten_priors = torch.cat(self.mlvl_priors)
        # One stride value per flattened prior.
        mlvl_strides = [
            flatten_priors.new_full(
                (featmap_size.numel() * self.num_base_priors, ), stride) for
            featmap_size, stride in zip(featmap_sizes, self.featmap_strides)
        ]
        flatten_stride = torch.cat(mlvl_strides)
        # flatten cls_scores, bbox_preds and objectness
        flatten_cls_scores = [
            cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,
                                                  self.num_classes)
            for cls_score in cls_scores
        ]
        flatten_bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
            for bbox_pred in bbox_preds
        ]
        flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()
        flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
        flatten_decoded_bboxes = self.bbox_coder.decode(
            flatten_priors[None], flatten_bbox_preds, flatten_stride)
        if with_objectnesses:
            flatten_objectness = [
                objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1)
                for objectness in objectnesses
            ]
            flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid()
        else:
            flatten_objectness = [None for _ in range(num_imgs)]
        # Per-image post-processing loop.
        results_list = []
        for (bboxes, scores, objectness,
             img_meta) in zip(flatten_decoded_bboxes, flatten_cls_scores,
                              flatten_objectness, batch_img_metas):
            ori_shape = img_meta['ori_shape']
            scale_factor = img_meta['scale_factor']
            if 'pad_param' in img_meta:
                pad_param = img_meta['pad_param']
            else:
                pad_param = None
            score_thr = cfg.get('score_thr', -1)
            # yolox_style does not require the following operations
            if objectness is not None and score_thr > 0 and not cfg.get(
                    'yolox_style', False):
                conf_inds = objectness > score_thr
                bboxes = bboxes[conf_inds, :]
                scores = scores[conf_inds, :]
                objectness = objectness[conf_inds]
            if objectness is not None:
                # conf = obj_conf * cls_conf
                scores *= objectness[:, None]
            if scores.shape[0] == 0:
                # No candidates left: emit empty results with matching dtypes.
                empty_results = InstanceData()
                empty_results.bboxes = bboxes
                empty_results.scores = scores[:, 0]
                empty_results.labels = scores[:, 0].int()
                results_list.append(empty_results)
                continue
            nms_pre = cfg.get('nms_pre', 100000)
            if cfg.multi_label is False:
                # Single best class per box.
                scores, labels = scores.max(1, keepdim=True)
                scores, _, keep_idxs, results = filter_scores_and_topk(
                    scores,
                    score_thr,
                    nms_pre,
                    results=dict(labels=labels[:, 0]))
                labels = results['labels']
            else:
                # Keep every (box, class) pair above the threshold.
                scores, labels, keep_idxs, _ = filter_scores_and_topk(
                    scores, score_thr, nms_pre)
            results = InstanceData(
                scores=scores, labels=labels, bboxes=bboxes[keep_idxs])
            if rescale:
                if pad_param is not None:
                    # Undo letterbox padding before scaling back; indices 0
                    # and 2 are used as the top/left offsets here — assumes
                    # pad_param is (top, bottom, left, right).
                    # TODO(review): confirm layout against the pipeline.
                    results.bboxes -= results.bboxes.new_tensor([
                        pad_param[2], pad_param[0], pad_param[2], pad_param[0]
                    ])
                results.bboxes /= results.bboxes.new_tensor(
                    scale_factor).repeat((1, 2))
            if cfg.get('yolox_style', False):
                # do not need max_per_img
                cfg.max_per_img = len(results)
            results = self._bbox_post_process(
                results=results,
                cfg=cfg,
                rescale=False,
                with_nms=with_nms,
                img_meta=img_meta)
            # Clip boxes to the original image bounds.
            results.bboxes[:, 0::2].clamp_(0, ori_shape[1])
            results.bboxes[:, 1::2].clamp_(0, ori_shape[0])
            results_list.append(results)
        return results_list
def loss(self, x: Tuple[Tensor], batch_data_samples: Union[list,
dict]) -> dict:
"""Perform forward propagation and loss calculation of the detection
head on the features of the upstream network.
Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
batch_data_samples (List[:obj:`DetDataSample`], dict): The Data
Samples. It usually includes information such as
`gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`.
Returns:
dict: A dictionary of loss components.
"""
if isinstance(batch_data_samples, list):
losses = super().loss(x, batch_data_samples)
else:
outs = self(x)
# Fast version
loss_inputs = outs + (batch_data_samples['bboxes_labels'],
batch_data_samples['img_metas'])
losses = self.loss_by_feat(*loss_inputs)
return losses
    def loss_by_feat(
            self,
            cls_scores: Sequence[Tensor],
            bbox_preds: Sequence[Tensor],
            objectnesses: Sequence[Tensor],
            batch_gt_instances: Sequence[InstanceData],
            batch_img_metas: Sequence[dict],
            batch_gt_instances_ignore: OptInstanceList = None) -> dict:
        """Calculate the loss based on the features extracted by the detection
        head.

        Args:
            cls_scores (Sequence[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_priors * num_classes.
            bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_priors * 4.
            objectnesses (Sequence[Tensor]): Score factor for
                all scale level, each is a 4D-tensor, has shape
                (batch_size, 1, H, W).
            batch_gt_instances (Sequence[InstanceData]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (Sequence[dict]): Meta information of each image,
                e.g., image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
                Batch of gt_instances_ignore. It includes ``bboxes`` attribute
                data that is ignored during training and testing.
                Defaults to None.
        Returns:
            dict[str, Tensor]: A dictionary of losses.
        """
        if self.ignore_iof_thr != -1:
            # TODO: Support fast version
            # convert ignore gt
            batch_target_ignore_list = []
            for i, gt_instances_ignore in enumerate(batch_gt_instances_ignore):
                bboxes = gt_instances_ignore.bboxes
                labels = gt_instances_ignore.labels
                index = bboxes.new_full((len(bboxes), 1), i)
                # (batch_idx, label, bboxes)
                target = torch.cat((index, labels[:, None].float(), bboxes),
                                   dim=1)
                batch_target_ignore_list.append(target)
            # (num_bboxes, 6)
            batch_gt_targets_ignore = torch.cat(
                batch_target_ignore_list, dim=0)
            if batch_gt_targets_ignore.shape[0] != 0:
                # Consider regions with ignore in annotations
                return self._loss_by_feat_with_ignore(
                    cls_scores,
                    bbox_preds,
                    objectnesses,
                    batch_gt_instances=batch_gt_instances,
                    batch_img_metas=batch_img_metas,
                    batch_gt_instances_ignore=batch_gt_targets_ignore)
        # 1. Convert gt to norm format
        batch_targets_normed = self._convert_gt_to_norm_format(
            batch_gt_instances, batch_img_metas)
        device = cls_scores[0].device
        loss_cls = torch.zeros(1, device=device)
        loss_box = torch.zeros(1, device=device)
        loss_obj = torch.zeros(1, device=device)
        scaled_factor = torch.ones(7, device=device)
        for i in range(self.num_levels):
            batch_size, _, h, w = bbox_preds[i].shape
            target_obj = torch.zeros_like(objectnesses[i])
            # empty gt bboxes
            if batch_targets_normed.shape[1] == 0:
                # ``* 0`` keeps predictions in the graph so every parameter
                # still receives a (zero) gradient.
                loss_box += bbox_preds[i].sum() * 0
                loss_cls += cls_scores[i].sum() * 0
                loss_obj += self.loss_obj(
                    objectnesses[i], target_obj) * self.obj_level_weights[i]
                continue
            priors_base_sizes_i = self.priors_base_sizes[i]
            # feature map scale whwh
            scaled_factor[2:6] = torch.tensor(
                bbox_preds[i].shape)[[3, 2, 3, 2]]
            # Scale batch_targets from range 0-1 to range 0-features_maps size.
            # (num_base_priors, num_bboxes, 7)
            batch_targets_scaled = batch_targets_normed * scaled_factor
            # 2. Shape match
            wh_ratio = batch_targets_scaled[...,
                                            4:6] / priors_base_sizes_i[:, None]
            match_inds = torch.max(
                wh_ratio, 1 / wh_ratio).max(2)[0] < self.prior_match_thr
            batch_targets_scaled = batch_targets_scaled[match_inds]
            # no gt bbox matches anchor
            if batch_targets_scaled.shape[0] == 0:
                loss_box += bbox_preds[i].sum() * 0
                loss_cls += cls_scores[i].sum() * 0
                loss_obj += self.loss_obj(
                    objectnesses[i], target_obj) * self.obj_level_weights[i]
                continue
            # 3. Positive samples with additional neighbors
            # check the left, up, right, bottom sides of the
            # targets grid, and determine whether assigned
            # them as positive samples as well.
            batch_targets_cxcy = batch_targets_scaled[:, 2:4]
            grid_xy = scaled_factor[[2, 3]] - batch_targets_cxcy
            left, up = ((batch_targets_cxcy % 1 < self.near_neighbor_thr) &
                        (batch_targets_cxcy > 1)).T
            right, bottom = ((grid_xy % 1 < self.near_neighbor_thr) &
                             (grid_xy > 1)).T
            offset_inds = torch.stack(
                (torch.ones_like(left), left, up, right, bottom))
            batch_targets_scaled = batch_targets_scaled.repeat(
                (5, 1, 1))[offset_inds]
            retained_offsets = self.grid_offset.repeat(1, offset_inds.shape[1],
                                                       1)[offset_inds]
            # prepare pred results and positive sample indexes to
            # calculate class loss and bbox loss
            _chunk_targets = batch_targets_scaled.chunk(4, 1)
            img_class_inds, grid_xy, grid_wh, priors_inds = _chunk_targets
            priors_inds, (img_inds, class_inds) = priors_inds.long().view(
                -1), img_class_inds.long().T
            grid_xy_long = (grid_xy -
                            retained_offsets * self.near_neighbor_thr).long()
            grid_x_inds, grid_y_inds = grid_xy_long.T
            bboxes_targets = torch.cat((grid_xy - grid_xy_long, grid_wh), 1)
            # 4. Calculate loss
            # bbox loss
            retained_bbox_pred = bbox_preds[i].reshape(
                batch_size, self.num_base_priors, -1, h,
                w)[img_inds, priors_inds, :, grid_y_inds, grid_x_inds]
            priors_base_sizes_i = priors_base_sizes_i[priors_inds]
            decoded_bbox_pred = self._decode_bbox_to_xywh(
                retained_bbox_pred, priors_base_sizes_i)
            loss_box_i, iou = self.loss_bbox(decoded_bbox_pred, bboxes_targets)
            loss_box += loss_box_i
            # obj loss: the objectness target of a positive cell is its IoU
            # with the assigned gt.
            iou = iou.detach().clamp(0)
            target_obj[img_inds, priors_inds, grid_y_inds,
                       grid_x_inds] = iou.type(target_obj.dtype)
            loss_obj += self.loss_obj(objectnesses[i],
                                      target_obj) * self.obj_level_weights[i]
            # cls loss
            if self.num_classes > 1:
                pred_cls_scores = cls_scores[i].reshape(
                    batch_size, self.num_base_priors, -1, h,
                    w)[img_inds, priors_inds, :, grid_y_inds, grid_x_inds]
                target_class = torch.full_like(pred_cls_scores, 0.)
                target_class[range(batch_targets_scaled.shape[0]),
                             class_inds] = 1.
                loss_cls += self.loss_cls(pred_cls_scores, target_class)
            else:
                loss_cls += cls_scores[i].sum() * 0
        _, world_size = get_dist_info()
        # Scale losses by batch size and world size (YOLOv5 convention);
        # ``batch_size`` comes from the last level, shared by all levels.
        return dict(
            loss_cls=loss_cls * batch_size * world_size,
            loss_obj=loss_obj * batch_size * world_size,
            loss_bbox=loss_box * batch_size * world_size)
    def _convert_gt_to_norm_format(self,
                                   batch_gt_instances: Sequence[InstanceData],
                                   batch_img_metas: Sequence[dict]) -> Tensor:
        """Convert gt boxes to the normalized YOLOv5 target layout.

        Args:
            batch_gt_instances (Sequence[InstanceData] | Tensor): Either a
                list of per-image gt instances, or (fast version) a single
                tensor of rows (batch_idx, label, x1, y1, x2, y2).
            batch_img_metas (Sequence[dict]): Per-image meta info; only
                ``batch_input_shape`` is read here.

        Returns:
            Tensor: Shape (num_base_priors, num_bboxes, 7); each row is
            (img_ind, label, cx, cy, w, h, prior_ind) with the box in 0-1
            coordinates relative to ``batch_input_shape``.
        """
        if isinstance(batch_gt_instances, torch.Tensor):
            # fast version
            img_shape = batch_img_metas[0]['batch_input_shape']
            gt_bboxes_xyxy = batch_gt_instances[:, 2:]
            xy1, xy2 = gt_bboxes_xyxy.split((2, 2), dim=-1)
            gt_bboxes_xywh = torch.cat([(xy2 + xy1) / 2, (xy2 - xy1)], dim=-1)
            gt_bboxes_xywh[:, 1::2] /= img_shape[0]
            gt_bboxes_xywh[:, 0::2] /= img_shape[1]
            batch_gt_instances[:, 2:] = gt_bboxes_xywh
            # (num_base_priors, num_bboxes, 6)
            batch_targets_normed = batch_gt_instances.repeat(
                self.num_base_priors, 1, 1)
        else:
            batch_target_list = []
            # Convert xyxy bbox to yolo format.
            for i, gt_instances in enumerate(batch_gt_instances):
                img_shape = batch_img_metas[i]['batch_input_shape']
                bboxes = gt_instances.bboxes
                labels = gt_instances.labels
                xy1, xy2 = bboxes.split((2, 2), dim=-1)
                bboxes = torch.cat([(xy2 + xy1) / 2, (xy2 - xy1)], dim=-1)
                # normalized to 0-1
                bboxes[:, 1::2] /= img_shape[0]
                bboxes[:, 0::2] /= img_shape[1]
                index = bboxes.new_full((len(bboxes), 1), i)
                # (batch_idx, label, normed_bbox)
                target = torch.cat((index, labels[:, None].float(), bboxes),
                                   dim=1)
                batch_target_list.append(target)
            # (num_base_priors, num_bboxes, 6)
            batch_targets_normed = torch.cat(
                batch_target_list, dim=0).repeat(self.num_base_priors, 1, 1)
        # (num_base_priors, num_bboxes, 1)
        batch_targets_prior_inds = self.prior_inds.repeat(
            1, batch_targets_normed.shape[1])[..., None]
        # (num_base_priors, num_bboxes, 7)
        # (img_ind, labels, bbox_cx, bbox_cy, bbox_w, bbox_h, prior_ind)
        batch_targets_normed = torch.cat(
            (batch_targets_normed, batch_targets_prior_inds), 2)
        return batch_targets_normed
def _decode_bbox_to_xywh(self, bbox_pred, priors_base_sizes) -> Tensor:
bbox_pred = bbox_pred.sigmoid()
pred_xy = bbox_pred[:, :2] * 2 - 0.5
pred_wh = (bbox_pred[:, 2:] * 2)**2 * priors_base_sizes
decoded_bbox_pred = torch.cat((pred_xy, pred_wh), dim=-1)
return decoded_bbox_pred
    def _loss_by_feat_with_ignore(
            self, cls_scores: Sequence[Tensor], bbox_preds: Sequence[Tensor],
            objectnesses: Sequence[Tensor],
            batch_gt_instances: Sequence[InstanceData],
            batch_img_metas: Sequence[dict],
            batch_gt_instances_ignore: Sequence[Tensor]) -> dict:
        """Calculate the loss based on the features extracted by the detection
        head, masking out predictions that fall inside ignore regions.

        Args:
            cls_scores (Sequence[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_priors * num_classes.
            bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_priors * 4.
            objectnesses (Sequence[Tensor]): Score factor for
                all scale level, each is a 4D-tensor, has shape
                (batch_size, 1, H, W).
            batch_gt_instances (Sequence[InstanceData]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (Sequence[dict]): Meta information of each image,
                e.g., image size, scaling factor, etc.
            batch_gt_instances_ignore (Sequence[Tensor]): Ignore boxes with
                batch_ids and labels, each is a 2D-tensor, the channel number
                is 6, means that (batch_id, label, xmin, ymin, xmax, ymax).
        Returns:
            dict[str, Tensor]: A dictionary of losses.
        """
        # 1. Convert gt to norm format
        batch_targets_normed = self._convert_gt_to_norm_format(
            batch_gt_instances, batch_img_metas)
        featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]
        if featmap_sizes != self.featmap_sizes:
            self.mlvl_priors = self.prior_generator.grid_priors(
                featmap_sizes,
                dtype=cls_scores[0].dtype,
                device=cls_scores[0].device)
            self.featmap_sizes = featmap_sizes
        device = cls_scores[0].device
        loss_cls = torch.zeros(1, device=device)
        loss_box = torch.zeros(1, device=device)
        loss_obj = torch.zeros(1, device=device)
        scaled_factor = torch.ones(7, device=device)
        for i in range(self.num_levels):
            batch_size, _, h, w = bbox_preds[i].shape
            target_obj = torch.zeros_like(objectnesses[i])
            # ``not_ignore_flags`` is 1 for cells that contribute to the
            # loss and 0 for cells overlapping an ignore region.
            not_ignore_flags = bbox_preds[i].new_ones(batch_size,
                                                      self.num_base_priors, h,
                                                      w)
            # IoF of every prior cell against every ignore box.
            ignore_overlaps = bbox_overlaps(self.mlvl_priors[i],
                                            batch_gt_instances_ignore[..., 2:],
                                            'iof')
            ignore_max_overlaps, ignore_max_ignore_index = ignore_overlaps.max(
                dim=1)
            batch_inds = batch_gt_instances_ignore[:,
                                                   0][ignore_max_ignore_index]
            ignore_inds = (ignore_max_overlaps > self.ignore_iof_thr).nonzero(
                as_tuple=True)[0]
            batch_inds = batch_inds[ignore_inds].long()
            ignore_priors, ignore_grid_xs, ignore_grid_ys = get_prior_xy_info(
                ignore_inds, self.num_base_priors, self.featmap_sizes[i])
            not_ignore_flags[batch_inds, ignore_priors, ignore_grid_ys,
                             ignore_grid_xs] = 0
            # empty gt bboxes
            if batch_targets_normed.shape[1] == 0:
                # ``* 0`` keeps predictions in the graph so every parameter
                # still receives a (zero) gradient.
                loss_box += bbox_preds[i].sum() * 0
                loss_cls += cls_scores[i].sum() * 0
                loss_obj += self.loss_obj(
                    objectnesses[i],
                    target_obj,
                    weight=not_ignore_flags,
                    avg_factor=max(not_ignore_flags.sum(),
                                   1)) * self.obj_level_weights[i]
                continue
            priors_base_sizes_i = self.priors_base_sizes[i]
            # feature map scale whwh
            scaled_factor[2:6] = torch.tensor(
                bbox_preds[i].shape)[[3, 2, 3, 2]]
            # Scale batch_targets from range 0-1 to range 0-features_maps size.
            # (num_base_priors, num_bboxes, 7)
            batch_targets_scaled = batch_targets_normed * scaled_factor
            # 2. Shape match
            wh_ratio = batch_targets_scaled[...,
                                            4:6] / priors_base_sizes_i[:, None]
            match_inds = torch.max(
                wh_ratio, 1 / wh_ratio).max(2)[0] < self.prior_match_thr
            batch_targets_scaled = batch_targets_scaled[match_inds]
            # no gt bbox matches anchor
            if batch_targets_scaled.shape[0] == 0:
                loss_box += bbox_preds[i].sum() * 0
                loss_cls += cls_scores[i].sum() * 0
                loss_obj += self.loss_obj(
                    objectnesses[i],
                    target_obj,
                    weight=not_ignore_flags,
                    avg_factor=max(not_ignore_flags.sum(),
                                   1)) * self.obj_level_weights[i]
                continue
            # 3. Positive samples with additional neighbors
            # check the left, up, right, bottom sides of the
            # targets grid, and determine whether assigned
            # them as positive samples as well.
            batch_targets_cxcy = batch_targets_scaled[:, 2:4]
            grid_xy = scaled_factor[[2, 3]] - batch_targets_cxcy
            left, up = ((batch_targets_cxcy % 1 < self.near_neighbor_thr) &
                        (batch_targets_cxcy > 1)).T
            right, bottom = ((grid_xy % 1 < self.near_neighbor_thr) &
                             (grid_xy > 1)).T
            offset_inds = torch.stack(
                (torch.ones_like(left), left, up, right, bottom))
            batch_targets_scaled = batch_targets_scaled.repeat(
                (5, 1, 1))[offset_inds]
            retained_offsets = self.grid_offset.repeat(1, offset_inds.shape[1],
                                                       1)[offset_inds]
            # prepare pred results and positive sample indexes to
            # calculate class loss and bbox loss
            _chunk_targets = batch_targets_scaled.chunk(4, 1)
            img_class_inds, grid_xy, grid_wh, priors_inds = _chunk_targets
            priors_inds, (img_inds, class_inds) = priors_inds.long().view(
                -1), img_class_inds.long().T
            grid_xy_long = (grid_xy -
                            retained_offsets * self.near_neighbor_thr).long()
            grid_x_inds, grid_y_inds = grid_xy_long.T
            bboxes_targets = torch.cat((grid_xy - grid_xy_long, grid_wh), 1)
            # 4. Calculate loss
            # bbox loss
            retained_bbox_pred = bbox_preds[i].reshape(
                batch_size, self.num_base_priors, -1, h,
                w)[img_inds, priors_inds, :, grid_y_inds, grid_x_inds]
            priors_base_sizes_i = priors_base_sizes_i[priors_inds]
            decoded_bbox_pred = self._decode_bbox_to_xywh(
                retained_bbox_pred, priors_base_sizes_i)
            not_ignore_weights = not_ignore_flags[img_inds, priors_inds,
                                                  grid_y_inds, grid_x_inds]
            loss_box_i, iou = self.loss_bbox(
                decoded_bbox_pred,
                bboxes_targets,
                weight=not_ignore_weights,
                avg_factor=max(not_ignore_weights.sum(), 1))
            loss_box += loss_box_i
            # obj loss: objectness target of a positive cell is its IoU
            # with the assigned gt.
            iou = iou.detach().clamp(0)
            target_obj[img_inds, priors_inds, grid_y_inds,
                       grid_x_inds] = iou.type(target_obj.dtype)
            loss_obj += self.loss_obj(
                objectnesses[i],
                target_obj,
                weight=not_ignore_flags,
                avg_factor=max(not_ignore_flags.sum(),
                               1)) * self.obj_level_weights[i]
            # cls loss
            if self.num_classes > 1:
                pred_cls_scores = cls_scores[i].reshape(
                    batch_size, self.num_base_priors, -1, h,
                    w)[img_inds, priors_inds, :, grid_y_inds, grid_x_inds]
                target_class = torch.full_like(pred_cls_scores, 0.)
                target_class[range(batch_targets_scaled.shape[0]),
                             class_inds] = 1.
                loss_cls += self.loss_cls(
                    pred_cls_scores,
                    target_class,
                    weight=not_ignore_weights[:, None].repeat(
                        1, self.num_classes),
                    avg_factor=max(not_ignore_weights.sum(), 1))
            else:
                loss_cls += cls_scores[i].sum() * 0
        _, world_size = get_dist_info()
        # Scale losses by batch size and world size (YOLOv5 convention).
        return dict(
            loss_cls=loss_cls * batch_size * world_size,
            loss_obj=loss_obj * batch_size * world_size,
            loss_bbox=loss_box * batch_size * world_size)
| 38,981 | 42.750842 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/dense_heads/yolov7_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import List, Optional, Sequence, Tuple, Union
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.models.utils import multi_apply
from mmdet.utils import ConfigType, OptInstanceList
from mmengine.dist import get_dist_info
from mmengine.structures import InstanceData
from torch import Tensor
from mmyolo.registry import MODELS
from ..layers import ImplicitA, ImplicitM
from ..task_modules.assigners.batch_yolov7_assigner import BatchYOLOv7Assigner
from .yolov5_head import YOLOv5Head, YOLOv5HeadModule
@MODELS.register_module()
class YOLOv7HeadModule(YOLOv5HeadModule):
    """YOLOv7Head head module used in YOLOv7."""

    def _init_layers(self):
        """initialize conv layers in YOLOv7 head."""
        self.convs_pred = nn.ModuleList()
        for i in range(self.num_levels):
            # Each level wraps the 1x1 prediction conv with ImplicitA /
            # ImplicitM (additive / multiplicative layers from ..layers).
            conv_pred = nn.Sequential(
                ImplicitA(self.in_channels[i]),
                nn.Conv2d(self.in_channels[i],
                          self.num_base_priors * self.num_out_attrib, 1),
                ImplicitM(self.num_base_priors * self.num_out_attrib),
            )
            self.convs_pred.append(conv_pred)

    def init_weights(self):
        """Initialize the bias of YOLOv7 head."""
        # Skip YOLOv5HeadModule.init_weights (plain-conv layout) and call
        # BaseModule's initializer instead.
        super(YOLOv5HeadModule, self).init_weights()
        for mi, s in zip(self.convs_pred, self.featmap_strides):  # from
            mi = mi[1]  # nn.Conv2d
            b = mi.bias.data.view(3, -1)
            # obj (8 objects per 640 image)
            b.data[:, 4] += math.log(8 / (640 / s)**2)
            b.data[:, 5:] += math.log(0.6 / (self.num_classes - 0.99))
            mi.bias.data = b.view(-1)
@MODELS.register_module()
class YOLOv7p6HeadModule(YOLOv5HeadModule):
"""YOLOv7Head head module used in YOLOv7."""
    def __init__(self,
                 *args,
                 main_out_channels: Sequence[int] = [256, 512, 768, 1024],
                 aux_out_channels: Sequence[int] = [320, 640, 960, 1280],
                 use_aux: bool = True,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 **kwargs):
        # Attributes must be set before super().__init__ because the base
        # constructor calls _init_layers(), which reads them.
        self.main_out_channels = main_out_channels
        self.aux_out_channels = aux_out_channels
        self.use_aux = use_aux
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        super().__init__(*args, **kwargs)
def _init_layers(self):
"""initialize conv layers in YOLOv7 head."""
self.main_convs_pred = nn.ModuleList()
for i in range(self.num_levels):
conv_pred = nn.Sequential(
ConvModule(
self.in_channels[i],
self.main_out_channels[i],
3,
padding=1,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg),
ImplicitA(self.main_out_channels[i]),
nn.Conv2d(self.main_out_channels[i],
self.num_base_priors * self.num_out_attrib, 1),
ImplicitM(self.num_base_priors * self.num_out_attrib),
)
self.main_convs_pred.append(conv_pred)
if self.use_aux:
self.aux_convs_pred = nn.ModuleList()
for i in range(self.num_levels):
aux_pred = nn.Sequential(
ConvModule(
self.in_channels[i],
self.aux_out_channels[i],
3,
padding=1,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg),
nn.Conv2d(self.aux_out_channels[i],
self.num_base_priors * self.num_out_attrib, 1))
self.aux_convs_pred.append(aux_pred)
else:
self.aux_convs_pred = [None] * len(self.main_convs_pred)
def init_weights(self):
"""Initialize the bias of YOLOv5 head."""
super(YOLOv5HeadModule, self).init_weights()
for mi, aux, s in zip(self.main_convs_pred, self.aux_convs_pred,
self.featmap_strides): # from
mi = mi[2] # nn.Conv2d
b = mi.bias.data.view(3, -1)
# obj (8 objects per 640 image)
b.data[:, 4] += math.log(8 / (640 / s)**2)
b.data[:, 5:] += math.log(0.6 / (self.num_classes - 0.99))
mi.bias.data = b.view(-1)
if self.use_aux:
aux = aux[1] # nn.Conv2d
b = aux.bias.data.view(3, -1)
# obj (8 objects per 640 image)
b.data[:, 4] += math.log(8 / (640 / s)**2)
b.data[:, 5:] += math.log(0.6 / (self.num_classes - 0.99))
mi.bias.data = b.view(-1)
def forward(self, x: Tuple[Tensor]) -> Tuple[List]:
"""Forward features from the upstream network.
Args:
x (Tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
Tuple[List]: A tuple of multi-level classification scores, bbox
predictions, and objectnesses.
"""
assert len(x) == self.num_levels
return multi_apply(self.forward_single, x, self.main_convs_pred,
self.aux_convs_pred)
def forward_single(self, x: Tensor, convs: nn.Module,
aux_convs: Optional[nn.Module]) \
-> Tuple[Union[Tensor, List], Union[Tensor, List],
Union[Tensor, List]]:
"""Forward feature of a single scale level."""
pred_map = convs(x)
bs, _, ny, nx = pred_map.shape
pred_map = pred_map.view(bs, self.num_base_priors, self.num_out_attrib,
ny, nx)
cls_score = pred_map[:, :, 5:, ...].reshape(bs, -1, ny, nx)
bbox_pred = pred_map[:, :, :4, ...].reshape(bs, -1, ny, nx)
objectness = pred_map[:, :, 4:5, ...].reshape(bs, -1, ny, nx)
if not self.training or not self.use_aux:
return cls_score, bbox_pred, objectness
else:
aux_pred_map = aux_convs(x)
aux_pred_map = aux_pred_map.view(bs, self.num_base_priors,
self.num_out_attrib, ny, nx)
aux_cls_score = aux_pred_map[:, :, 5:, ...].reshape(bs, -1, ny, nx)
aux_bbox_pred = aux_pred_map[:, :, :4, ...].reshape(bs, -1, ny, nx)
aux_objectness = aux_pred_map[:, :, 4:5,
...].reshape(bs, -1, ny, nx)
return [cls_score,
aux_cls_score], [bbox_pred, aux_bbox_pred
], [objectness, aux_objectness]
@MODELS.register_module()
class YOLOv7Head(YOLOv5Head):
    """YOLOv7Head head used in `YOLOv7 <https://arxiv.org/abs/2207.02696>`_.

    Args:
        simota_candidate_topk (int): The candidate top-k which used to
            get top-k ious to calculate dynamic-k in BatchYOLOv7Assigner.
            Defaults to 20.
        simota_iou_weight (float): The scale factor for regression
            iou cost in BatchYOLOv7Assigner. Defaults to 3.0.
        simota_cls_weight (float): The scale factor for classification
            cost in BatchYOLOv7Assigner. Defaults to 1.0.
        aux_loss_weights (float): Scale factor applied to the auxiliary-branch
            losses before they are added to the main-branch losses.
            Defaults to 0.25.
    """

    def __init__(self,
                 *args,
                 simota_candidate_topk: int = 20,
                 simota_iou_weight: float = 3.0,
                 simota_cls_weight: float = 1.0,
                 aux_loss_weights: float = 0.25,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.aux_loss_weights = aux_loss_weights
        # SimOTA-style assigner shared by the main and auxiliary branches.
        self.assigner = BatchYOLOv7Assigner(
            num_classes=self.num_classes,
            num_base_priors=self.num_base_priors,
            featmap_strides=self.featmap_strides,
            prior_match_thr=self.prior_match_thr,
            candidate_topk=simota_candidate_topk,
            iou_weight=simota_iou_weight,
            cls_weight=simota_cls_weight)

    def loss_by_feat(
            self,
            cls_scores: Sequence[Union[Tensor, List]],
            bbox_preds: Sequence[Union[Tensor, List]],
            objectnesses: Sequence[Union[Tensor, List]],
            batch_gt_instances: Sequence[InstanceData],
            batch_img_metas: Sequence[dict],
            batch_gt_instances_ignore: OptInstanceList = None) -> dict:
        """Calculate the loss based on the features extracted by the detection
        head.

        Args:
            cls_scores (Sequence[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_priors * num_classes.
            bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_priors * 4.
            objectnesses (Sequence[Tensor]): Score factor for
                all scale level, each is a 4D-tensor, has shape
                (batch_size, 1, H, W).
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
                Batch of gt_instances_ignore. It includes ``bboxes`` attribute
                data that is ignored during training and testing.
                Defaults to None.
        Returns:
            dict[str, Tensor]: A dictionary of losses.
        """
        # When the head module returns [main, aux] pairs per level (P6 model
        # in training mode) the first element of each per-level entry is a
        # sequence; unzip into separate main/aux prediction lists.
        if isinstance(cls_scores[0], Sequence):
            with_aux = True
            batch_size = cls_scores[0][0].shape[0]
            device = cls_scores[0][0].device

            bbox_preds_main, bbox_preds_aux = zip(*bbox_preds)
            objectnesses_main, objectnesses_aux = zip(*objectnesses)
            cls_scores_main, cls_scores_aux = zip(*cls_scores)

            head_preds = self._merge_predict_results(bbox_preds_main,
                                                     objectnesses_main,
                                                     cls_scores_main)
            head_preds_aux = self._merge_predict_results(
                bbox_preds_aux, objectnesses_aux, cls_scores_aux)
        else:
            with_aux = False
            batch_size = cls_scores[0].shape[0]
            device = cls_scores[0].device

            head_preds = self._merge_predict_results(bbox_preds, objectnesses,
                                                     cls_scores)

        # Convert gt to norm xywh format
        # (num_base_priors, num_batch_gt, 7)
        # 7 is mean (batch_idx, cls_id, x_norm, y_norm,
        # w_norm, h_norm, prior_idx)
        batch_targets_normed = self._convert_gt_to_norm_format(
            batch_gt_instances, batch_img_metas)

        # Per-level (w, h, w, h) factors that map normalized gt boxes back to
        # feature-map coordinates; head_pred is (bs, priors, H, W, attrib).
        scaled_factors = [
            torch.tensor(head_pred.shape, device=device)[[3, 2, 3, 2]]
            for head_pred in head_preds
        ]

        # Main-branch losses.
        loss_cls, loss_obj, loss_box = self._calc_loss(
            head_preds=head_preds,
            head_preds_aux=None,
            batch_targets_normed=batch_targets_normed,
            near_neighbor_thr=self.near_neighbor_thr,
            scaled_factors=scaled_factors,
            batch_img_metas=batch_img_metas,
            device=device)

        if with_aux:
            # Aux branch: assignment is still done on the main predictions
            # (head_preds) with a doubled neighbor threshold, but the loss is
            # evaluated on the aux predictions inside _calc_loss.
            loss_cls_aux, loss_obj_aux, loss_box_aux = self._calc_loss(
                head_preds=head_preds,
                head_preds_aux=head_preds_aux,
                batch_targets_normed=batch_targets_normed,
                near_neighbor_thr=self.near_neighbor_thr * 2,
                scaled_factors=scaled_factors,
                batch_img_metas=batch_img_metas,
                device=device)
            loss_cls += self.aux_loss_weights * loss_cls_aux
            loss_obj += self.aux_loss_weights * loss_obj_aux
            loss_box += self.aux_loss_weights * loss_box_aux

        # Scale by batch size and world size so the loss magnitude matches
        # the official YOLOv7 implementation under distributed training.
        _, world_size = get_dist_info()
        return dict(
            loss_cls=loss_cls * batch_size * world_size,
            loss_obj=loss_obj * batch_size * world_size,
            loss_bbox=loss_box * batch_size * world_size)

    def _calc_loss(self, head_preds, head_preds_aux, batch_targets_normed,
                   near_neighbor_thr, scaled_factors, batch_img_metas, device):
        """Run assignment and accumulate cls/obj/box losses over all levels.

        When ``head_preds_aux`` is not None, assignment is computed from
        ``head_preds`` (main branch) but losses are evaluated on the aux
        predictions. Returns a ``(loss_cls, loss_obj, loss_box)`` tuple of
        1-element tensors.
        """
        loss_cls = torch.zeros(1, device=device)
        loss_box = torch.zeros(1, device=device)
        loss_obj = torch.zeros(1, device=device)

        assigner_results = self.assigner(
            head_preds,
            batch_targets_normed,
            batch_img_metas[0]['batch_input_shape'],
            self.priors_base_sizes,
            self.grid_offset,
            near_neighbor_thr=near_neighbor_thr)
        # mlvl is mean multi_level
        mlvl_positive_infos = assigner_results['mlvl_positive_infos']
        mlvl_priors = assigner_results['mlvl_priors']
        mlvl_targets_normed = assigner_results['mlvl_targets_normed']

        if head_preds_aux is not None:
            # This is mean calc aux branch loss
            head_preds = head_preds_aux

        for i, head_pred in enumerate(head_preds):
            # Each positive-info row: (batch index, prior index, grid x, y).
            batch_inds, proir_idx, grid_x, grid_y = mlvl_positive_infos[i].T
            num_pred_positive = batch_inds.shape[0]
            target_obj = torch.zeros_like(head_pred[..., 0])
            # empty positive sampler
            if num_pred_positive == 0:
                # Keep a zero-valued graph connection so DDP sees all params.
                loss_box += head_pred[..., :4].sum() * 0
                loss_cls += head_pred[..., 5:].sum() * 0
                loss_obj += self.loss_obj(
                    head_pred[..., 4], target_obj) * self.obj_level_weights[i]
                continue

            priors = mlvl_priors[i]
            targets_normed = mlvl_targets_normed[i]

            head_pred_positive = head_pred[batch_inds, proir_idx, grid_y,
                                           grid_x]

            # calc bbox loss
            grid_xy = torch.stack([grid_x, grid_y], dim=1)
            decoded_pred_bbox = self._decode_bbox_to_xywh(
                head_pred_positive[:, :4], priors, grid_xy)
            target_bbox_scaled = targets_normed[:, 2:6] * scaled_factors[i]

            loss_box_i, iou = self.loss_bbox(decoded_pred_bbox,
                                             target_bbox_scaled)
            loss_box += loss_box_i

            # calc obj loss
            # Objectness target is the (detached, clamped) IoU of the matched
            # prediction, following YOLOv5/YOLOv7.
            target_obj[batch_inds, proir_idx, grid_y,
                       grid_x] = iou.detach().clamp(0).type(target_obj.dtype)
            loss_obj += self.loss_obj(head_pred[..., 4],
                                      target_obj) * self.obj_level_weights[i]

            # calc cls loss
            if self.num_classes > 1:
                pred_cls_scores = targets_normed[:, 1].long()
                target_class = torch.full_like(
                    head_pred_positive[:, 5:], 0., device=device)
                target_class[range(num_pred_positive), pred_cls_scores] = 1.
                loss_cls += self.loss_cls(head_pred_positive[:, 5:],
                                          target_class)
            else:
                # Single-class: classification is degenerate; keep a
                # zero-valued graph connection instead.
                loss_cls += head_pred_positive[:, 5:].sum() * 0

        return loss_cls, loss_obj, loss_box

    def _merge_predict_results(self, bbox_preds: Sequence[Tensor],
                               objectnesses: Sequence[Tensor],
                               cls_scores: Sequence[Tensor]) -> List[Tensor]:
        """Merge predict output from 3 heads.

        Args:
            bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_priors * 4.
            objectnesses (Sequence[Tensor]): Score factor for
                all scale level, each is a 4D-tensor, has shape
                (batch_size, 1, H, W).
            cls_scores (Sequence[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_priors * num_classes.

        Returns:
            List[Tensor]: Merged output, one tensor per level with shape
            (batch_size, num_base_priors, H, W, 5 + num_classes) laid out as
            (bbox, objectness, cls).
        """
        head_preds = []
        for bbox_pred, objectness, cls_score in zip(bbox_preds, objectnesses,
                                                    cls_scores):
            b, _, h, w = bbox_pred.shape
            bbox_pred = bbox_pred.reshape(b, self.num_base_priors, -1, h, w)
            objectness = objectness.reshape(b, self.num_base_priors, -1, h, w)
            cls_score = cls_score.reshape(b, self.num_base_priors, -1, h, w)
            head_pred = torch.cat([bbox_pred, objectness, cls_score],
                                  dim=2).permute(0, 1, 3, 4, 2).contiguous()
            head_preds.append(head_pred)
        return head_preds

    def _decode_bbox_to_xywh(self, bbox_pred, priors_base_sizes,
                             grid_xy) -> Tensor:
        """Decode raw (sigmoid-space) bbox predictions to xywh in feature-map
        units, using the YOLOv5-style ``2*sigmoid - 0.5`` center offset and
        ``(2*sigmoid)**2`` width/height scaling."""
        bbox_pred = bbox_pred.sigmoid()
        pred_xy = bbox_pred[:, :2] * 2 - 0.5 + grid_xy
        pred_wh = (bbox_pred[:, 2:] * 2)**2 * priors_base_sizes
        decoded_bbox_pred = torch.cat((pred_xy, pred_wh), dim=-1)
        return decoded_bbox_pred
| 17,391 | 41.94321 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/dense_heads/rtmdet_rotated_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
from typing import List, Optional, Sequence, Tuple
import torch
import torch.nn as nn
from mmdet.models.utils import filter_scores_and_topk
from mmdet.structures.bbox import HorizontalBoxes, distance2bbox
from mmdet.structures.bbox.transforms import bbox_cxcywh_to_xyxy, scale_boxes
from mmdet.utils import (ConfigType, InstanceList, OptConfigType,
OptInstanceList, OptMultiConfig, reduce_mean)
from mmengine.config import ConfigDict
from mmengine.model import normal_init
from mmengine.structures import InstanceData
from torch import Tensor
from mmyolo.registry import MODELS, TASK_UTILS
from ..utils import gt_instances_preprocess
from .rtmdet_head import RTMDetHead, RTMDetSepBNHeadModule
try:
from mmrotate.structures.bbox import RotatedBoxes, distance2obb
MMROTATE_AVAILABLE = True
except ImportError:
RotatedBoxes = None
distance2obb = None
MMROTATE_AVAILABLE = False
@MODELS.register_module()
class RTMDetRotatedSepBNHeadModule(RTMDetSepBNHeadModule):
    """Detection Head Module of RTMDet-R.

    Extends the RTMDet head module with an extra per-level conv branch that
    predicts the box angle. The ``angle_out_dim`` argument (filled in by the
    head's angle coder) controls the number of angle-prediction channels.

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        num_base_priors (int): The number of priors (points) at a point
            on the feature grid. Defaults to 1.
        feat_channels (int): Number of hidden channels. Defaults to 256.
        stacked_convs (int): Number of stacking convs of the head.
            Defaults to 2.
        featmap_strides (Sequence[int]): Downsample factor of each feature map.
            Defaults to (8, 16, 32).
        share_conv (bool): Whether to share conv layers between stages.
            Defaults to True.
        pred_kernel_size (int): Kernel size of ``nn.Conv2d``. Defaults to 1.
        angle_out_dim (int): Encoded length of angle, will passed by head.
            Defaults to 1.
        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
            convolution layer. Defaults to None.
        norm_cfg (:obj:`ConfigDict` or dict): Config dict for normalization
            layer. Defaults to ``dict(type='BN')``.
        act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.
            Default: dict(type='SiLU', inplace=True).
        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
            list[dict], optional): Initialization config dict.
            Defaults to None.
    """

    def __init__(
        self,
        num_classes: int,
        in_channels: int,
        widen_factor: float = 1.0,
        num_base_priors: int = 1,
        feat_channels: int = 256,
        stacked_convs: int = 2,
        featmap_strides: Sequence[int] = [8, 16, 32],
        share_conv: bool = True,
        pred_kernel_size: int = 1,
        angle_out_dim: int = 1,
        conv_cfg: OptConfigType = None,
        norm_cfg: ConfigType = dict(type='BN'),
        act_cfg: ConfigType = dict(type='SiLU', inplace=True),
        init_cfg: OptMultiConfig = None,
    ):
        # Must be set before super().__init__(), which calls _init_layers().
        self.angle_out_dim = angle_out_dim
        super().__init__(
            num_classes=num_classes,
            in_channels=in_channels,
            widen_factor=widen_factor,
            num_base_priors=num_base_priors,
            feat_channels=feat_channels,
            stacked_convs=stacked_convs,
            featmap_strides=featmap_strides,
            share_conv=share_conv,
            pred_kernel_size=pred_kernel_size,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            init_cfg=init_cfg)

    def _init_layers(self):
        """Initialize layers of the head, adding one angle conv per level."""
        super()._init_layers()
        padding = self.pred_kernel_size // 2
        self.rtm_ang = nn.ModuleList(
            nn.Conv2d(
                self.feat_channels,
                self.num_base_priors * self.angle_out_dim,
                self.pred_kernel_size,
                padding=padding) for _ in range(len(self.featmap_strides)))

    def init_weights(self) -> None:
        """Initialize weights of the head."""
        # Use prior in model initialization to improve stability
        super().init_weights()
        for ang_conv in self.rtm_ang:
            normal_init(ang_conv, std=0.01)

    def forward(self, feats: Tuple[Tensor, ...]) -> tuple:
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple: classification scores, bbox predictions and angle
            predictions, each a tuple of per-level 4D tensors with
            ``num_base_priors * num_classes``, ``num_base_priors * 4`` and
            ``num_base_priors * angle_out_dim`` channels respectively.
        """
        cls_scores = []
        bbox_preds = []
        angle_preds = []
        branches = zip(feats, self.cls_convs, self.reg_convs, self.rtm_cls,
                       self.rtm_reg, self.rtm_ang)
        for feat, cls_convs, reg_convs, cls_head, reg_head, ang_head \
                in branches:
            cls_feat = feat
            for layer in cls_convs:
                cls_feat = layer(cls_feat)
            reg_feat = feat
            for layer in reg_convs:
                reg_feat = layer(reg_feat)

            cls_scores.append(cls_head(cls_feat))
            bbox_preds.append(reg_head(reg_feat))
            # Angle branch shares the regression features.
            angle_preds.append(ang_head(reg_feat))
        return tuple(cls_scores), tuple(bbox_preds), tuple(angle_preds)
@MODELS.register_module()
class RTMDetRotatedHead(RTMDetHead):
    """RTMDet-R head.

    Compared with RTMDetHead, RTMDetRotatedHead add some args to support
    rotated object detection.

    - `angle_version` used to limit angle_range during training.
    - `angle_coder` used to encode and decode angle, which is similar
      to bbox_coder.
    - `use_hbbox_loss` and `loss_angle` allow custom regression loss
      calculation for rotated box.

      There are three combination options for regression:

      1. `use_hbbox_loss=False` and loss_angle is None.

      .. code:: text

        bbox_pred────(tblr)───┐
                              ▼
        angle_pred          decode──►rbox_pred──(xywha)─►loss_bbox
            │                 ▲
            └────►decode──(a)─┘

      2. `use_hbbox_loss=False` and loss_angle is specified.
         A angle loss is added on angle_pred.

      .. code:: text

        bbox_pred────(tblr)───┐
                              ▼
        angle_pred          decode──►rbox_pred──(xywha)─►loss_bbox
            │                 ▲
            ├────►decode──(a)─┘
            │
            └───────────────────────────────────────────►loss_angle

      3. `use_hbbox_loss=True` and loss_angle is specified.
         In this case the loss_angle must be set.

      .. code:: text

        bbox_pred──(tblr)──►decode──►hbox_pred──(xyxy)──►loss_bbox

        angle_pred──────────────────────────────────────►loss_angle

    - There's a `decoded_with_angle` flag in test_cfg, which is similar
      to training process.

      When `decoded_with_angle=True`:

      .. code:: text

        bbox_pred────(tblr)───┐
                              ▼
        angle_pred          decode──(xywha)──►rbox_pred
            │                 ▲
            └────►decode──(a)─┘

      When `decoded_with_angle=False`:

      .. code:: text

        bbox_pred──(tblr)─►decode
                              │ (xyxy)
                              ▼
                           format───(xywh)──►concat──(xywha)──►rbox_pred
                                               ▲
        angle_pred────────►decode────(a)───────┘

    Args:
        head_module(ConfigType): Base module used for RTMDetRotatedHead.
        prior_generator: Points generator feature maps in
            2D points-based detectors.
        bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder.
        loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.
        loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss.
        angle_version (str): Angle representations. Defaults to 'le90'.
        use_hbbox_loss (bool): If true, use horizontal bbox loss and
            loss_angle should not be None. Default to False.
        angle_coder (:obj:`ConfigDict` or dict): Config of angle coder.
        loss_angle (:obj:`ConfigDict` or dict, optional): Config of angle loss.
        train_cfg (:obj:`ConfigDict` or dict, optional): Training config of
            anchor head. Defaults to None.
        test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of
            anchor head. Defaults to None.
        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
            list[dict], optional): Initialization config dict.
            Defaults to None.
    """

    def __init__(
            self,
            head_module: ConfigType,
            prior_generator: ConfigType = dict(
                type='mmdet.MlvlPointGenerator', strides=[8, 16, 32],
                offset=0),
            bbox_coder: ConfigType = dict(type='DistanceAnglePointCoder'),
            loss_cls: ConfigType = dict(
                type='mmdet.QualityFocalLoss',
                use_sigmoid=True,
                beta=2.0,
                loss_weight=1.0),
            loss_bbox: ConfigType = dict(
                type='mmrotate.RotatedIoULoss', mode='linear',
                loss_weight=2.0),
            angle_version: str = 'le90',
            use_hbbox_loss: bool = False,
            angle_coder: ConfigType = dict(type='mmrotate.PseudoAngleCoder'),
            loss_angle: OptConfigType = None,
            train_cfg: OptConfigType = None,
            test_cfg: OptConfigType = None,
            init_cfg: OptMultiConfig = None):
        # Rotated-box support is provided by mmrotate; fail fast if missing.
        if not MMROTATE_AVAILABLE:
            raise ImportError(
                'Please run "mim install -r requirements/mmrotate.txt" '
                'to install mmrotate first for rotated detection.')

        self.angle_version = angle_version
        self.use_hbbox_loss = use_hbbox_loss
        if self.use_hbbox_loss:
            assert loss_angle is not None, \
                ('When use hbbox loss, loss_angle needs to be specified')
        self.angle_coder = TASK_UTILS.build(angle_coder)
        # The head module's angle channels are dictated by the angle coder.
        self.angle_out_dim = self.angle_coder.encode_size
        if head_module.get('angle_out_dim') is not None:
            warnings.warn('angle_out_dim will be overridden by angle_coder '
                          'and does not need to be set manually')
        head_module['angle_out_dim'] = self.angle_out_dim
        super().__init__(
            head_module=head_module,
            prior_generator=prior_generator,
            bbox_coder=bbox_coder,
            loss_cls=loss_cls,
            loss_bbox=loss_bbox,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            init_cfg=init_cfg)

        if loss_angle is not None:
            self.loss_angle = MODELS.build(loss_angle)
        else:
            self.loss_angle = None

    def predict_by_feat(self,
                        cls_scores: List[Tensor],
                        bbox_preds: List[Tensor],
                        angle_preds: List[Tensor],
                        objectnesses: Optional[List[Tensor]] = None,
                        batch_img_metas: Optional[List[dict]] = None,
                        cfg: Optional[ConfigDict] = None,
                        rescale: bool = True,
                        with_nms: bool = True) -> List[InstanceData]:
        """Transform a batch of output features extracted by the head into bbox
        results.

        Args:
            cls_scores (list[Tensor]): Classification scores for all
                scale levels, each is a 4D-tensor, has shape
                (batch_size, num_priors * num_classes, H, W).
            bbox_preds (list[Tensor]): Box energies / deltas for all
                scale levels, each is a 4D-tensor, has shape
                (batch_size, num_priors * 4, H, W).
            angle_preds (list[Tensor]): Box angle for each scale level
                with shape (N, num_points * angle_dim, H, W)
            objectnesses (list[Tensor], Optional): Score factor for
                all scale level, each is a 4D-tensor, has shape
                (batch_size, 1, H, W).
            batch_img_metas (list[dict], Optional): Batch image meta info.
                Defaults to None.
            cfg (ConfigDict, optional): Test / postprocessing
                configuration, if None, test_cfg would be used.
                Defaults to None.
            rescale (bool): If True, return boxes in original image space.
                Defaults to True.
            with_nms (bool): If True, do nms before return boxes.
                Defaults to True.

        Returns:
            list[:obj:`InstanceData`]: Object detection results of each image
            after the post process. Each item usually contains following keys.

            - scores (Tensor): Classification scores, has a shape
              (num_instance, )
            - labels (Tensor): Labels of bboxes, has a shape
              (num_instances, ).
            - bboxes (Tensor): Has a shape (num_instances, 5),
              the last dimension 5 arrange as (x, y, w, h, angle).
        """
        assert len(cls_scores) == len(bbox_preds)
        if objectnesses is None:
            with_objectnesses = False
        else:
            with_objectnesses = True
            assert len(cls_scores) == len(objectnesses)

        cfg = self.test_cfg if cfg is None else cfg
        cfg = copy.deepcopy(cfg)

        # Multi-label is only meaningful with more than one class.
        multi_label = cfg.multi_label
        multi_label &= self.num_classes > 1
        cfg.multi_label = multi_label

        # Whether to decode rbox with angle.
        # different setting lead to different final results.
        # Defaults to True.
        decode_with_angle = cfg.get('decode_with_angle', True)

        num_imgs = len(batch_img_metas)
        featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]

        # If the shape does not change, use the previous mlvl_priors
        if featmap_sizes != self.featmap_sizes:
            self.mlvl_priors = self.prior_generator.grid_priors(
                featmap_sizes,
                dtype=cls_scores[0].dtype,
                device=cls_scores[0].device)
            self.featmap_sizes = featmap_sizes
        flatten_priors = torch.cat(self.mlvl_priors)

        # Per-prior stride, flattened over all levels.
        mlvl_strides = [
            flatten_priors.new_full(
                (featmap_size.numel() * self.num_base_priors, ), stride) for
            featmap_size, stride in zip(featmap_sizes, self.featmap_strides)
        ]
        flatten_stride = torch.cat(mlvl_strides)

        # flatten cls_scores, bbox_preds and objectness
        flatten_cls_scores = [
            cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,
                                                  self.num_classes)
            for cls_score in cls_scores
        ]
        flatten_bbox_preds = [
            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
            for bbox_pred in bbox_preds
        ]
        flatten_angle_preds = [
            angle_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,
                                                   self.angle_out_dim)
            for angle_pred in angle_preds
        ]

        flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()
        flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)
        flatten_angle_preds = torch.cat(flatten_angle_preds, dim=1)
        # Decode the encoded angle representation to a plain angle value.
        flatten_angle_preds = self.angle_coder.decode(
            flatten_angle_preds, keepdim=True)

        if decode_with_angle:
            # Decode distances + angle jointly into (x, y, w, h, a).
            flatten_rbbox_preds = torch.cat(
                [flatten_bbox_preds, flatten_angle_preds], dim=-1)
            flatten_decoded_bboxes = self.bbox_coder.decode(
                flatten_priors[None], flatten_rbbox_preds, flatten_stride)
        else:
            # Decode a horizontal box first, then append the angle.
            flatten_decoded_hbboxes = self.bbox_coder.decode(
                flatten_priors[None], flatten_bbox_preds, flatten_stride)
            flatten_decoded_hbboxes = HorizontalBoxes.xyxy_to_cxcywh(
                flatten_decoded_hbboxes)
            flatten_decoded_bboxes = torch.cat(
                [flatten_decoded_hbboxes, flatten_angle_preds], dim=-1)

        if with_objectnesses:
            flatten_objectness = [
                objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1)
                for objectness in objectnesses
            ]
            flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid()
        else:
            flatten_objectness = [None for _ in range(num_imgs)]

        results_list = []
        for (bboxes, scores, objectness,
             img_meta) in zip(flatten_decoded_bboxes, flatten_cls_scores,
                              flatten_objectness, batch_img_metas):
            scale_factor = img_meta['scale_factor']
            if 'pad_param' in img_meta:
                pad_param = img_meta['pad_param']
            else:
                pad_param = None

            score_thr = cfg.get('score_thr', -1)
            # yolox_style does not require the following operations
            if objectness is not None and score_thr > 0 and not cfg.get(
                    'yolox_style', False):
                conf_inds = objectness > score_thr
                bboxes = bboxes[conf_inds, :]
                scores = scores[conf_inds, :]
                objectness = objectness[conf_inds]

            if objectness is not None:
                # conf = obj_conf * cls_conf
                scores *= objectness[:, None]

            if scores.shape[0] == 0:
                # Return an (empty) result with the expected fields so that
                # downstream consumers need no special-casing.
                empty_results = InstanceData()
                empty_results.bboxes = RotatedBoxes(bboxes)
                empty_results.scores = scores[:, 0]
                empty_results.labels = scores[:, 0].int()
                results_list.append(empty_results)
                continue

            nms_pre = cfg.get('nms_pre', 100000)
            if cfg.multi_label is False:
                scores, labels = scores.max(1, keepdim=True)
                scores, _, keep_idxs, results = filter_scores_and_topk(
                    scores,
                    score_thr,
                    nms_pre,
                    results=dict(labels=labels[:, 0]))
                labels = results['labels']
            else:
                scores, labels, keep_idxs, _ = filter_scores_and_topk(
                    scores, score_thr, nms_pre)

            results = InstanceData(
                scores=scores,
                labels=labels,
                bboxes=RotatedBoxes(bboxes[keep_idxs]))

            if rescale:
                # Undo letterbox padding before undoing the resize.
                if pad_param is not None:
                    results.bboxes.translate_([-pad_param[2], -pad_param[0]])

                scale_factor = [1 / s for s in img_meta['scale_factor']]
                results.bboxes = scale_boxes(results.bboxes, scale_factor)

            if cfg.get('yolox_style', False):
                # do not need max_per_img
                cfg.max_per_img = len(results)

            results = self._bbox_post_process(
                results=results,
                cfg=cfg,
                rescale=False,
                with_nms=with_nms,
                img_meta=img_meta)

            results_list.append(results)
        return results_list

    def loss_by_feat(
            self,
            cls_scores: List[Tensor],
            bbox_preds: List[Tensor],
            angle_preds: List[Tensor],
            batch_gt_instances: InstanceList,
            batch_img_metas: List[dict],
            batch_gt_instances_ignore: OptInstanceList = None) -> dict:
        """Compute losses of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level
                Has shape (N, num_anchors * num_classes, H, W)
            bbox_preds (list[Tensor]): Decoded box for each scale
                level with shape (N, num_anchors * 4, H, W) in
                [tl_x, tl_y, br_x, br_y] format.
            angle_preds (list[Tensor]): Angle prediction for each scale
                level with shape (N, num_anchors * angle_out_dim, H, W).
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
                Batch of gt_instances_ignore. It includes ``bboxes`` attribute
                data that is ignored during training and testing.
                Defaults to None.

        Returns:
            dict[str, Tensor]: A dictionary of loss components.
        """
        num_imgs = len(batch_img_metas)
        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
        assert len(featmap_sizes) == self.prior_generator.num_levels

        gt_info = gt_instances_preprocess(batch_gt_instances, num_imgs)
        gt_labels = gt_info[:, :, :1]
        gt_bboxes = gt_info[:, :, 1:]  # xywha
        # Rows that are all-zero are padding, not real gt boxes.
        pad_bbox_flag = (gt_bboxes.sum(-1, keepdim=True) > 0).float()

        device = cls_scores[0].device

        # If the shape does not equal, generate new one
        if featmap_sizes != self.featmap_sizes_train:
            self.featmap_sizes_train = featmap_sizes
            mlvl_priors_with_stride = self.prior_generator.grid_priors(
                featmap_sizes, device=device, with_stride=True)
            self.flatten_priors_train = torch.cat(
                mlvl_priors_with_stride, dim=0)

        flatten_cls_scores = torch.cat([
            cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,
                                                  self.cls_out_channels)
            for cls_score in cls_scores
        ], 1).contiguous()

        flatten_tblrs = torch.cat([
            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
            for bbox_pred in bbox_preds
        ], 1)
        # Scale the (t, b, l, r) distances by the per-prior stride.
        flatten_tblrs = flatten_tblrs * self.flatten_priors_train[..., -1,
                                                                  None]
        flatten_angles = torch.cat([
            angle_pred.permute(0, 2, 3, 1).reshape(
                num_imgs, -1, self.angle_out_dim) for angle_pred in angle_preds
        ], 1)
        flatten_decoded_angle = self.angle_coder.decode(
            flatten_angles, keepdim=True)
        flatten_tblra = torch.cat([flatten_tblrs, flatten_decoded_angle],
                                  dim=-1)
        # Rotated boxes are always decoded: the assigner works on rboxes even
        # when the regression loss is computed on horizontal boxes.
        flatten_rbboxes = distance2obb(
            self.flatten_priors_train[..., :2],
            flatten_tblra,
            angle_version=self.angle_version)
        if self.use_hbbox_loss:
            flatten_hbboxes = distance2bbox(self.flatten_priors_train[..., :2],
                                            flatten_tblrs)

        assigned_result = self.assigner(flatten_rbboxes.detach(),
                                        flatten_cls_scores.detach(),
                                        self.flatten_priors_train, gt_labels,
                                        gt_bboxes, pad_bbox_flag)

        labels = assigned_result['assigned_labels'].reshape(-1)
        label_weights = assigned_result['assigned_labels_weights'].reshape(-1)
        bbox_targets = assigned_result['assigned_bboxes'].reshape(-1, 5)
        assign_metrics = assigned_result['assign_metrics'].reshape(-1)
        cls_preds = flatten_cls_scores.reshape(-1, self.num_classes)

        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
        bg_class_ind = self.num_classes
        pos_inds = ((labels >= 0)
                    & (labels < bg_class_ind)).nonzero().squeeze(1)
        # Averaged across ranks so distributed training stays consistent.
        avg_factor = reduce_mean(assign_metrics.sum()).clamp_(min=1).item()

        loss_cls = self.loss_cls(
            cls_preds, (labels, assign_metrics),
            label_weights,
            avg_factor=avg_factor)

        pos_bbox_targets = bbox_targets[pos_inds]

        if self.use_hbbox_loss:
            bbox_preds = flatten_hbboxes.reshape(-1, 4)
            pos_bbox_targets = bbox_cxcywh_to_xyxy(pos_bbox_targets[:, :4])
        else:
            bbox_preds = flatten_rbboxes.reshape(-1, 5)
        angle_preds = flatten_angles.reshape(-1, self.angle_out_dim)

        if len(pos_inds) > 0:
            loss_bbox = self.loss_bbox(
                bbox_preds[pos_inds],
                pos_bbox_targets,
                weight=assign_metrics[pos_inds],
                avg_factor=avg_factor)
            loss_angle = angle_preds.sum() * 0
            if self.loss_angle is not None:
                pos_angle_targets = bbox_targets[pos_inds][:, 4:5]
                pos_angle_targets = self.angle_coder.encode(pos_angle_targets)
                loss_angle = self.loss_angle(
                    angle_preds[pos_inds],
                    pos_angle_targets,
                    weight=assign_metrics[pos_inds],
                    avg_factor=avg_factor)
        else:
            # No positives: keep zero-valued graph connections so every
            # parameter still receives a gradient (needed for DDP).
            loss_bbox = bbox_preds.sum() * 0
            loss_angle = angle_preds.sum() * 0

        losses = dict()
        losses['loss_cls'] = loss_cls
        losses['loss_bbox'] = loss_bbox
        if self.loss_angle is not None:
            losses['loss_angle'] = loss_angle

        return losses
| 26,337 | 40.024922 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/dense_heads/ppyoloe_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models.utils import multi_apply
from mmdet.utils import (ConfigType, OptConfigType, OptInstanceList,
OptMultiConfig, reduce_mean)
from mmengine import MessageHub
from mmengine.model import BaseModule, bias_init_with_prob
from mmengine.structures import InstanceData
from torch import Tensor
from mmyolo.registry import MODELS
from ..layers.yolo_bricks import PPYOLOESELayer
from ..utils import gt_instances_preprocess
from .yolov6_head import YOLOv6Head
@MODELS.register_module()
class PPYOLOEHeadModule(BaseModule):
    """PPYOLOEHead head module used in `PPYOLOE.
    <https://arxiv.org/abs/2203.16250>`_.
    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        num_base_priors (int): The number of priors (points) at a point
            on the feature grid.
        featmap_strides (Sequence[int]): Downsample factor of each feature map.
            Defaults to (8, 16, 32).
        reg_max (int): Max value of integral set :math: ``{0, ..., reg_max}``
            in QFL setting. Defaults to 16.
        norm_cfg (dict): Config dict for normalization layer.
            Defaults to dict(type='BN', momentum=0.1, eps=1e-5).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
    """
    def __init__(self,
                 num_classes: int,
                 in_channels: Union[int, Sequence],
                 widen_factor: float = 1.0,
                 num_base_priors: int = 1,
                 featmap_strides: Sequence[int] = (8, 16, 32),
                 reg_max: int = 16,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.1, eps=1e-5),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 init_cfg: OptMultiConfig = None):
        super().__init__(init_cfg=init_cfg)
        self.num_classes = num_classes
        self.featmap_strides = featmap_strides
        self.num_levels = len(self.featmap_strides)
        self.num_base_priors = num_base_priors
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.reg_max = reg_max
        # Scale channel counts by ``widen_factor``; a scalar ``in_channels``
        # is broadcast to every feature level.
        if isinstance(in_channels, int):
            self.in_channels = [int(in_channels * widen_factor)
                                ] * self.num_levels
        else:
            self.in_channels = [int(i * widen_factor) for i in in_channels]
        self._init_layers()
    def init_weights(self, prior_prob=0.01):
        """Initialize the weight and bias of PPYOLOE head.

        Args:
            prior_prob (float): Prior foreground probability; the cls bias
                is initialized so sigmoid(logit) starts near this value.
        """
        super().init_weights()
        for conv in self.cls_preds:
            # Zero weights + prior-prob bias -> stable, low initial scores.
            conv.bias.data.fill_(bias_init_with_prob(prior_prob))
            conv.weight.data.fill_(0.)
        for conv in self.reg_preds:
            conv.bias.data.fill_(1.0)
            conv.weight.data.fill_(0.)
    def _init_layers(self):
        """initialize conv layers in PPYOLOE head."""
        self.cls_preds = nn.ModuleList()
        self.reg_preds = nn.ModuleList()
        self.cls_stems = nn.ModuleList()
        self.reg_stems = nn.ModuleList()
        # One SE-style stem per feature level for each branch (cls / reg).
        for in_channel in self.in_channels:
            self.cls_stems.append(
                PPYOLOESELayer(
                    in_channel, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg))
            self.reg_stems.append(
                PPYOLOESELayer(
                    in_channel, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg))
        # 3x3 prediction convs; the reg branch outputs a discrete
        # distribution of (reg_max + 1) bins for each of the 4 box sides.
        for in_channel in self.in_channels:
            self.cls_preds.append(
                nn.Conv2d(in_channel, self.num_classes, 3, padding=1))
            self.reg_preds.append(
                nn.Conv2d(in_channel, 4 * (self.reg_max + 1), 3, padding=1))
        # init proj
        # Fixed projection weights [0, 1, ..., reg_max]: turns a softmaxed
        # distance distribution into its expectation (see forward_single).
        proj = torch.linspace(0, self.reg_max, self.reg_max + 1).view(
            [1, self.reg_max + 1, 1, 1])
        self.register_buffer('proj', proj, persistent=False)
    def forward(self, x: Tuple[Tensor]) -> Tensor:
        """Forward features from the upstream network.
        Args:
            x (Tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.
        Returns:
            Tuple[List]: A tuple of multi-level classification scores, bbox
            predictions.
        """
        assert len(x) == self.num_levels
        return multi_apply(self.forward_single, x, self.cls_stems,
                           self.cls_preds, self.reg_stems, self.reg_preds)
    def forward_single(self, x: Tensor, cls_stem: nn.ModuleList,
                       cls_pred: nn.ModuleList, reg_stem: nn.ModuleList,
                       reg_pred: nn.ModuleList) -> Tensor:
        """Forward feature of a single scale level.

        Returns ``(cls_logit, bbox_preds, bbox_dist_preds)`` in training
        mode and ``(cls_logit, bbox_preds)`` in eval mode.
        """
        b, _, h, w = x.shape
        hw = h * w
        avg_feat = F.adaptive_avg_pool2d(x, (1, 1))
        # Residual connection around the SE cls stem.
        cls_logit = cls_pred(cls_stem(x, avg_feat) + x)
        bbox_dist_preds = reg_pred(reg_stem(x, avg_feat))
        # TODO: Test whether use matmul instead of conv can speed up training.
        # (b, 4*(reg_max+1), h, w) -> (b, reg_max+1, h*w, 4)
        bbox_dist_preds = bbox_dist_preds.reshape(
            [-1, 4, self.reg_max + 1, hw]).permute(0, 2, 3, 1)
        # Expected distance per side: 1x1 conv with the fixed ``proj``
        # weights computes sum_i(i * softmax_prob_i) over the bins.
        bbox_preds = F.conv2d(F.softmax(bbox_dist_preds, dim=1), self.proj)
        if self.training:
            return cls_logit, bbox_preds, bbox_dist_preds
        else:
            return cls_logit, bbox_preds
@MODELS.register_module()
class PPYOLOEHead(YOLOv6Head):
    """PPYOLOEHead head used in `PPYOLOE <https://arxiv.org/abs/2203.16250>`_.
    The YOLOv6 head and the PPYOLOE head are only slightly different.
    Distribution focal loss is extra used in PPYOLOE, but not in YOLOv6.
    Args:
        head_module(ConfigType): Base module used for YOLOv5Head
        prior_generator(dict): Points generator feature maps in
            2D points-based detectors.
        bbox_coder (:obj:`ConfigDict` or dict): Config of bbox coder.
        loss_cls (:obj:`ConfigDict` or dict): Config of classification loss.
        loss_bbox (:obj:`ConfigDict` or dict): Config of localization loss.
        loss_dfl (:obj:`ConfigDict` or dict): Config of distribution focal
            loss.
        train_cfg (:obj:`ConfigDict` or dict, optional): Training config of
            anchor head. Defaults to None.
        test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of
            anchor head. Defaults to None.
        init_cfg (:obj:`ConfigDict` or list[:obj:`ConfigDict`] or dict or
            list[dict], optional): Initialization config dict.
            Defaults to None.
    """
    def __init__(self,
                 head_module: ConfigType,
                 prior_generator: ConfigType = dict(
                     type='mmdet.MlvlPointGenerator',
                     offset=0.5,
                     strides=[8, 16, 32]),
                 bbox_coder: ConfigType = dict(type='DistancePointBBoxCoder'),
                 loss_cls: ConfigType = dict(
                     type='mmdet.VarifocalLoss',
                     use_sigmoid=True,
                     alpha=0.75,
                     gamma=2.0,
                     iou_weighted=True,
                     reduction='sum',
                     loss_weight=1.0),
                 loss_bbox: ConfigType = dict(
                     type='IoULoss',
                     iou_mode='giou',
                     bbox_format='xyxy',
                     reduction='mean',
                     loss_weight=2.5,
                     return_iou=False),
                 loss_dfl: ConfigType = dict(
                     type='mmdet.DistributionFocalLoss',
                     reduction='mean',
                     loss_weight=0.5 / 4),
                 train_cfg: OptConfigType = None,
                 test_cfg: OptConfigType = None,
                 init_cfg: OptMultiConfig = None):
        super().__init__(
            head_module=head_module,
            prior_generator=prior_generator,
            bbox_coder=bbox_coder,
            loss_cls=loss_cls,
            loss_bbox=loss_bbox,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            init_cfg=init_cfg)
        # Extra loss vs. YOLOv6: DFL over the per-side distance distributions
        # predicted by PPYOLOEHeadModule.
        self.loss_dfl = MODELS.build(loss_dfl)
        # ppyoloe doesn't need loss_obj
        self.loss_obj = None
    def loss_by_feat(
            self,
            cls_scores: Sequence[Tensor],
            bbox_preds: Sequence[Tensor],
            bbox_dist_preds: Sequence[Tensor],
            batch_gt_instances: Sequence[InstanceData],
            batch_img_metas: Sequence[dict],
            batch_gt_instances_ignore: OptInstanceList = None) -> dict:
        """Calculate the loss based on the features extracted by the detection
        head.
        Args:
            cls_scores (Sequence[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_priors * num_classes.
            bbox_preds (Sequence[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_priors * 4.
            bbox_dist_preds (Sequence[Tensor]): Box distribution logits for
                each scale level with shape (bs, reg_max + 1, H*W, 4).
            batch_gt_instances (list[:obj:`InstanceData`]): Batch of
                gt_instance. It usually includes ``bboxes`` and ``labels``
                attributes.
            batch_img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            batch_gt_instances_ignore (list[:obj:`InstanceData`], optional):
                Batch of gt_instances_ignore. It includes ``bboxes`` attribute
                data that is ignored during training and testing.
                Defaults to None.
        Returns:
            dict[str, Tensor]: A dictionary of losses.
        """
        # get epoch information from message hub
        # NOTE(review): the 'epoch' key is expected to be written to the
        # MessageHub by the training loop — confirm the runner sets it.
        message_hub = MessageHub.get_current_instance()
        current_epoch = message_hub.get_info('epoch')
        num_imgs = len(batch_img_metas)
        current_featmap_sizes = [
            cls_score.shape[2:] for cls_score in cls_scores
        ]
        # If the shape does not equal, generate new one
        # (priors are cached across iterations; cheap for fixed-size input).
        # ``featmap_sizes_train`` etc. are attributes initialised by the
        # parent YOLOv6Head — not visible in this file.
        if current_featmap_sizes != self.featmap_sizes_train:
            self.featmap_sizes_train = current_featmap_sizes
            mlvl_priors_with_stride = self.prior_generator.grid_priors(
                self.featmap_sizes_train,
                dtype=cls_scores[0].dtype,
                device=cls_scores[0].device,
                with_stride=True)
            self.num_level_priors = [len(n) for n in mlvl_priors_with_stride]
            self.flatten_priors_train = torch.cat(
                mlvl_priors_with_stride, dim=0)
            # Per-prior stride, shape (num_priors, 1).
            self.stride_tensor = self.flatten_priors_train[..., [2]]
        # gt info
        # Dense padded gt tensor: (num_imgs, max_num_gt, 5) as (label, xyxy).
        gt_info = gt_instances_preprocess(batch_gt_instances, num_imgs)
        gt_labels = gt_info[:, :, :1]
        gt_bboxes = gt_info[:, :, 1:]  # xyxy
        # 1 for real gts, 0 for zero-padded entries.
        pad_bbox_flag = (gt_bboxes.sum(-1, keepdim=True) > 0).float()
        # pred info
        flatten_cls_preds = [
            cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,
                                                 self.num_classes)
            for cls_pred in cls_scores
        ]
        flatten_pred_bboxes = [
            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
            for bbox_pred in bbox_preds
        ]
        # (bs, reg_max+1, h*w, 4) -> (bs, h*w, 4*(reg_max+1))
        flatten_pred_dists = [
            bbox_pred_org.permute(0, 2, 3, 1).reshape(
                num_imgs, -1, (self.head_module.reg_max + 1) * 4)
            for bbox_pred_org in bbox_dist_preds
        ]
        flatten_dist_preds = torch.cat(flatten_pred_dists, dim=1)
        flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1)
        flatten_pred_bboxes = torch.cat(flatten_pred_bboxes, dim=1)
        # Decode per-side distances into absolute xyxy boxes (image scale).
        flatten_pred_bboxes = self.bbox_coder.decode(
            self.flatten_priors_train[..., :2], flatten_pred_bboxes,
            self.stride_tensor[..., 0])
        pred_scores = torch.sigmoid(flatten_cls_preds)
        # Warm-up schedule: use the static initial assigner for the first
        # ``initial_epoch`` epochs, then switch to the prediction-aware
        # assigner (both built by YOLOv6Head from train_cfg).
        if current_epoch < self.initial_epoch:
            assigned_result = self.initial_assigner(
                flatten_pred_bboxes.detach(), self.flatten_priors_train,
                self.num_level_priors, gt_labels, gt_bboxes, pad_bbox_flag)
        else:
            assigned_result = self.assigner(flatten_pred_bboxes.detach(),
                                            pred_scores.detach(),
                                            self.flatten_priors_train,
                                            gt_labels, gt_bboxes,
                                            pad_bbox_flag)
        assigned_bboxes = assigned_result['assigned_bboxes']
        assigned_scores = assigned_result['assigned_scores']
        fg_mask_pre_prior = assigned_result['fg_mask_pre_prior']
        # cls loss
        # Computed with autocast disabled, i.e. in fp32 even under AMP —
        # presumably for numerical stability of VarifocalLoss.
        with torch.cuda.amp.autocast(enabled=False):
            loss_cls = self.loss_cls(flatten_cls_preds, assigned_scores)
        # rescale bbox
        # In-place rescale of targets and predictions from image scale to
        # grid units; bbox/DFL losses below operate in grid units.
        assigned_bboxes /= self.stride_tensor
        flatten_pred_bboxes /= self.stride_tensor
        assigned_scores_sum = assigned_scores.sum()
        # reduce_mean between all gpus
        assigned_scores_sum = torch.clamp(
            reduce_mean(assigned_scores_sum), min=1)
        loss_cls /= assigned_scores_sum
        # select positive samples mask
        num_pos = fg_mask_pre_prior.sum()
        if num_pos > 0:
            # when num_pos > 0, assigned_scores_sum will >0, so the loss_bbox
            # will not report an error
            # iou loss
            prior_bbox_mask = fg_mask_pre_prior.unsqueeze(-1).repeat([1, 1, 4])
            pred_bboxes_pos = torch.masked_select(
                flatten_pred_bboxes, prior_bbox_mask).reshape([-1, 4])
            assigned_bboxes_pos = torch.masked_select(
                assigned_bboxes, prior_bbox_mask).reshape([-1, 4])
            # Per-positive weight: the assigned soft score of that prior.
            bbox_weight = torch.masked_select(
                assigned_scores.sum(-1), fg_mask_pre_prior).unsqueeze(-1)
            loss_bbox = self.loss_bbox(
                pred_bboxes_pos,
                assigned_bboxes_pos,
                weight=bbox_weight,
                avg_factor=assigned_scores_sum)
            # dfl loss
            dist_mask = fg_mask_pre_prior.unsqueeze(-1).repeat(
                [1, 1, (self.head_module.reg_max + 1) * 4])
            pred_dist_pos = torch.masked_select(
                flatten_dist_preds,
                dist_mask).reshape([-1, 4, self.head_module.reg_max + 1])
            # Target (l, t, r, b) distances in grid units, clipped to
            # [0, reg_max] by the coder's ``max_dis``/``eps`` arguments.
            assigned_ltrb = self.bbox_coder.encode(
                self.flatten_priors_train[..., :2] / self.stride_tensor,
                assigned_bboxes,
                max_dis=self.head_module.reg_max,
                eps=0.01)
            assigned_ltrb_pos = torch.masked_select(
                assigned_ltrb, prior_bbox_mask).reshape([-1, 4])
            loss_dfl = self.loss_dfl(
                pred_dist_pos.reshape(-1, self.head_module.reg_max + 1),
                assigned_ltrb_pos.reshape(-1),
                weight=bbox_weight.expand(-1, 4).reshape(-1),
                avg_factor=assigned_scores_sum)
        else:
            # No positives: emit zero losses that stay on the graph so DDP
            # still sees gradients for every parameter.
            loss_bbox = flatten_pred_bboxes.sum() * 0
            loss_dfl = flatten_pred_bboxes.sum() * 0
        return dict(loss_cls=loss_cls, loss_bbox=loss_bbox, loss_dfl=loss_dfl)
| 15,834 | 41.226667 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/utils/misc.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Sequence, Union
import torch
from mmdet.structures.bbox.transforms import get_box_tensor
from torch import Tensor
def make_divisible(x: float,
                   widen_factor: float = 1.0,
                   divisor: int = 8) -> int:
    """Round ``x * widen_factor`` up to the nearest multiple of ``divisor``."""
    scaled = x * widen_factor
    return math.ceil(scaled / divisor) * divisor
def make_round(x: float, deepen_factor: float = 1.0) -> int:
    """Scale ``x`` by ``deepen_factor`` and round, never dropping below 1.

    Values of ``x`` not greater than 1 are returned unchanged.
    """
    if x <= 1:
        return x
    return max(1, round(x * deepen_factor))
def gt_instances_preprocess(batch_gt_instances: Union[Tensor, Sequence],
                            batch_size: int) -> Tensor:
    """Pad per-image ground truths into one dense batch tensor.

    Converts input of shape [all_gt_bboxes, box_dim+2] into a
    [batch_size, number_gt, box_dim+1] tensor, zero-padding images that
    have fewer ground truths than the largest image in the batch.
    Horizontal boxes use box_dim=4, rotated boxes box_dim=5.

    Args:
        batch_gt_instances (Sequence[Tensor]): Ground truth
            instances for whole batch, shape [all_gt_bboxes, box_dim+2]
        batch_size (int): Batch size.
    Returns:
        Tensor: batch gt instances data, shape
        [batch_size, number_gt, box_dim+1]
    """
    if isinstance(batch_gt_instances, Sequence):
        # List-of-InstanceData path: pad every image up to the longest one.
        pad_len = max(len(inst) for inst in batch_gt_instances)
        padded = []
        for inst in batch_gt_instances:
            bboxes = inst.bboxes
            labels = inst.labels
            box_dim = get_box_tensor(bboxes).size(-1)
            merged = torch.cat((labels[:, None], bboxes), dim=-1)
            shortfall = pad_len - bboxes.shape[0]
            if shortfall > 0:
                zeros = bboxes.new_full([shortfall, box_dim + 1], 0)
                merged = torch.cat((merged, zeros), dim=0)
            padded.append(merged)
        return torch.stack(padded)
    # Faster dense-tensor path. Rows are [img_ind, cls_ind, *box]:
    # horizontal boxes -> [img_ind, cls_ind, x1, y1, x2, y2],
    # rotated boxes    -> [img_ind, cls_ind, x, y, w, h, a].
    assert isinstance(batch_gt_instances, Tensor)
    box_dim = batch_gt_instances.size(-1) - 2
    if len(batch_gt_instances) == 0:
        return torch.zeros((batch_size, 0, box_dim + 1),
                           dtype=batch_gt_instances.dtype,
                           device=batch_gt_instances.device)
    image_inds = batch_gt_instances[:, 0]
    # Longest per-image gt count determines the padded length.
    pad_len = image_inds.unique(return_counts=True)[1].max()
    out = torch.zeros((batch_size, pad_len, box_dim + 1),
                      dtype=batch_gt_instances.dtype,
                      device=batch_gt_instances.device)
    for img_id in range(batch_size):
        hit = image_inds == img_id
        gt_count = hit.sum()
        if gt_count:
            out[img_id, :gt_count] = batch_gt_instances[hit, 1:]
    return out
| 3,851 | 38.306122 | 76 | py |
mmyolo | mmyolo-main/mmyolo/models/task_modules/assigners/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import torch
import torch.nn.functional as F
from torch import Tensor
def select_candidates_in_gts(priors_points: Tensor,
                             gt_bboxes: Tensor,
                             eps: float = 1e-9) -> Tensor:
    """Select the positive priors whose center lies inside a gt box.

    Args:
        priors_points (Tensor): Model priors points,
            shape(num_priors, 2)
        gt_bboxes (Tensor): Ground true bboxes,
            shape(batch_size, num_gt, 4)
        eps (float): Default to 1e-9.
    Return:
        (Tensor): shape(batch_size, num_gt, num_priors)
    """
    batch_size, num_gt, _ = gt_bboxes.size()
    num_priors = priors_points.size(0)
    flat_gts = gt_bboxes.reshape([-1, 4])
    points = priors_points.unsqueeze(0).repeat(batch_size * num_gt, 1, 1)
    # Signed distances from each prior center to the four gt edges;
    # all four positive <=> the center is strictly inside the box.
    corners_lt = flat_gts[:, 0:2].unsqueeze(1).repeat(1, num_priors, 1)
    corners_rb = flat_gts[:, 2:4].unsqueeze(1).repeat(1, num_priors, 1)
    edge_deltas = torch.cat(
        [points - corners_lt, corners_rb - points], dim=-1)
    edge_deltas = edge_deltas.reshape([batch_size, num_gt, num_priors, -1])
    return (edge_deltas.min(dim=-1).values > eps).to(gt_bboxes.dtype)
def select_highest_overlaps(pos_mask: Tensor, overlaps: Tensor,
                            num_gt: int) -> Tuple[Tensor, Tensor, Tensor]:
    """Resolve priors assigned to multiple gts by keeping the highest IoU.

    Args:
        pos_mask (Tensor): The assigned positive sample mask,
            shape(batch_size, num_gt, num_priors)
        overlaps (Tensor): IoU between all bbox and ground truth,
            shape(batch_size, num_gt, num_priors)
        num_gt (int): Number of ground truth.
    Return:
        gt_idx_pre_prior (Tensor): Target ground truth index,
            shape(batch_size, num_priors)
        fg_mask_pre_prior (Tensor): Force matching ground truth,
            shape(batch_size, num_priors)
        pos_mask (Tensor): The assigned positive sample mask,
            shape(batch_size, num_gt, num_priors)
    """
    # Number of gts each prior is currently matched to.
    fg_mask_pre_prior = pos_mask.sum(axis=-2)
    if fg_mask_pre_prior.max() > 1:
        # Conflicted priors keep only the gt with the largest IoU.
        multi_match = (fg_mask_pre_prior.unsqueeze(1) > 1).repeat(
            [1, num_gt, 1])
        best_gt = overlaps.argmax(axis=1)
        keep = F.one_hot(best_gt, num_gt).permute(0, 2, 1).to(overlaps.dtype)
        pos_mask = torch.where(multi_match, keep, pos_mask)
        fg_mask_pre_prior = pos_mask.sum(axis=-2)
    gt_idx_pre_prior = pos_mask.argmax(axis=-2)
    return gt_idx_pre_prior, fg_mask_pre_prior, pos_mask
# TODO:'mmdet.BboxOverlaps2D' will cause gradient inconsistency,
# which will be found and solved in a later version.
def yolov6_iou_calculator(bbox1: Tensor,
                          bbox2: Tensor,
                          eps: float = 1e-9) -> Tensor:
    """Calculate pairwise IoU between two batched bbox sets.

    Args:
        bbox1 (Tensor): shape(batch size, num_gt, 4)
        bbox2 (Tensor): shape(batch size, num_priors, 4)
        eps (float): Default to 1e-9.
    Return:
        (Tensor): IoU, shape(size, num_gt, num_priors)
    """
    # Broadcast to all [N, M1, M2] box pairs.
    boxes_a = bbox1.unsqueeze(2)  # [N, M1, 4] -> [N, M1, 1, 4]
    boxes_b = bbox2.unsqueeze(1)  # [N, M2, 4] -> [N, 1, M2, 4]
    a_lt, a_rb = boxes_a[..., 0:2], boxes_a[..., 2:4]
    b_lt, b_rb = boxes_b[..., 0:2], boxes_b[..., 2:4]
    # Intersection area, clipped at zero for disjoint pairs.
    inter = (torch.minimum(a_rb, b_rb) -
             torch.maximum(a_lt, b_lt)).clip(0).prod(-1)
    area_a = (a_rb - a_lt).clip(0).prod(-1)
    area_b = (b_rb - b_lt).clip(0).prod(-1)
    return inter / (area_a + area_b - inter + eps)
| 4,202 | 36.864865 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/task_modules/assigners/batch_dsl_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.structures.bbox import BaseBoxes
from mmdet.utils import ConfigType
from torch import Tensor
from mmyolo.registry import TASK_UTILS
INF = 100000000
EPS = 1.0e-7
def find_inside_points(boxes: Tensor,
                       points: Tensor,
                       box_dim: int = 4,
                       eps: float = 0.01) -> Tensor:
    """Find inside box points in batches. Boxes dimension must be 3.

    Args:
        boxes (Tensor): Boxes tensor. Must be batch input.
            Has shape of (batch_size, n_boxes, box_dim).
        points (Tensor): Points coordinates. Has shape of (n_points, 2).
        box_dim (int): The dimension of box. 4 means horizontal box and
            5 means rotated box. Defaults to 4.
        eps (float): Make sure the points are inside not on the boundary.
            Only use in rotated boxes. Defaults to 0.01.
    Returns:
        Tensor: A BoolTensor indicating whether a point is inside
        boxes. The index has shape of (n_points, batch_size, n_boxes).
    """
    if box_dim == 4:
        # Horizontal (x1, y1, x2, y2) boxes: a point is inside when all
        # four signed edge distances are positive.
        dist_lt = points[:, None, None] - boxes[..., :2]
        dist_rb = boxes[..., 2:] - points[:, None, None]
        return torch.cat([dist_lt, dist_rb], dim=-1).min(dim=-1).values > 0
    if box_dim == 5:
        # Rotated (cx, cy, w, h, angle) boxes: rotate each point into the
        # box frame, then compare with half extents shrunk by ``eps``.
        pts = points[:, None, None]
        ctrs, wh, theta = torch.split(boxes, [2, 2, 1], dim=-1)
        cos_t, sin_t = torch.cos(theta), torch.sin(theta)
        rot = torch.cat([cos_t, sin_t, -sin_t, cos_t],
                        dim=-1).reshape(*boxes.shape[:-1], 2, 2)
        local = torch.matmul(rot, (pts - ctrs)[..., None]).squeeze(-1)
        off_x, off_y = local[..., 0], local[..., 1]
        half_w, half_h = wh[..., 0] / 2, wh[..., 1] / 2
        return (off_x <= half_w - eps) & (off_x >= -half_w + eps) & \
               (off_y <= half_h - eps) & (off_y >= -half_h + eps)
    raise NotImplementedError(f'Unsupport box_dim:{box_dim}')
def get_box_center(boxes: Tensor, box_dim: int = 4) -> Tensor:
    """Return a tensor representing the centers of boxes.

    Args:
        boxes (Tensor): Boxes tensor. Has shape of (b, n, box_dim)
        box_dim (int): The dimension of box. 4 means horizontal box and
            5 means rotated box. Defaults to 4.
    Returns:
        Tensor: Centers have shape of (b, n, 2)
    """
    if box_dim == 4:
        # (x1, y1, x2, y2): midpoint of the two corners.
        return (boxes[..., :2] + boxes[..., 2:]) / 2.0
    if box_dim == 5:
        # (x, y, w, h, a): the center is stored directly.
        return boxes[..., :2]
    raise NotImplementedError(f'Unsupported box_dim:{box_dim}')
@TASK_UTILS.register_module()
class BatchDynamicSoftLabelAssigner(nn.Module):
    """Computes matching between predictions and ground truth with dynamic soft
    label assignment.
    Args:
        num_classes (int): number of class
        soft_center_radius (float): Radius of the soft center prior.
            Defaults to 3.0.
        topk (int): Select top-k predictions to calculate dynamic k
            best matches for each gt. Defaults to 13.
        iou_weight (float): The scale factor of iou cost. Defaults to 3.0.
        iou_calculator (ConfigType): Config of overlaps Calculator.
            Defaults to dict(type='BboxOverlaps2D').
        batch_iou (bool): Use batch input when calculate IoU.
            If set to False use loop instead. Defaults to True.
    """
    def __init__(
            self,
            num_classes,
            soft_center_radius: float = 3.0,
            topk: int = 13,
            iou_weight: float = 3.0,
            iou_calculator: ConfigType = dict(type='mmdet.BboxOverlaps2D'),
            batch_iou: bool = True,
    ) -> None:
        super().__init__()
        self.num_classes = num_classes
        self.soft_center_radius = soft_center_radius
        self.topk = topk
        self.iou_weight = iou_weight
        self.iou_calculator = TASK_UTILS.build(iou_calculator)
        self.batch_iou = batch_iou
    @torch.no_grad()
    def forward(self, pred_bboxes: Tensor, pred_scores: Tensor, priors: Tensor,
                gt_labels: Tensor, gt_bboxes: Tensor,
                pad_bbox_flag: Tensor) -> dict:
        """Assign ground truths to priors for a whole batch.

        Args:
            pred_bboxes (Tensor): Decoded boxes,
                shape (batch_size, num_priors, box_dim).
            pred_scores (Tensor): Classification logits,
                shape (batch_size, num_priors, num_classes).
            priors (Tensor): Prior info; columns 0-1 are the point
                coordinates and column 2 is the stride used to normalize
                the center distance.
            gt_labels (Tensor): shape (batch_size, num_gt, 1).
            gt_bboxes (Tensor): shape (batch_size, num_gt, box_dim).
            pad_bbox_flag (Tensor): 1 for real gts, 0 for padded entries,
                shape (batch_size, num_gt, 1).

        Returns:
            dict: ``assigned_labels``, ``assigned_labels_weights``,
            ``assigned_bboxes`` and ``assign_metrics`` per prior.
        """
        num_gt = gt_bboxes.size(1)
        decoded_bboxes = pred_bboxes
        batch_size, num_bboxes, box_dim = decoded_bboxes.size()
        # Nothing to match: every prior is assigned to background.
        if num_gt == 0 or num_bboxes == 0:
            return {
                'assigned_labels':
                gt_labels.new_full(
                    pred_scores[..., 0].shape,
                    self.num_classes,
                    dtype=torch.long),
                'assigned_labels_weights':
                gt_bboxes.new_full(pred_scores[..., 0].shape, 1),
                'assigned_bboxes':
                gt_bboxes.new_full(pred_bboxes.shape, 0),
                'assign_metrics':
                gt_bboxes.new_full(pred_scores[..., 0].shape, 0)
            }
        prior_center = priors[:, :2]
        if isinstance(gt_bboxes, BaseBoxes):
            raise NotImplementedError(
                f'type of {type(gt_bboxes)} are not implemented !')
        else:
            is_in_gts = find_inside_points(gt_bboxes, prior_center, box_dim)
        # (N_points, B, N_boxes)
        # Mask out padded (fake) gts.
        is_in_gts = is_in_gts * pad_bbox_flag[..., 0][None]
        # (N_points, B, N_boxes) -> (B, N_points, N_boxes)
        is_in_gts = is_in_gts.permute(1, 0, 2)
        # (B, N_points)
        valid_mask = is_in_gts.sum(dim=-1) > 0
        gt_center = get_box_center(gt_bboxes, box_dim)
        strides = priors[..., 2]
        # Stride-normalized euclidean distance between every prior center
        # and every gt center.
        distance = (priors[None].unsqueeze(2)[..., :2] -
                    gt_center[:, None, :, :]
                    ).pow(2).sum(-1).sqrt() / strides[None, :, None]
        # prevent overflow
        distance = distance * valid_mask.unsqueeze(-1)
        # Soft center prior: cost grows exponentially with distance beyond
        # ``soft_center_radius``.
        soft_center_prior = torch.pow(10, distance - self.soft_center_radius)
        if self.batch_iou:
            pairwise_ious = self.iou_calculator(decoded_bboxes, gt_bboxes)
        else:
            # Per-image loop fallback (lower peak memory).
            ious = []
            for box, gt in zip(decoded_bboxes, gt_bboxes):
                iou = self.iou_calculator(box, gt)
                ious.append(iou)
            pairwise_ious = torch.stack(ious, dim=0)
        iou_cost = -torch.log(pairwise_ious + EPS) * self.iou_weight
        # select the predicted scores corresponded to the gt_labels
        pairwise_pred_scores = pred_scores.permute(0, 2, 1)
        # NOTE(review): ``idx`` is built on CPU; indexing a CUDA tensor with
        # CPU long indices works but may force a transfer — confirm intended.
        idx = torch.zeros([2, batch_size, num_gt], dtype=torch.long)
        idx[0] = torch.arange(end=batch_size).view(-1, 1).repeat(1, num_gt)
        idx[1] = gt_labels.long().squeeze(-1)
        pairwise_pred_scores = pairwise_pred_scores[idx[0],
                                                    idx[1]].permute(0, 2, 1)
        # classification cost
        # QFL-style: BCE against the IoU target, weighted by |iou - score|^2.
        scale_factor = pairwise_ious - pairwise_pred_scores.sigmoid()
        pairwise_cls_cost = F.binary_cross_entropy_with_logits(
            pairwise_pred_scores, pairwise_ious,
            reduction='none') * scale_factor.abs().pow(2.0)
        cost_matrix = pairwise_cls_cost + iou_cost + soft_center_prior
        # Priors outside every gt receive infinite cost and cannot be chosen.
        max_pad_value = torch.ones_like(cost_matrix) * INF
        cost_matrix = torch.where(valid_mask[..., None].repeat(1, 1, num_gt),
                                  cost_matrix, max_pad_value)
        (matched_pred_ious, matched_gt_inds,
         fg_mask_inboxes) = self.dynamic_k_matching(cost_matrix, pairwise_ious,
                                                    pad_bbox_flag)
        del pairwise_ious, cost_matrix
        # Scatter the matched gt info back to per-prior tensors.
        batch_index = (fg_mask_inboxes > 0).nonzero(as_tuple=True)[0]
        assigned_labels = gt_labels.new_full(pred_scores[..., 0].shape,
                                             self.num_classes)
        assigned_labels[fg_mask_inboxes] = gt_labels[
            batch_index, matched_gt_inds].squeeze(-1)
        assigned_labels = assigned_labels.long()
        assigned_labels_weights = gt_bboxes.new_full(pred_scores[..., 0].shape,
                                                     1)
        assigned_bboxes = gt_bboxes.new_full(pred_bboxes.shape, 0)
        assigned_bboxes[fg_mask_inboxes] = gt_bboxes[batch_index,
                                                     matched_gt_inds]
        assign_metrics = gt_bboxes.new_full(pred_scores[..., 0].shape, 0)
        assign_metrics[fg_mask_inboxes] = matched_pred_ious
        return dict(
            assigned_labels=assigned_labels,
            assigned_labels_weights=assigned_labels_weights,
            assigned_bboxes=assigned_bboxes,
            assign_metrics=assign_metrics)
    def dynamic_k_matching(
            self, cost_matrix: Tensor, pairwise_ious: Tensor,
            pad_bbox_flag: int) -> Tuple[Tensor, Tensor, Tensor]:
        """Use IoU and matching cost to calculate the dynamic top-k positive
        targets.

        Args:
            cost_matrix (Tensor): Cost matrix,
                shape (batch_size, num_priors, num_gt).
            pairwise_ious (Tensor): Pairwise iou matrix,
                shape (batch_size, num_priors, num_gt).
            pad_bbox_flag (Tensor): 1 for real gts, 0 for padded entries,
                shape (batch_size, num_gt, 1).

        Returns:
            tuple: matched ious and gt indexes.
        """
        matching_matrix = torch.zeros_like(cost_matrix, dtype=torch.uint8)
        # select candidate topk ious for dynamic-k calculation
        candidate_topk = min(self.topk, pairwise_ious.size(1))
        topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=1)
        # calculate dynamic k for each gt
        # (sum of top candidate IoUs, floored at 1)
        dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
        num_gts = pad_bbox_flag.sum((1, 2)).int()
        # sorting the batch cost matrix is faster than topk
        _, sorted_indices = torch.sort(cost_matrix, dim=1)
        for b in range(pad_bbox_flag.shape[0]):
            for gt_idx in range(num_gts[b]):
                # Take the k lowest-cost priors for this gt.
                topk_ids = sorted_indices[b, :dynamic_ks[b, gt_idx], gt_idx]
                matching_matrix[b, :, gt_idx][topk_ids] = 1
        del topk_ious, dynamic_ks
        # A prior matched to several gts keeps only the min-cost one.
        prior_match_gt_mask = matching_matrix.sum(2) > 1
        if prior_match_gt_mask.sum() > 0:
            cost_min, cost_argmin = torch.min(
                cost_matrix[prior_match_gt_mask, :], dim=1)
            matching_matrix[prior_match_gt_mask, :] *= 0
            matching_matrix[prior_match_gt_mask, cost_argmin] = 1
        # get foreground mask inside box and center prior
        fg_mask_inboxes = matching_matrix.sum(2) > 0
        matched_pred_ious = (matching_matrix *
                             pairwise_ious).sum(2)[fg_mask_inboxes]
        matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1)
        return matched_pred_ious, matched_gt_inds, fg_mask_inboxes
| 10,901 | 38.934066 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/task_modules/assigners/batch_yolov7_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.structures.bbox import bbox_cxcywh_to_xyxy, bbox_overlaps
def _cat_multi_level_tensor_in_place(*multi_level_tensor, place_hold_var):
"""concat multi-level tensor in place."""
for level_tensor in multi_level_tensor:
for i, var in enumerate(level_tensor):
if len(var) > 0:
level_tensor[i] = torch.cat(var, dim=0)
else:
level_tensor[i] = place_hold_var
class BatchYOLOv7Assigner(nn.Module):
"""Batch YOLOv7 Assigner.
It consists of two assigning steps:
1. YOLOv5 cross-grid sample assigning
2. SimOTA assigning
This code referenced to
https://github.com/WongKinYiu/yolov7/blob/main/utils/loss.py.
Args:
num_classes (int): Number of classes.
num_base_priors (int): Number of base priors.
featmap_strides (Sequence[int]): Feature map strides.
prior_match_thr (float): Threshold to match priors.
Defaults to 4.0.
candidate_topk (int): Number of topk candidates to
assign. Defaults to 10.
iou_weight (float): IOU weight. Defaults to 3.0.
cls_weight (float): Class weight. Defaults to 1.0.
"""
def __init__(self,
num_classes: int,
num_base_priors: int,
featmap_strides: Sequence[int],
prior_match_thr: float = 4.0,
candidate_topk: int = 10,
iou_weight: float = 3.0,
cls_weight: float = 1.0):
super().__init__()
self.num_classes = num_classes
self.num_base_priors = num_base_priors
self.featmap_strides = featmap_strides
# yolov5 param
self.prior_match_thr = prior_match_thr
# simota param
self.candidate_topk = candidate_topk
self.iou_weight = iou_weight
self.cls_weight = cls_weight
@torch.no_grad()
def forward(self,
pred_results,
batch_targets_normed,
batch_input_shape,
priors_base_sizes,
grid_offset,
near_neighbor_thr=0.5) -> dict:
"""Forward function."""
# (num_base_priors, num_batch_gt, 7)
# 7 is mean (batch_idx, cls_id, x_norm, y_norm,
# w_norm, h_norm, prior_idx)
# mlvl is mean multi_level
if batch_targets_normed.shape[1] == 0:
# empty gt of batch
num_levels = len(pred_results)
return dict(
mlvl_positive_infos=[pred_results[0].new_empty(
(0, 4))] * num_levels,
mlvl_priors=[] * num_levels,
mlvl_targets_normed=[] * num_levels)
# if near_neighbor_thr = 0.5 are mean the nearest
# 3 neighbors are also considered positive samples.
# if near_neighbor_thr = 1.0 are mean the nearest
# 5 neighbors are also considered positive samples.
mlvl_positive_infos, mlvl_priors = self.yolov5_assigner(
pred_results,
batch_targets_normed,
priors_base_sizes,
grid_offset,
near_neighbor_thr=near_neighbor_thr)
mlvl_positive_infos, mlvl_priors, \
mlvl_targets_normed = self.simota_assigner(
pred_results, batch_targets_normed, mlvl_positive_infos,
mlvl_priors, batch_input_shape)
place_hold_var = batch_targets_normed.new_empty((0, 4))
_cat_multi_level_tensor_in_place(
mlvl_positive_infos,
mlvl_priors,
mlvl_targets_normed,
place_hold_var=place_hold_var)
return dict(
mlvl_positive_infos=mlvl_positive_infos,
mlvl_priors=mlvl_priors,
mlvl_targets_normed=mlvl_targets_normed)
def yolov5_assigner(self,
pred_results,
batch_targets_normed,
priors_base_sizes,
grid_offset,
near_neighbor_thr=0.5):
"""YOLOv5 cross-grid sample assigner."""
num_batch_gts = batch_targets_normed.shape[1]
assert num_batch_gts > 0
mlvl_positive_infos, mlvl_priors = [], []
scaled_factor = torch.ones(7, device=pred_results[0].device)
for i in range(len(pred_results)): # lever
priors_base_sizes_i = priors_base_sizes[i]
# (1, 1, feat_shape_w, feat_shape_h, feat_shape_w, feat_shape_h)
scaled_factor[2:6] = torch.tensor(
pred_results[i].shape)[[3, 2, 3, 2]]
# Scale batch_targets from range 0-1 to range 0-features_maps size.
# (num_base_priors, num_batch_gts, 7)
batch_targets_scaled = batch_targets_normed * scaled_factor
# Shape match
wh_ratio = batch_targets_scaled[...,
4:6] / priors_base_sizes_i[:, None]
match_inds = torch.max(
wh_ratio, 1. / wh_ratio).max(2)[0] < self.prior_match_thr
batch_targets_scaled = batch_targets_scaled[
match_inds] # (num_matched_target, 7)
# no gt bbox matches anchor
if batch_targets_scaled.shape[0] == 0:
mlvl_positive_infos.append(
batch_targets_scaled.new_empty((0, 4)))
mlvl_priors.append([])
continue
# Positive samples with additional neighbors
batch_targets_cxcy = batch_targets_scaled[:, 2:4]
grid_xy = scaled_factor[[2, 3]] - batch_targets_cxcy
left, up = ((batch_targets_cxcy % 1 < near_neighbor_thr) &
(batch_targets_cxcy > 1)).T
right, bottom = ((grid_xy % 1 < near_neighbor_thr) &
(grid_xy > 1)).T
offset_inds = torch.stack(
(torch.ones_like(left), left, up, right, bottom))
batch_targets_scaled = batch_targets_scaled.repeat(
(5, 1, 1))[offset_inds] # ()
retained_offsets = grid_offset.repeat(1, offset_inds.shape[1],
1)[offset_inds]
# batch_targets_scaled: (num_matched_target, 7)
# 7 is mean (batch_idx, cls_id, x_scaled,
# y_scaled, w_scaled, h_scaled, prior_idx)
# mlvl_positive_info: (num_matched_target, 4)
# 4 is mean (batch_idx, prior_idx, x_scaled, y_scaled)
mlvl_positive_info = batch_targets_scaled[:, [0, 6, 2, 3]]
retained_offsets = retained_offsets * near_neighbor_thr
mlvl_positive_info[:,
2:] = mlvl_positive_info[:,
2:] - retained_offsets
mlvl_positive_info[:, 2].clamp_(0, scaled_factor[2] - 1)
mlvl_positive_info[:, 3].clamp_(0, scaled_factor[3] - 1)
mlvl_positive_info = mlvl_positive_info.long()
priors_inds = mlvl_positive_info[:, 1]
mlvl_positive_infos.append(mlvl_positive_info)
mlvl_priors.append(priors_base_sizes_i[priors_inds])
return mlvl_positive_infos, mlvl_priors
    def simota_assigner(self, pred_results, batch_targets_normed,
                        mlvl_positive_infos, mlvl_priors, batch_input_shape):
        """SimOTA assigner.

        Dynamically matches the shape/neighbor-based positive candidates to
        ground-truth boxes per image, using a cost that combines a
        classification BCE term and an IoU log-loss term, then keeps a
        dynamic top-k of candidates per gt.

        Args:
            pred_results (Sequence[Tensor]): Per-level head predictions,
                indexed below as [batch_idx, prior_idx, grid_y, grid_x];
                channel 0:2 is xy offset, 2:4 is wh, 4: is obj + cls logits.
            batch_targets_normed (Tensor): Normalized targets with shape
                (num_base_priors, num_batch_gts, 7); 7 means (batch_idx,
                cls_id, x_norm, y_norm, w_norm, h_norm, prior_idx).
            mlvl_positive_infos (Sequence[Tensor]): Per-level positive
                sample info, each (num_matched_target, 4) with
                (batch_idx, prior_idx, grid_x, grid_y).
            mlvl_priors (Sequence[Tensor]): Per-level prior wh sizes
                aligned with ``mlvl_positive_infos``.
            batch_input_shape (Sequence[int]): Network input shape,
                presumably (h, w) — it is reversed to wh below; confirm
                against the caller.

        Returns:
            tuple: (mlvl_positive_infos_matched, mlvl_priors_matched,
            mlvl_targets_normed_matched), each a list with one entry per
            prediction level containing per-image matched tensors.
        """
        num_batch_gts = batch_targets_normed.shape[1]
        assert num_batch_gts > 0
        num_levels = len(mlvl_positive_infos)
        mlvl_positive_infos_matched = [[] for _ in range(num_levels)]
        mlvl_priors_matched = [[] for _ in range(num_levels)]
        mlvl_targets_normed_matched = [[] for _ in range(num_levels)]
        # SimOTA is computed one image at a time.
        for batch_idx in range(pred_results[0].shape[0]):
            # (num_batch_gt, 7)
            # 7 is mean (batch_idx, cls_id, x_norm, y_norm,
            # w_norm, h_norm, prior_idx)
            # NOTE(review): only the first prior-copy of the targets is
            # used; presumably all copies along dim 0 are identical —
            # confirm against the target builder.
            targets_normed = batch_targets_normed[0]
            # (num_gt, 7)
            targets_normed = targets_normed[targets_normed[:, 0] == batch_idx]
            num_gts = targets_normed.shape[0]
            if num_gts == 0:
                continue
            _mlvl_decoderd_bboxes = []
            _mlvl_obj_cls = []
            _mlvl_priors = []
            _mlvl_positive_infos = []
            _from_which_layer = []
            # Gather and decode the positive predictions of every level
            # belonging to this image.
            for i, head_pred in enumerate(pred_results):
                # (num_matched_target, 4)
                # 4 is mean (batch_idx, prior_idx, grid_x, grid_y)
                _mlvl_positive_info = mlvl_positive_infos[i]
                if _mlvl_positive_info.shape[0] == 0:
                    continue
                idx = (_mlvl_positive_info[:, 0] == batch_idx)
                _mlvl_positive_info = _mlvl_positive_info[idx]
                _mlvl_positive_infos.append(_mlvl_positive_info)
                priors = mlvl_priors[i][idx]
                _mlvl_priors.append(priors)
                # Remember the source level so results can be regrouped
                # per level after matching.
                _from_which_layer.append(
                    _mlvl_positive_info.new_full(
                        size=(_mlvl_positive_info.shape[0], ), fill_value=i))
                # (n,85)
                level_batch_idx, prior_ind, \
                    grid_x, grid_y = _mlvl_positive_info.T
                pred_positive = head_pred[level_batch_idx, prior_ind, grid_y,
                                          grid_x]
                _mlvl_obj_cls.append(pred_positive[:, 4:])
                # decoded
                grid = torch.stack([grid_x, grid_y], dim=1)
                pred_positive_cxcy = (pred_positive[:, :2].sigmoid() * 2. -
                                      0.5 + grid) * self.featmap_strides[i]
                pred_positive_wh = (pred_positive[:, 2:4].sigmoid() * 2) ** 2 \
                    * priors * self.featmap_strides[i]
                pred_positive_xywh = torch.cat(
                    [pred_positive_cxcy, pred_positive_wh], dim=-1)
                _mlvl_decoderd_bboxes.append(pred_positive_xywh)
            if len(_mlvl_decoderd_bboxes) == 0:
                continue
            # 1 calc pair_wise_iou_loss
            _mlvl_decoderd_bboxes = torch.cat(_mlvl_decoderd_bboxes, dim=0)
            num_pred_positive = _mlvl_decoderd_bboxes.shape[0]
            if num_pred_positive == 0:
                continue
            # scaled xywh
            batch_input_shape_wh = pred_results[0].new_tensor(
                batch_input_shape[::-1]).repeat((1, 2))
            targets_scaled_bbox = targets_normed[:, 2:6] * batch_input_shape_wh
            targets_scaled_bbox = bbox_cxcywh_to_xyxy(targets_scaled_bbox)
            _mlvl_decoderd_bboxes = bbox_cxcywh_to_xyxy(_mlvl_decoderd_bboxes)
            pair_wise_iou = bbox_overlaps(targets_scaled_bbox,
                                          _mlvl_decoderd_bboxes)
            pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8)
            # 2 calc pair_wise_cls_loss
            _mlvl_obj_cls = torch.cat(_mlvl_obj_cls, dim=0).float().sigmoid()
            _mlvl_positive_infos = torch.cat(_mlvl_positive_infos, dim=0)
            _from_which_layer = torch.cat(_from_which_layer, dim=0)
            _mlvl_priors = torch.cat(_mlvl_priors, dim=0)
            gt_cls_per_image = (
                F.one_hot(targets_normed[:, 1].to(torch.int64),
                          self.num_classes).float().unsqueeze(1).repeat(
                              1, num_pred_positive, 1))
            # cls_score * obj
            cls_preds_ = _mlvl_obj_cls[:, 1:]\
                .unsqueeze(0)\
                .repeat(num_gts, 1, 1) \
                * _mlvl_obj_cls[:, 0:1]\
                .unsqueeze(0).repeat(num_gts, 1, 1)
            # sqrt then inverse-sigmoid (logit) so BCE-with-logits can be
            # applied to the already-sigmoided score product.
            y = cls_preds_.sqrt_()
            pair_wise_cls_loss = F.binary_cross_entropy_with_logits(
                torch.log(y / (1 - y)), gt_cls_per_image,
                reduction='none').sum(-1)
            del cls_preds_
            # calc cost
            cost = (
                self.cls_weight * pair_wise_cls_loss +
                self.iou_weight * pair_wise_iou_loss)
            # num_gt, num_match_pred
            matching_matrix = torch.zeros_like(cost)
            top_k, _ = torch.topk(
                pair_wise_iou,
                min(self.candidate_topk, pair_wise_iou.shape[1]),
                dim=1)
            # Dynamic-k: each gt keeps roughly as many candidates as the
            # sum of its top IoUs (at least 1).
            dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1)
            # Select only topk matches per gt
            for gt_idx in range(num_gts):
                _, pos_idx = torch.topk(
                    cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False)
                matching_matrix[gt_idx][pos_idx] = 1.0
            del top_k, dynamic_ks
            # Each prediction box can match at most one gt box,
            # and if there are more than one,
            # only the least costly one can be taken
            anchor_matching_gt = matching_matrix.sum(0)
            if (anchor_matching_gt > 1).sum() > 0:
                _, cost_argmin = torch.min(
                    cost[:, anchor_matching_gt > 1], dim=0)
                matching_matrix[:, anchor_matching_gt > 1] *= 0.0
                matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0
            fg_mask_inboxes = matching_matrix.sum(0) > 0.0
            matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)
            targets_normed = targets_normed[matched_gt_inds]
            _mlvl_positive_infos = _mlvl_positive_infos[fg_mask_inboxes]
            _from_which_layer = _from_which_layer[fg_mask_inboxes]
            _mlvl_priors = _mlvl_priors[fg_mask_inboxes]
            # Rearranged in the order of the prediction layers
            # to facilitate loss
            for i in range(num_levels):
                layer_idx = _from_which_layer == i
                mlvl_positive_infos_matched[i].append(
                    _mlvl_positive_infos[layer_idx])
                mlvl_priors_matched[i].append(_mlvl_priors[layer_idx])
                mlvl_targets_normed_matched[i].append(
                    targets_normed[layer_idx])
        results = mlvl_positive_infos_matched, \
            mlvl_priors_matched, \
            mlvl_targets_normed_matched
        return results
| 14,354 | 40.608696 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/task_modules/assigners/batch_atss_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.utils import ConfigType
from torch import Tensor
from mmyolo.registry import TASK_UTILS
from .utils import (select_candidates_in_gts, select_highest_overlaps,
yolov6_iou_calculator)
def bbox_center_distance(bboxes: Tensor,
                         priors: Tensor) -> Tuple[Tensor, Tensor]:
    """Compute pairwise center distances between ``bboxes`` and ``priors``.

    Args:
        bboxes (Tensor): Shape (n, 4) for bboxes, "xyxy" format.
        priors (Tensor): Shape (num_priors, 4) for priors, "xyxy" format.

    Returns:
        distances (Tensor): Euclidean distances between every bbox center
            and every prior center, shape (n, num_priors).
        priors_points (Tensor): Prior center points (cx, cy),
            shape (num_priors, 2).
    """
    # Box center = midpoint of the (x1, y1) and (x2, y2) corners.
    bbox_points = (bboxes[:, :2] + bboxes[:, 2:]) / 2.0
    priors_points = (priors[:, :2] + priors[:, 2:]) / 2.0
    # Broadcast to an (n, num_priors, 2) difference grid, then reduce to
    # the Euclidean norm along the coordinate axis.
    deltas = bbox_points[:, None, :] - priors_points[None, :, :]
    distances = deltas.pow(2).sum(-1).sqrt()
    return distances, priors_points
@TASK_UTILS.register_module()
class BatchATSSAssigner(nn.Module):
    """Assign a batch of corresponding gt bboxes or background to each prior.
    This code is based on
    https://github.com/meituan/YOLOv6/blob/main/yolov6/assigners/atss_assigner.py
    Each proposal will be assigned with `0` or a positive integer
    indicating the ground truth index.
    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt
    Args:
        num_classes (int): number of class
        iou_calculator (:obj:`ConfigDict` or dict): Config dict for iou
            calculator. Defaults to ``dict(type='BboxOverlaps2D')``
        topk (int): number of priors selected in each level
    """
    def __init__(
            self,
            num_classes: int,
            iou_calculator: ConfigType = dict(type='mmdet.BboxOverlaps2D'),
            topk: int = 9):
        super().__init__()
        self.num_classes = num_classes
        # Built from the registry so the IoU backend is configurable.
        self.iou_calculator = TASK_UTILS.build(iou_calculator)
        self.topk = topk
    @torch.no_grad()
    def forward(self, pred_bboxes: Tensor, priors: Tensor,
                num_level_priors: List, gt_labels: Tensor, gt_bboxes: Tensor,
                pad_bbox_flag: Tensor) -> dict:
        """Assign gt to priors.
        The assignment is done in following steps
        1. compute iou between all prior (prior of all pyramid levels) and gt
        2. compute center distance between all prior and gt
        3. on each pyramid level, for each gt, select k prior whose center
           are closest to the gt center, so we total select k*l prior as
           candidates for each gt
        4. get corresponding iou for the these candidates, and compute the
           mean and std, set mean + std as the iou threshold
        5. select these candidates whose iou are greater than or equal to
           the threshold as positive
        6. limit the positive sample's center in gt
        Args:
            pred_bboxes (Tensor): Predicted bounding boxes,
                shape(batch_size, num_priors, 4)
            priors (Tensor): Model priors with stride, shape(num_priors, 4)
            num_level_priors (List): Number of bboxes in each level, len(3)
            gt_labels (Tensor): Ground truth label,
                shape(batch_size, num_gt, 1)
            gt_bboxes (Tensor): Ground truth bbox,
                shape(batch_size, num_gt, 4)
            pad_bbox_flag (Tensor): Ground truth bbox mask,
                1 means bbox, 0 means no bbox,
                shape(batch_size, num_gt, 1)
        Returns:
            assigned_result (dict): Assigned result
                'assigned_labels' (Tensor): shape(batch_size, num_priors)
                'assigned_bboxes' (Tensor): shape(batch_size, num_priors, 4)
                'assigned_scores' (Tensor):
                    shape(batch_size, num_priors, number_classes)
                'fg_mask_pre_prior' (Tensor): shape(batch_size, num_priors)
        """
        # generate priors: turn (cx, cy, stride_w, stride_h) priors into
        # xyxy cells of 5x the stride around each center.
        cell_half_size = priors[:, 2:] * 2.5
        priors_gen = torch.zeros_like(priors)
        priors_gen[:, :2] = priors[:, :2] - cell_half_size
        priors_gen[:, 2:] = priors[:, :2] + cell_half_size
        priors = priors_gen
        batch_size = gt_bboxes.size(0)
        num_gt, num_priors = gt_bboxes.size(1), priors.size(0)
        # Background defaults: label = num_classes, everything else zero.
        assigned_result = {
            'assigned_labels':
            gt_bboxes.new_full([batch_size, num_priors], self.num_classes),
            'assigned_bboxes':
            gt_bboxes.new_full([batch_size, num_priors, 4], 0),
            'assigned_scores':
            gt_bboxes.new_full([batch_size, num_priors, self.num_classes], 0),
            'fg_mask_pre_prior':
            gt_bboxes.new_full([batch_size, num_priors], 0)
        }
        if num_gt == 0:
            return assigned_result
        # compute iou between all prior (prior of all pyramid levels) and gt
        overlaps = self.iou_calculator(gt_bboxes.reshape([-1, 4]), priors)
        overlaps = overlaps.reshape([batch_size, -1, num_priors])
        # compute center distance between all prior and gt
        distances, priors_points = bbox_center_distance(
            gt_bboxes.reshape([-1, 4]), priors)
        distances = distances.reshape([batch_size, -1, num_priors])
        # Selecting candidates based on the center distance
        is_in_candidate, candidate_idxs = self.select_topk_candidates(
            distances, num_level_priors, pad_bbox_flag)
        # get corresponding iou for the these candidates, and compute the
        # mean and std, set mean + std as the iou threshold
        overlaps_thr_per_gt, iou_candidates = self.threshold_calculator(
            is_in_candidate, candidate_idxs, overlaps, num_priors, batch_size,
            num_gt)
        # select candidates iou >= threshold as positive
        is_pos = torch.where(
            iou_candidates > overlaps_thr_per_gt.repeat([1, 1, num_priors]),
            is_in_candidate, torch.zeros_like(is_in_candidate))
        is_in_gts = select_candidates_in_gts(priors_points, gt_bboxes)
        pos_mask = is_pos * is_in_gts * pad_bbox_flag
        # if an anchor box is assigned to multiple gts,
        # the one with the highest IoU will be selected.
        gt_idx_pre_prior, fg_mask_pre_prior, pos_mask = \
            select_highest_overlaps(pos_mask, overlaps, num_gt)
        # assigned target
        assigned_labels, assigned_bboxes, assigned_scores = self.get_targets(
            gt_labels, gt_bboxes, gt_idx_pre_prior, fg_mask_pre_prior,
            num_priors, batch_size, num_gt)
        # soft label with iou
        if pred_bboxes is not None:
            ious = yolov6_iou_calculator(gt_bboxes, pred_bboxes) * pos_mask
            ious = ious.max(axis=-2)[0].unsqueeze(-1)
            assigned_scores *= ious
        assigned_result['assigned_labels'] = assigned_labels.long()
        assigned_result['assigned_bboxes'] = assigned_bboxes
        assigned_result['assigned_scores'] = assigned_scores
        assigned_result['fg_mask_pre_prior'] = fg_mask_pre_prior.bool()
        return assigned_result
    def select_topk_candidates(self, distances: Tensor,
                               num_level_priors: List[int],
                               pad_bbox_flag: Tensor) -> Tuple[Tensor, Tensor]:
        """Selecting candidates based on the center distance.
        Args:
            distances (Tensor): Distance between all bbox and gt,
                shape(batch_size, num_gt, num_priors)
            num_level_priors (List[int]): Number of bboxes in each level,
                len(3)
            pad_bbox_flag (Tensor): Ground truth bbox mask,
                shape(batch_size, num_gt, 1)
        Return:
            is_in_candidate_list (Tensor): Flag show that each level have
                topk candidates or not, shape(batch_size, num_gt, num_priors)
            candidate_idxs (Tensor): Candidates index,
                shape(batch_size, num_gt, sum of per-level topk)
        """
        is_in_candidate_list = []
        candidate_idxs = []
        start_idx = 0
        distances_dtype = distances.dtype
        distances = torch.split(distances, num_level_priors, dim=-1)
        pad_bbox_flag = pad_bbox_flag.repeat(1, 1, self.topk).bool()
        for distances_per_level, priors_per_level in zip(
                distances, num_level_priors):
            # on each pyramid level, for each gt,
            # select k bbox whose center are closest to the gt center
            end_index = start_idx + priors_per_level
            selected_k = min(self.topk, priors_per_level)
            _, topk_idxs_per_level = distances_per_level.topk(
                selected_k, dim=-1, largest=False)
            # Indices are shifted into the flattened all-level prior space.
            candidate_idxs.append(topk_idxs_per_level + start_idx)
            # Zero out candidates belonging to padded (fake) gts.
            topk_idxs_per_level = torch.where(
                pad_bbox_flag, topk_idxs_per_level,
                torch.zeros_like(topk_idxs_per_level))
            is_in_candidate = F.one_hot(topk_idxs_per_level,
                                        priors_per_level).sum(dim=-2)
            # A count > 1 only happens for padded gts mapped to index 0;
            # drop those duplicates.
            is_in_candidate = torch.where(is_in_candidate > 1,
                                          torch.zeros_like(is_in_candidate),
                                          is_in_candidate)
            is_in_candidate_list.append(is_in_candidate.to(distances_dtype))
            start_idx = end_index
        is_in_candidate_list = torch.cat(is_in_candidate_list, dim=-1)
        candidate_idxs = torch.cat(candidate_idxs, dim=-1)
        return is_in_candidate_list, candidate_idxs
    @staticmethod
    def threshold_calculator(is_in_candidate: List, candidate_idxs: Tensor,
                             overlaps: Tensor, num_priors: int,
                             batch_size: int,
                             num_gt: int) -> Tuple[Tensor, Tensor]:
        """Get corresponding iou for the these candidates, and compute the mean
        and std, set mean + std as the iou threshold.
        Args:
            is_in_candidate (Tensor): Flag show that each level have
                topk candidates or not, shape(batch_size, num_gt, num_priors).
            candidate_idxs (Tensor): Candidates index,
                shape(batch_size, num_gt, sum of per-level topk)
            overlaps (Tensor): Overlaps area,
                shape(batch_size, num_gt, num_priors).
            num_priors (int): Number of priors.
            batch_size (int): Batch size.
            num_gt (int): Number of ground truth.
        Return:
            overlaps_thr_per_gt (Tensor): Overlap threshold of
                per ground truth, shape(batch_size, num_gt, 1).
            candidate_overlaps (Tensor): Candidate overlaps,
                shape(batch_size, num_gt, num_priors).
        """
        batch_size_num_gt = batch_size * num_gt
        candidate_overlaps = torch.where(is_in_candidate > 0, overlaps,
                                         torch.zeros_like(overlaps))
        candidate_idxs = candidate_idxs.reshape([batch_size_num_gt, -1])
        # Offset each (batch, gt) row so its candidate indices address the
        # fully flattened overlaps tensor.
        assist_indexes = num_priors * torch.arange(
            batch_size_num_gt, device=candidate_idxs.device)
        assist_indexes = assist_indexes[:, None]
        flatten_indexes = candidate_idxs + assist_indexes
        candidate_overlaps_reshape = candidate_overlaps.reshape(
            -1)[flatten_indexes]
        candidate_overlaps_reshape = candidate_overlaps_reshape.reshape(
            [batch_size, num_gt, -1])
        # ATSS threshold: mean + std of the candidate IoUs per gt.
        overlaps_mean_per_gt = candidate_overlaps_reshape.mean(
            axis=-1, keepdim=True)
        overlaps_std_per_gt = candidate_overlaps_reshape.std(
            axis=-1, keepdim=True)
        overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt
        return overlaps_thr_per_gt, candidate_overlaps
    def get_targets(self, gt_labels: Tensor, gt_bboxes: Tensor,
                    assigned_gt_inds: Tensor, fg_mask_pre_prior: Tensor,
                    num_priors: int, batch_size: int,
                    num_gt: int) -> Tuple[Tensor, Tensor, Tensor]:
        """Get target info.
        Args:
            gt_labels (Tensor): Ground true labels,
                shape(batch_size, num_gt, 1)
            gt_bboxes (Tensor): Ground true bboxes,
                shape(batch_size, num_gt, 4)
            assigned_gt_inds (Tensor): Assigned ground truth indexes,
                shape(batch_size, num_priors)
            fg_mask_pre_prior (Tensor): Force ground truth matching mask,
                shape(batch_size, num_priors)
            num_priors (int): Number of priors.
            batch_size (int): Batch size.
            num_gt (int): Number of ground truth.
        Return:
            assigned_labels (Tensor): Assigned labels,
                shape(batch_size, num_priors)
            assigned_bboxes (Tensor): Assigned bboxes,
                shape(batch_size, num_priors, 4)
            assigned_scores (Tensor): Assigned scores,
                shape(batch_size, num_priors, num_classes)
        """
        # assigned target labels
        batch_index = torch.arange(
            batch_size, dtype=gt_labels.dtype, device=gt_labels.device)
        batch_index = batch_index[..., None]
        # Shift per-image gt indices so they address the flattened
        # (batch * num_gt) gt arrays.
        assigned_gt_inds = (assigned_gt_inds + batch_index * num_gt).long()
        assigned_labels = gt_labels.flatten()[assigned_gt_inds.flatten()]
        assigned_labels = assigned_labels.reshape([batch_size, num_priors])
        # Unmatched priors get the background label (num_classes).
        assigned_labels = torch.where(
            fg_mask_pre_prior > 0, assigned_labels,
            torch.full_like(assigned_labels, self.num_classes))
        # assigned target boxes
        assigned_bboxes = gt_bboxes.reshape([-1,
                                             4])[assigned_gt_inds.flatten()]
        assigned_bboxes = assigned_bboxes.reshape([batch_size, num_priors, 4])
        # assigned target scores: one-hot over num_classes + 1 so background
        # rows become all-zero once the extra column is sliced off.
        assigned_scores = F.one_hot(assigned_labels.long(),
                                    self.num_classes + 1).float()
        assigned_scores = assigned_scores[:, :, :self.num_classes]
        return assigned_labels, assigned_bboxes, assigned_scores
| 14,471 | 41.564706 | 81 | py |
mmyolo | mmyolo-main/mmyolo/models/task_modules/assigners/batch_task_aligned_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from mmyolo.models.losses import bbox_overlaps
from mmyolo.registry import TASK_UTILS
from .utils import (select_candidates_in_gts, select_highest_overlaps,
yolov6_iou_calculator)
@TASK_UTILS.register_module()
class BatchTaskAlignedAssigner(nn.Module):
    """This code referenced to
    https://github.com/meituan/YOLOv6/blob/main/yolov6/
    assigners/tal_assigner.py.
    Batch Task aligned assigner base on the paper:
    `TOOD: Task-aligned One-stage Object Detection.
    <https://arxiv.org/abs/2108.07755>`_.
    Assign a corresponding gt bboxes or background to a batch of
    predicted bboxes. Each bbox will be assigned with `0` or a
    positive integer indicating the ground truth index.
    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt
    Args:
        num_classes (int): number of class
        topk (int): number of bbox selected in each level
        alpha (float): Hyper-parameters related to alignment_metrics.
            Defaults to 1.0
        beta (float): Hyper-parameters related to alignment_metrics.
            Defaults to 6.
        eps (float): Eps to avoid log(0). Defaults to 1e-7.
        use_ciou (bool): Whether to use ciou while calculating iou.
            Defaults to False.
    """
    def __init__(self,
                 num_classes: int,
                 topk: int = 13,
                 alpha: float = 1.0,
                 beta: float = 6.0,
                 eps: float = 1e-7,
                 use_ciou: bool = False):
        super().__init__()
        self.num_classes = num_classes
        self.topk = topk
        self.alpha = alpha
        self.beta = beta
        self.eps = eps
        self.use_ciou = use_ciou
    @torch.no_grad()
    def forward(
        self,
        pred_bboxes: Tensor,
        pred_scores: Tensor,
        priors: Tensor,
        gt_labels: Tensor,
        gt_bboxes: Tensor,
        pad_bbox_flag: Tensor,
    ) -> dict:
        """Assign gt to bboxes.
        The assignment is done in following steps
        1. compute alignment metric between all bbox (bbox of all pyramid
           levels) and gt
        2. select top-k bbox as candidates for each gt
        3. limit the positive sample's center in gt (because the anchor-free
           detector only can predict positive distance)
        Args:
            pred_bboxes (Tensor): Predict bboxes,
                shape(batch_size, num_priors, 4)
            pred_scores (Tensor): Scores of predict bboxes,
                shape(batch_size, num_priors, num_classes)
            priors (Tensor): Model priors, shape (num_priors, 4)
            gt_labels (Tensor): Ground true labels,
                shape(batch_size, num_gt, 1)
            gt_bboxes (Tensor): Ground true bboxes,
                shape(batch_size, num_gt, 4)
            pad_bbox_flag (Tensor): Ground truth bbox mask,
                1 means bbox, 0 means no bbox,
                shape(batch_size, num_gt, 1)
        Returns:
            assigned_result (dict) Assigned result:
                assigned_labels (Tensor): Assigned labels,
                    shape(batch_size, num_priors)
                assigned_bboxes (Tensor): Assigned boxes,
                    shape(batch_size, num_priors, 4)
                assigned_scores (Tensor): Assigned scores,
                    shape(batch_size, num_priors, num_classes)
                fg_mask_pre_prior (Tensor): Force ground truth matching mask,
                    shape(batch_size, num_priors)
        """
        # (num_priors, 4) -> (num_priors, 2): only the center points are used.
        priors = priors[:, :2]
        batch_size = pred_scores.size(0)
        num_gt = gt_bboxes.size(1)
        # Background defaults: label = num_classes, everything else zero.
        assigned_result = {
            'assigned_labels':
            gt_bboxes.new_full(pred_scores[..., 0].shape, self.num_classes),
            'assigned_bboxes':
            gt_bboxes.new_full(pred_bboxes.shape, 0),
            'assigned_scores':
            gt_bboxes.new_full(pred_scores.shape, 0),
            'fg_mask_pre_prior':
            gt_bboxes.new_full(pred_scores[..., 0].shape, 0)
        }
        if num_gt == 0:
            return assigned_result
        pos_mask, alignment_metrics, overlaps = self.get_pos_mask(
            pred_bboxes, pred_scores, priors, gt_labels, gt_bboxes,
            pad_bbox_flag, batch_size, num_gt)
        # Resolve priors matched to multiple gts: keep the highest overlap.
        (assigned_gt_idxs, fg_mask_pre_prior,
         pos_mask) = select_highest_overlaps(pos_mask, overlaps, num_gt)
        # assigned target
        assigned_labels, assigned_bboxes, assigned_scores = self.get_targets(
            gt_labels, gt_bboxes, assigned_gt_idxs, fg_mask_pre_prior,
            batch_size, num_gt)
        # normalize cls targets by the per-gt best alignment metric so the
        # best-aligned prediction keeps the largest soft score.
        alignment_metrics *= pos_mask
        pos_align_metrics = alignment_metrics.max(axis=-1, keepdim=True)[0]
        pos_overlaps = (overlaps * pos_mask).max(axis=-1, keepdim=True)[0]
        norm_align_metric = (
            alignment_metrics * pos_overlaps /
            (pos_align_metrics + self.eps)).max(-2)[0].unsqueeze(-1)
        assigned_scores = assigned_scores * norm_align_metric
        assigned_result['assigned_labels'] = assigned_labels
        assigned_result['assigned_bboxes'] = assigned_bboxes
        assigned_result['assigned_scores'] = assigned_scores
        assigned_result['fg_mask_pre_prior'] = fg_mask_pre_prior.bool()
        return assigned_result
    def get_pos_mask(self, pred_bboxes: Tensor, pred_scores: Tensor,
                     priors: Tensor, gt_labels: Tensor, gt_bboxes: Tensor,
                     pad_bbox_flag: Tensor, batch_size: int,
                     num_gt: int) -> Tuple[Tensor, Tensor, Tensor]:
        """Get possible mask.
        Args:
            pred_bboxes (Tensor): Predict bboxes,
                shape(batch_size, num_priors, 4)
            pred_scores (Tensor): Scores of predict bbox,
                shape(batch_size, num_priors, num_classes)
            priors (Tensor): Model priors, shape (num_priors, 2)
            gt_labels (Tensor): Ground true labels,
                shape(batch_size, num_gt, 1)
            gt_bboxes (Tensor): Ground true bboxes,
                shape(batch_size, num_gt, 4)
            pad_bbox_flag (Tensor): Ground truth bbox mask,
                1 means bbox, 0 means no bbox,
                shape(batch_size, num_gt, 1)
            batch_size (int): Batch size.
            num_gt (int): Number of ground truth.
        Returns:
            pos_mask (Tensor): Possible mask,
                shape(batch_size, num_gt, num_priors)
            alignment_metrics (Tensor): Alignment metrics,
                shape(batch_size, num_gt, num_priors)
            overlaps (Tensor): Overlaps of gt_bboxes and pred_bboxes,
                shape(batch_size, num_gt, num_priors)
        """
        # Compute alignment metric between all bbox and gt
        alignment_metrics, overlaps = \
            self.get_box_metrics(pred_bboxes, pred_scores, gt_labels,
                                 gt_bboxes, batch_size, num_gt)
        # get is_in_gts mask
        is_in_gts = select_candidates_in_gts(priors, gt_bboxes)
        # get topk_metric mask
        topk_metric = self.select_topk_candidates(
            alignment_metrics * is_in_gts,
            topk_mask=pad_bbox_flag.repeat([1, 1, self.topk]).bool())
        # merge all mask to a final mask
        pos_mask = topk_metric * is_in_gts * pad_bbox_flag
        return pos_mask, alignment_metrics, overlaps
    def get_box_metrics(self, pred_bboxes: Tensor, pred_scores: Tensor,
                        gt_labels: Tensor, gt_bboxes: Tensor, batch_size: int,
                        num_gt: int) -> Tuple[Tensor, Tensor]:
        """Compute alignment metric between all bbox and gt.
        Args:
            pred_bboxes (Tensor): Predict bboxes,
                shape(batch_size, num_priors, 4)
            pred_scores (Tensor): Scores of predict bbox,
                shape(batch_size, num_priors, num_classes)
            gt_labels (Tensor): Ground true labels,
                shape(batch_size, num_gt, 1)
            gt_bboxes (Tensor): Ground true bboxes,
                shape(batch_size, num_gt, 4)
            batch_size (int): Batch size.
            num_gt (int): Number of ground truth.
        Returns:
            alignment_metrics (Tensor): Align metric,
                shape(batch_size, num_gt, num_priors)
            overlaps (Tensor): Overlaps, shape(batch_size, num_gt, num_priors)
        """
        pred_scores = pred_scores.permute(0, 2, 1)
        gt_labels = gt_labels.to(torch.long)
        # Gather, for every gt, the predicted score of its own class across
        # all priors: bbox_scores has shape (batch_size, num_gt, num_priors).
        idx = torch.zeros([2, batch_size, num_gt], dtype=torch.long)
        idx[0] = torch.arange(end=batch_size).view(-1, 1).repeat(1, num_gt)
        idx[1] = gt_labels.squeeze(-1)
        bbox_scores = pred_scores[idx[0], idx[1]]
        # TODO: need to replace the yolov6_iou_calculator function
        if self.use_ciou:
            overlaps = bbox_overlaps(
                pred_bboxes.unsqueeze(1),
                gt_bboxes.unsqueeze(2),
                iou_mode='ciou',
                bbox_format='xyxy').clamp(0)
        else:
            overlaps = yolov6_iou_calculator(gt_bboxes, pred_bboxes)
        # Task-alignment metric from TOOD: score^alpha * iou^beta.
        alignment_metrics = bbox_scores.pow(self.alpha) * overlaps.pow(
            self.beta)
        return alignment_metrics, overlaps
    def select_topk_candidates(self,
                               alignment_gt_metrics: Tensor,
                               using_largest_topk: bool = True,
                               topk_mask: Optional[Tensor] = None) -> Tensor:
        """Select the top-k candidates for each gt by alignment metric.
        Args:
            alignment_gt_metrics (Tensor): Alignment metric of gt candidates,
                shape(batch_size, num_gt, num_priors)
            using_largest_topk (bool): Controls whether to using largest or
                smallest elements.
            topk_mask (Tensor): Topk mask,
                shape(batch_size, num_gt, self.topk)
        Returns:
            Tensor: Topk candidates mask,
                shape(batch_size, num_gt, num_priors)
        """
        num_priors = alignment_gt_metrics.shape[-1]
        topk_metrics, topk_idxs = torch.topk(
            alignment_gt_metrics,
            self.topk,
            axis=-1,
            largest=using_largest_topk)
        if topk_mask is None:
            # BUGFIX: ``Tensor.max(dim, keepdim)`` returns a
            # (values, indices) namedtuple; the values must be extracted
            # with ``[0]`` before comparing against ``eps``, otherwise this
            # raises ``TypeError`` whenever no mask is supplied.
            topk_mask = (topk_metrics.max(axis=-1, keepdim=True)[0] >
                         self.eps).tile([1, 1, self.topk])
        topk_idxs = torch.where(topk_mask, topk_idxs,
                                torch.zeros_like(topk_idxs))
        is_in_topk = F.one_hot(topk_idxs, num_priors).sum(axis=-2)
        # Index 0 can be hit multiple times by masked-out entries; drop
        # those duplicated counts.
        is_in_topk = torch.where(is_in_topk > 1, torch.zeros_like(is_in_topk),
                                 is_in_topk)
        return is_in_topk.to(alignment_gt_metrics.dtype)
    def get_targets(self, gt_labels: Tensor, gt_bboxes: Tensor,
                    assigned_gt_idxs: Tensor, fg_mask_pre_prior: Tensor,
                    batch_size: int,
                    num_gt: int) -> Tuple[Tensor, Tensor, Tensor]:
        """Get assigner info.
        Args:
            gt_labels (Tensor): Ground true labels,
                shape(batch_size, num_gt, 1)
            gt_bboxes (Tensor): Ground true bboxes,
                shape(batch_size, num_gt, 4)
            assigned_gt_idxs (Tensor): Assigned ground truth indexes,
                shape(batch_size, num_priors)
            fg_mask_pre_prior (Tensor): Force ground truth matching mask,
                shape(batch_size, num_priors)
            batch_size (int): Batch size.
            num_gt (int): Number of ground truth.
        Returns:
            assigned_labels (Tensor): Assigned labels,
                shape(batch_size, num_priors)
            assigned_bboxes (Tensor): Assigned bboxes,
                shape(batch_size, num_priors, 4)
            assigned_scores (Tensor): Assigned scores,
                shape(batch_size, num_priors, num_classes)
        """
        # assigned target labels: shift per-image gt indices into the
        # flattened (batch * num_gt) gt arrays.
        batch_ind = torch.arange(
            end=batch_size, dtype=torch.int64, device=gt_labels.device)[...,
                                                                        None]
        assigned_gt_idxs = assigned_gt_idxs + batch_ind * num_gt
        assigned_labels = gt_labels.long().flatten()[assigned_gt_idxs]
        # assigned target boxes
        assigned_bboxes = gt_bboxes.reshape([-1, 4])[assigned_gt_idxs]
        # assigned target scores
        assigned_labels[assigned_labels < 0] = 0
        assigned_scores = F.one_hot(assigned_labels, self.num_classes)
        # Zero the scores of priors that matched no gt.
        force_gt_scores_mask = fg_mask_pre_prior[:, :, None].repeat(
            1, 1, self.num_classes)
        assigned_scores = torch.where(force_gt_scores_mask > 0,
                                      assigned_scores,
                                      torch.full_like(assigned_scores, 0))
        return assigned_labels, assigned_bboxes, assigned_scores
| 13,143 | 41.128205 | 78 | py |
mmyolo | mmyolo-main/mmyolo/models/task_modules/coders/distance_point_bbox_coder.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
import torch
from mmdet.models.task_modules.coders import \
DistancePointBBoxCoder as MMDET_DistancePointBBoxCoder
from mmdet.structures.bbox import bbox2distance, distance2bbox
from mmyolo.registry import TASK_UTILS
@TASK_UTILS.register_module()
class DistancePointBBoxCoder(MMDET_DistancePointBBoxCoder):
    """Distance Point BBox coder.

    Converts gt bboxes given as (x1, y1, x2, y2) into point-to-boundary
    distances (left, top, right, bottom), and converts such distance
    predictions back into boxes.
    """

    def decode(
        self,
        points: torch.Tensor,
        pred_bboxes: torch.Tensor,
        stride: torch.Tensor,
        max_shape: Optional[Union[Sequence[int], torch.Tensor,
                                  Sequence[Sequence[int]]]] = None
    ) -> torch.Tensor:
        """Decode distance predictions into bounding boxes.

        Args:
            points (Tensor): Shape (B, N, 2) or (N, 2).
            pred_bboxes (Tensor): Distances from each point to the four
                boundaries (left, top, right, bottom). Shape (B, N, 4)
                or (N, 4).
            stride (Tensor): Featmap stride per point.
            max_shape (Sequence[int] or torch.Tensor or Sequence[
                Sequence[int]], optional): Maximum bounds for boxes,
                specifying (H, W, C) or (H, W). For priors of shape
                (B, N, 4) this should be a Sequence[Sequence[int]] of
                length B. Defaults to None.

        Returns:
            Tensor: Boxes with shape (N, 4) or (B, N, 4).
        """
        assert points.size(-1) == 2
        assert pred_bboxes.size(-1) == 4
        assert points.size(-2) == pred_bboxes.size(-2)
        # Drop the clipping bound when border clipping is disabled.
        clip_shape = None if self.clip_border is False else max_shape
        # Rescale per-point normalized distances by the featmap stride.
        scaled_distances = pred_bboxes * stride[None, :, None]
        return distance2bbox(points, scaled_distances, clip_shape)

    def encode(self,
               points: torch.Tensor,
               gt_bboxes: torch.Tensor,
               max_dis: float = 16.,
               eps: float = 0.01) -> torch.Tensor:
        """Encode bounding boxes as point-to-boundary distances.

        Rewritten (vs. the mmdet parent) to support batched inputs.

        Args:
            points (Tensor): Shape (B, N, 2) or (N, 2), format [x, y].
            gt_bboxes (Tensor or :obj:`BaseBoxes`): Shape (N, 4),
                "xyxy" format.
            max_dis (float): Upper bound of the distance. Defaults to 16.
            eps (float): Small value ensuring target < max_dis rather
                than <=. Defaults to 0.01.

        Returns:
            Tensor: Box transformation deltas, shape (N, 4) or (B, N, 4).
        """
        assert points.size(-1) == 2
        assert gt_bboxes.size(-1) == 4
        assert points.size(-2) == gt_bboxes.size(-2)
        return bbox2distance(points, gt_bboxes, max_dis, eps)
| 2,948 | 35.8625 | 78 | py |
mmyolo | mmyolo-main/mmyolo/models/task_modules/coders/yolox_bbox_coder.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Union
import torch
from mmdet.models.task_modules.coders.base_bbox_coder import BaseBBoxCoder
from mmyolo.registry import TASK_UTILS
@TASK_UTILS.register_module()
class YOLOXBBoxCoder(BaseBBoxCoder):
    """YOLOX BBox coder.

    Decodes predicted offsets (delta_x, delta_y, w, h) into boxes in
    (tl_x, tl_y, br_x, br_y) format.
    """

    def encode(self, **kwargs):
        """Encode deltas between bboxes and ground truth boxes."""
        pass

    def decode(self, priors: torch.Tensor, pred_bboxes: torch.Tensor,
               stride: Union[torch.Tensor, int]) -> torch.Tensor:
        """Decode regression results (delta_x, delta_y, w, h) into boxes
        (tl_x, tl_y, br_x, br_y).

        Args:
            priors (torch.Tensor): Basic boxes or points, e.g. anchors.
            pred_bboxes (torch.Tensor): Encoded boxes with shape (..., 4).
            stride (torch.Tensor | int): Strides of bboxes.

        Returns:
            torch.Tensor: Decoded boxes.
        """
        stride = stride[None, :, None]
        # Center offsets are predicted in stride units relative to priors.
        centers = (pred_bboxes[..., :2] * stride) + priors
        # Width/height are predicted in log space.
        half_sizes = pred_bboxes[..., 2:].exp() * stride * 0.5
        # (tl, br) = (center - half, center + half), concatenated as
        # (tl_x, tl_y, br_x, br_y).
        return torch.cat([centers - half_sizes, centers + half_sizes], dim=-1)
| 1,477 | 31.130435 | 78 | py |
mmyolo | mmyolo-main/mmyolo/models/task_modules/coders/distance_angle_point_coder.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
import torch
from mmyolo.registry import TASK_UTILS
try:
from mmrotate.models.task_modules.coders import \
DistanceAnglePointCoder as MMROTATE_DistanceAnglePointCoder
MMROTATE_AVAILABLE = True
except ImportError:
from mmdet.models.task_modules.coders import BaseBBoxCoder
MMROTATE_DistanceAnglePointCoder = BaseBBoxCoder
MMROTATE_AVAILABLE = False
@TASK_UTILS.register_module()
class DistanceAnglePointCoder(MMROTATE_DistanceAnglePointCoder):
    """Distance Angle Point BBox coder.

    Encodes gt bboxes (x, y, w, h, theta) into four point-to-boundary
    distances plus an angle (top, bottom, left, right, theta) and decodes
    them back.
    """

    def __init__(self, clip_border=True, angle_version='oc'):
        # Rotated-box support lives in mmrotate; fail early if it is absent.
        if not MMROTATE_AVAILABLE:
            raise ImportError(
                'Please run "mim install -r requirements/mmrotate.txt" '
                'to install mmrotate first for rotated detection.')
        super().__init__(clip_border=clip_border, angle_version=angle_version)

    def decode(
        self,
        points: torch.Tensor,
        pred_bboxes: torch.Tensor,
        stride: torch.Tensor,
        max_shape: Optional[Union[Sequence[int], torch.Tensor,
                                  Sequence[Sequence[int]]]] = None,
    ) -> torch.Tensor:
        """Decode distance-angle predictions into rotated boxes.

        Args:
            points (Tensor): Shape (B, N, 2) or (N, 2).
            pred_bboxes (Tensor): Distances from each point to the four
                boundaries plus an angle (left, top, right, bottom,
                angle). Shape (B, N, 5) or (N, 5).
            stride (Tensor): Featmap stride per point.
            max_shape (Sequence[int] or torch.Tensor or Sequence[
                Sequence[int]], optional): Maximum bounds for boxes,
                specifying (H, W, C) or (H, W). For priors of shape
                (B, N, 4) this should be a Sequence[Sequence[int]] of
                length B. Defaults to None.

        Returns:
            Tensor: Boxes with shape (N, 5) or (B, N, 5).
        """
        assert points.size(-1) == 2
        assert pred_bboxes.size(-1) == 5
        assert points.size(-2) == pred_bboxes.size(-2)
        clip_shape = None if self.clip_border is False else max_shape
        # Align stride with the (optional) batch dimension for broadcasting.
        if pred_bboxes.dim() == 2:
            stride = stride[:, None]
        else:
            stride = stride[None, :, None]
        # NOTE: scales the distance channels of ``pred_bboxes`` in place,
        # matching the original implementation's side effect.
        pred_bboxes[..., :4] = pred_bboxes[..., :4] * stride
        return self.distance2obb(points, pred_bboxes, clip_shape,
                                 self.angle_version)

    def encode(self,
               points: torch.Tensor,
               gt_bboxes: torch.Tensor,
               max_dis: float = 16.,
               eps: float = 0.01) -> torch.Tensor:
        """Encode rotated boxes as point-to-boundary distances plus angle.

        Args:
            points (Tensor): Shape (N, 2), format [x, y].
            gt_bboxes (Tensor): Shape (N, 5), "xywha" format.
            max_dis (float): Upper bound of the distance.
                Defaults to 16.
            eps (float): Small value ensuring target < max_dis rather
                than <=. Defaults to 0.01.

        Returns:
            Tensor: Box transformation deltas, shape (N, 5).
        """
        assert points.size(-1) == 2
        assert gt_bboxes.size(-1) == 5
        assert points.size(-2) == gt_bboxes.size(-2)
        return self.obb2distance(points, gt_bboxes, max_dis, eps)
| 3,512 | 35.978947 | 78 | py |
mmyolo | mmyolo-main/mmyolo/models/task_modules/coders/yolov5_bbox_coder.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Union
import torch
from mmdet.models.task_modules.coders.base_bbox_coder import BaseBBoxCoder
from mmyolo.registry import TASK_UTILS
@TASK_UTILS.register_module()
class YOLOv5BBoxCoder(BaseBBoxCoder):
    """YOLOv5 BBox coder.

    Decodes raw regression outputs (delta_x, delta_y, w, h) into boxes in
    (tl_x, tl_y, br_x, br_y) format, following the YOLOv5 parameterization.
    """

    def encode(self, **kwargs):
        """Encode deltas between bboxes and ground truth boxes."""
        pass

    def decode(self, priors: torch.Tensor, pred_bboxes: torch.Tensor,
               stride: Union[torch.Tensor, int]) -> torch.Tensor:
        """Decode regression results (delta_x, delta_x, w, h) to bboxes (tl_x,
        tl_y, br_x, br_y).

        Args:
            priors (torch.Tensor): Basic boxes or points, e.g. anchors,
                in (x1, y1, x2, y2) format.
            pred_bboxes (torch.Tensor): Raw (pre-sigmoid) box predictions.
            stride (torch.Tensor | int): Strides of bboxes.

        Returns:
            torch.Tensor: Decoded boxes in (tl_x, tl_y, br_x, br_y) format.
        """
        assert pred_bboxes.size(-1) == priors.size(-1) == 4
        preds = pred_bboxes.sigmoid()

        # Prior geometry: center and size of the anchor box.
        prior_cx = (priors[..., 0] + priors[..., 2]) * 0.5
        prior_cy = (priors[..., 1] + priors[..., 3]) * 0.5
        prior_w = priors[..., 2] - priors[..., 0]
        prior_h = priors[..., 3] - priors[..., 1]

        # mmdet anchors are already offset by 0.5, hence the (x - 0.5) term.
        cx = (preds[..., 0] - 0.5) * 2 * stride + prior_cx
        cy = (preds[..., 1] - 0.5) * 2 * stride + prior_cy
        # YOLOv5 size parameterization: (2 * sigmoid(x))^2 scaling.
        w = (preds[..., 2] * 2)**2 * prior_w
        h = (preds[..., 3] * 2)**2 * prior_h

        return torch.stack(
            (cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2), dim=-1)
| 1,895 | 32.857143 | 78 | py |
mmyolo | mmyolo-main/mmyolo/models/losses/iou_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from mmdet.models.losses.utils import weight_reduce_loss
from mmdet.structures.bbox import HorizontalBoxes
from mmyolo.registry import MODELS
def bbox_overlaps(pred: torch.Tensor,
                  target: torch.Tensor,
                  iou_mode: str = 'ciou',
                  bbox_format: str = 'xywh',
                  siou_theta: float = 4.0,
                  eps: float = 1e-7) -> torch.Tensor:
    r"""Calculate overlap between two set of bboxes.

    Supports plain IoU plus the CIoU (`Enhancing Geometric Factors into
    Model Learning and Inference for Object Detection and Instance
    Segmentation <https://arxiv.org/abs/2005.03572>`_), GIoU and SIoU
    (https://arxiv.org/pdf/2205.12740.pdf) variants.

    Note that the CIoU ``alpha`` term follows the YOLOv5 formulation,
    ``alpha = v / (v - ious + (1 + eps))``, which differs slightly from the
    mmdet one, ``alpha = (ious > 0.5).float() * v / (1 - ious + v)``.

    Args:
        pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2)
            or (x, y, w, h), shape (n, 4).
        target (Tensor): Corresponding gt bboxes, shape (n, 4).
        iou_mode (str): Options are ('iou', 'ciou', 'giou', 'siou').
            Defaults to "ciou".
        bbox_format (str): Options are "xywh" and "xyxy".
            Defaults to "xywh".
        siou_theta (float): siou_theta for SIoU when calculate shape cost.
            Defaults to 4.0.
        eps (float): Eps to avoid log(0).

    Returns:
        Tensor: shape (n, ).
    """
    assert iou_mode in ('iou', 'ciou', 'giou', 'siou')
    assert bbox_format in ('xyxy', 'xywh')
    if bbox_format == 'xywh':
        pred = HorizontalBoxes.cxcywh_to_xyxy(pred)
        target = HorizontalBoxes.cxcywh_to_xyxy(target)

    px1, py1 = pred[..., 0], pred[..., 1]
    px2, py2 = pred[..., 2], pred[..., 3]
    tx1, ty1 = target[..., 0], target[..., 1]
    tx2, ty2 = target[..., 2], target[..., 3]

    # Intersection area of each pred/target pair.
    overlap = (torch.min(px2, tx2) - torch.max(px1, tx1)).clamp(0) * \
        (torch.min(py2, ty2) - torch.max(py1, ty1)).clamp(0)

    # Union area.
    w1, h1 = px2 - px1, py2 - py1
    w2, h2 = tx2 - tx1, ty2 - ty1
    union = (w1 * h1) + (w2 * h2) - overlap + eps

    # Re-derive heights with eps so the ratio terms below cannot divide by 0
    # (matches the reference implementation, which keeps the eps-free
    # heights only for the union).
    h1 = py2 - py1 + eps
    h2 = ty2 - ty1 + eps

    ious = overlap / union

    # Smallest axis-aligned box enclosing both pred and target.
    enclose_x1y1 = torch.min(pred[..., :2], target[..., :2])
    enclose_x2y2 = torch.max(pred[..., 2:], target[..., 2:])
    enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
    enclose_w = enclose_wh[..., 0]
    enclose_h = enclose_wh[..., 1]

    if iou_mode == 'ciou':
        # CIoU = IoU - (center-distance term + aspect-ratio term).
        enclose_area = enclose_w**2 + enclose_h**2 + eps

        # Squared distance between the two box centers. Coordinates are
        # xyxy, so each center is half the corner sum — hence the / 4
        # after squaring.
        rho2 = ((tx1 + tx2) - (px1 + px2))**2 / 4 + \
            ((ty1 + ty2) - (py1 + py2))**2 / 4

        # Aspect-ratio consistency term (v).
        wh_ratio = (4 / (math.pi**2)) * torch.pow(
            torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)

        with torch.no_grad():
            # YOLOv5-style trade-off coefficient; excluded from backprop.
            alpha = wh_ratio / (wh_ratio - ious + (1 + eps))

        ious = ious - ((rho2 / enclose_area) + (alpha * wh_ratio))
    elif iou_mode == 'giou':
        # GIoU = IoU - (A_c - union) / A_c.
        convex_area = enclose_w * enclose_h + eps
        ious = ious - (convex_area - union) / convex_area
    elif iou_mode == 'siou':
        # SIoU = IoU - (distance cost + shape cost) / 2.
        # Center offset of target relative to pred.
        sigma_cw = (tx1 + tx2) / 2 - (px1 + px2) / 2 + eps
        sigma_ch = (ty1 + ty2) / 2 - (py1 + py2) / 2 + eps
        # Euclidean center distance.
        sigma = torch.pow(sigma_cw**2 + sigma_ch**2, 0.5)

        # Angle cost: pick the smaller of the two axis angles.
        sin_alpha = torch.abs(sigma_ch) / sigma
        sin_beta = torch.abs(sigma_cw) / sigma
        sin_alpha = torch.where(sin_alpha <= math.sin(math.pi / 4), sin_alpha,
                                sin_beta)
        # 1 - 2 * sin^2(arcsin(x) - pi/4), rewritten via cos.
        angle_cost = torch.cos(torch.arcsin(sin_alpha) * 2 - math.pi / 2)

        # Distance cost, attenuated by the angle-dependent gamma.
        rho_x = (sigma_cw / enclose_w)**2
        rho_y = (sigma_ch / enclose_h)**2
        gamma = 2 - angle_cost
        distance_cost = (1 - torch.exp(-1 * gamma * rho_x)) + (
            1 - torch.exp(-1 * gamma * rho_y))

        # Shape cost over the width/height mismatch.
        omiga_w = torch.abs(w1 - w2) / torch.max(w1, w2)
        omiga_h = torch.abs(h1 - h2) / torch.max(h1, h2)
        shape_cost = torch.pow(1 - torch.exp(-1 * omiga_w),
                               siou_theta) + torch.pow(
                                   1 - torch.exp(-1 * omiga_h), siou_theta)

        ious = ious - ((distance_cost + shape_cost) * 0.5)

    return ious.clamp(min=-1.0, max=1.0)
@MODELS.register_module()
class IoULoss(nn.Module):
    """IoU-based bounding-box regression loss.

    Computes ``loss_weight * (1 - IoU)`` between predicted and target boxes,
    where the IoU variant (CIoU / GIoU / SIoU) is selected by ``iou_mode``.

    Args:
        iou_mode (str): Options are "ciou".
            Defaults to "ciou".
        bbox_format (str): Options are "xywh" and "xyxy".
            Defaults to "xywh".
        eps (float): Eps to avoid log(0).
        reduction (str): Options are "none", "mean" and "sum".
        loss_weight (float): Weight of loss.
        return_iou (bool): If True, return loss and iou.
    """

    def __init__(self,
                 iou_mode: str = 'ciou',
                 bbox_format: str = 'xywh',
                 eps: float = 1e-7,
                 reduction: str = 'mean',
                 loss_weight: float = 1.0,
                 return_iou: bool = True):
        super().__init__()
        assert iou_mode in ('ciou', 'siou', 'giou')
        assert bbox_format in ('xywh', 'xyxy')
        self.iou_mode = iou_mode
        self.bbox_format = bbox_format
        self.eps = eps
        self.reduction = reduction
        self.loss_weight = loss_weight
        self.return_iou = return_iou

    def forward(
        self,
        pred: torch.Tensor,
        target: torch.Tensor,
        weight: Optional[torch.Tensor] = None,
        avg_factor: Optional[float] = None,
        reduction_override: Optional[Union[str, bool]] = None
    ) -> Tuple[Union[torch.Tensor, torch.Tensor], torch.Tensor]:
        """Forward function.

        Args:
            pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2)
                or (x, y, w, h), shape (n, 4).
            target (Tensor): Corresponding gt bboxes, shape (n, 4).
            weight (Tensor, optional): Element-wise weights.
            avg_factor (float, optional): Average factor when computing the
                mean of losses.
            reduction_override (str, bool, optional): Same as built-in losses
                of PyTorch. Defaults to None.

        Returns:
            loss or tuple(loss, iou):
        """
        # Fast path: all weights are zero, so the loss is exactly zero; the
        # multiply keeps the result connected to ``pred`` in the autograd
        # graph.
        if weight is not None and not torch.any(weight > 0):
            if pred.dim() == weight.dim() + 1:
                weight = weight.unsqueeze(1)
            return (pred * weight).sum()

        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = reduction_override if reduction_override else self.reduction

        if weight is not None and weight.dim() > 1:
            # Collapse per-coordinate weights to one weight per box.
            weight = weight.mean(-1)

        ious = bbox_overlaps(
            pred,
            target,
            iou_mode=self.iou_mode,
            bbox_format=self.bbox_format,
            eps=self.eps)
        loss = self.loss_weight * weight_reduce_loss(1.0 - ious, weight,
                                                     reduction, avg_factor)
        return (loss, ious) if self.return_iou else loss
| 8,786 | 36.712446 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/backbones/yolov7_backbone.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Optional, Tuple, Union
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.models.backbones.csp_darknet import Focus
from mmdet.utils import ConfigType, OptMultiConfig
from mmyolo.registry import MODELS
from ..layers import MaxPoolAndStrideConvBlock
from .base_backbone import BaseBackbone
@MODELS.register_module()
class YOLOv7Backbone(BaseBackbone):
    """Backbone used in YOLOv7.

    Args:
        arch (str): Architecture of YOLOv7Defaults to L.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        input_channels (int): Number of input image channels. Defaults to 3.
        out_indices (Sequence[int]): Output from which stages.
            Defaults to (2, 3, 4).
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Defaults to -1.
        plugins (list[dict]): List of plugins for stages, each dict contains:
            - cfg (dict, required): Cfg dict to build plugin.
            - stages (tuple[bool], optional): Stages to apply plugin, length
              should be same as 'num_stages'.
        norm_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and
            config norm layer. Defaults to dict(type='BN', requires_grad=True).
        act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        init_cfg (:obj:`ConfigDict` or dict or list[dict] or
            list[:obj:`ConfigDict`]): Initialization config dict.
    """
    # Per-arch stage-block configs (middle/block ratios and block counts
    # follow the official YOLOv7 model definitions).
    _tiny_stage1_cfg = dict(type='TinyDownSampleBlock', middle_ratio=0.5)
    _tiny_stage2_4_cfg = dict(type='TinyDownSampleBlock', middle_ratio=1.0)
    _l_expand_channel_2x = dict(
        type='ELANBlock',
        middle_ratio=0.5,
        block_ratio=0.5,
        num_blocks=2,
        num_convs_in_block=2)
    _l_no_change_channel = dict(
        type='ELANBlock',
        middle_ratio=0.25,
        block_ratio=0.25,
        num_blocks=2,
        num_convs_in_block=2)
    _x_expand_channel_2x = dict(
        type='ELANBlock',
        middle_ratio=0.4,
        block_ratio=0.4,
        num_blocks=3,
        num_convs_in_block=2)
    _x_no_change_channel = dict(
        type='ELANBlock',
        middle_ratio=0.2,
        block_ratio=0.2,
        num_blocks=3,
        num_convs_in_block=2)
    _w_no_change_channel = dict(
        type='ELANBlock',
        middle_ratio=0.5,
        block_ratio=0.5,
        num_blocks=2,
        num_convs_in_block=2)
    _e_no_change_channel = dict(
        type='ELANBlock',
        middle_ratio=0.4,
        block_ratio=0.4,
        num_blocks=3,
        num_convs_in_block=2)
    _d_no_change_channel = dict(
        type='ELANBlock',
        middle_ratio=1 / 3,
        block_ratio=1 / 3,
        num_blocks=4,
        num_convs_in_block=2)
    _e2e_no_change_channel = dict(
        type='EELANBlock',
        num_elan_block=2,
        middle_ratio=0.4,
        block_ratio=0.4,
        num_blocks=3,
        num_convs_in_block=2)

    # From left to right:
    # in_channels, out_channels, Block_params
    arch_settings = {
        'Tiny': [[64, 64, _tiny_stage1_cfg], [64, 128, _tiny_stage2_4_cfg],
                 [128, 256, _tiny_stage2_4_cfg],
                 [256, 512, _tiny_stage2_4_cfg]],
        'L': [[64, 256, _l_expand_channel_2x],
              [256, 512, _l_expand_channel_2x],
              [512, 1024, _l_expand_channel_2x],
              [1024, 1024, _l_no_change_channel]],
        'X': [[80, 320, _x_expand_channel_2x],
              [320, 640, _x_expand_channel_2x],
              [640, 1280, _x_expand_channel_2x],
              [1280, 1280, _x_no_change_channel]],
        'W':
        [[64, 128, _w_no_change_channel], [128, 256, _w_no_change_channel],
         [256, 512, _w_no_change_channel], [512, 768, _w_no_change_channel],
         [768, 1024, _w_no_change_channel]],
        'E':
        [[80, 160, _e_no_change_channel], [160, 320, _e_no_change_channel],
         [320, 640, _e_no_change_channel], [640, 960, _e_no_change_channel],
         [960, 1280, _e_no_change_channel]],
        'D': [[96, 192,
               _d_no_change_channel], [192, 384, _d_no_change_channel],
              [384, 768, _d_no_change_channel],
              [768, 1152, _d_no_change_channel],
              [1152, 1536, _d_no_change_channel]],
        'E2E': [[80, 160, _e2e_no_change_channel],
                [160, 320, _e2e_no_change_channel],
                [320, 640, _e2e_no_change_channel],
                [640, 960, _e2e_no_change_channel],
                [960, 1280, _e2e_no_change_channel]],
    }

    def __init__(self,
                 arch: str = 'L',
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 input_channels: int = 3,
                 out_indices: Tuple[int] = (2, 3, 4),
                 frozen_stages: int = -1,
                 plugins: Union[dict, List[dict]] = None,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 norm_eval: bool = False,
                 init_cfg: OptMultiConfig = None):
        assert arch in self.arch_settings.keys()
        self.arch = arch
        super().__init__(
            self.arch_settings[arch],
            deepen_factor,
            widen_factor,
            input_channels=input_channels,
            out_indices=out_indices,
            plugins=plugins,
            frozen_stages=frozen_stages,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            norm_eval=norm_eval,
            init_cfg=init_cfg)

    def build_stem_layer(self) -> nn.Module:
        """Build a stem layer.

        The first convolution consumes ``self.input_channels`` (set by
        ``__init__``, default 3) rather than a hard-coded 3, so non-RGB
        inputs work — consistent with the other backbones in this package.
        """
        if self.arch in ['L', 'X']:
            stem = nn.Sequential(
                ConvModule(
                    self.input_channels,
                    int(self.arch_setting[0][0] * self.widen_factor // 2),
                    3,
                    padding=1,
                    stride=1,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg),
                ConvModule(
                    int(self.arch_setting[0][0] * self.widen_factor // 2),
                    int(self.arch_setting[0][0] * self.widen_factor),
                    3,
                    padding=1,
                    stride=2,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg),
                ConvModule(
                    int(self.arch_setting[0][0] * self.widen_factor),
                    int(self.arch_setting[0][0] * self.widen_factor),
                    3,
                    padding=1,
                    stride=1,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))
        elif self.arch == 'Tiny':
            stem = nn.Sequential(
                ConvModule(
                    self.input_channels,
                    int(self.arch_setting[0][0] * self.widen_factor // 2),
                    3,
                    padding=1,
                    stride=2,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg),
                ConvModule(
                    int(self.arch_setting[0][0] * self.widen_factor // 2),
                    int(self.arch_setting[0][0] * self.widen_factor),
                    3,
                    padding=1,
                    stride=2,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))
        elif self.arch in ['W', 'E', 'D', 'E2E']:
            stem = Focus(
                self.input_channels,
                int(self.arch_setting[0][0] * self.widen_factor),
                kernel_size=3,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        return stem

    def build_stage_layer(self, stage_idx: int, setting: list) -> list:
        """Build a stage layer.

        Args:
            stage_idx (int): The index of a stage layer.
            setting (list): The architecture setting of a stage layer.
        """
        in_channels, out_channels, stage_block_cfg = setting
        in_channels = int(in_channels * self.widen_factor)
        out_channels = int(out_channels * self.widen_factor)

        # Copy before mutating: the cfg dicts are shared class attributes.
        stage_block_cfg = stage_block_cfg.copy()
        stage_block_cfg.setdefault('norm_cfg', self.norm_cfg)
        stage_block_cfg.setdefault('act_cfg', self.act_cfg)

        stage_block_cfg['in_channels'] = in_channels
        stage_block_cfg['out_channels'] = out_channels

        stage = []
        if self.arch in ['W', 'E', 'D', 'E2E']:
            # These arches downsample to out_channels first, then run the
            # stage block at that width.
            stage_block_cfg['in_channels'] = out_channels
        elif self.arch in ['L', 'X']:
            if stage_idx == 0:
                # Stage 0's downsample conv halves the width (see below).
                stage_block_cfg['in_channels'] = out_channels // 2

        downsample_layer = self._build_downsample_layer(
            stage_idx, in_channels, out_channels)
        stage.append(MODELS.build(stage_block_cfg))
        if downsample_layer is not None:
            stage.insert(0, downsample_layer)
        return stage

    def _build_downsample_layer(self, stage_idx: int, in_channels: int,
                                out_channels: int) -> Optional[nn.Module]:
        """Build a downsample layer pre stage.

        Returns None when the arch/stage needs no explicit downsampling
        (Tiny's first stage, which is downsampled by the stem).
        """
        if self.arch in ['E', 'D', 'E2E']:
            downsample_layer = MaxPoolAndStrideConvBlock(
                in_channels,
                out_channels,
                use_in_channels_of_middle=True,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        elif self.arch == 'W':
            downsample_layer = ConvModule(
                in_channels,
                out_channels,
                3,
                stride=2,
                padding=1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
        elif self.arch == 'Tiny':
            if stage_idx != 0:
                downsample_layer = nn.MaxPool2d(2, 2)
            else:
                downsample_layer = None
        elif self.arch in ['L', 'X']:
            if stage_idx == 0:
                downsample_layer = ConvModule(
                    in_channels,
                    out_channels // 2,
                    3,
                    stride=2,
                    padding=1,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg)
            else:
                downsample_layer = MaxPoolAndStrideConvBlock(
                    in_channels,
                    in_channels,
                    use_in_channels_of_middle=False,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg)
        return downsample_layer
| 11,081 | 37.748252 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/backbones/efficient_rep.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import torch
import torch.nn as nn
from mmdet.utils import ConfigType, OptMultiConfig
from mmyolo.models.layers.yolo_bricks import SPPFBottleneck
from mmyolo.registry import MODELS
from ..layers import BepC3StageBlock, RepStageBlock
from ..utils import make_round
from .base_backbone import BaseBackbone
@MODELS.register_module()
class YOLOv6EfficientRep(BaseBackbone):
    """EfficientRep backbone used in YOLOv6.

    Args:
        arch (str): Architecture of BaseDarknet, from {P5, P6}.
            Defaults to P5.
        plugins (list[dict]): List of plugins for stages, each dict contains:
            - cfg (dict, required): Cfg dict to build plugin.
            - stages (tuple[bool], optional): Stages to apply plugin, length
              should be same as 'num_stages'.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        input_channels (int): Number of input image channels. Defaults to 3.
        out_indices (Tuple[int]): Output from which stages.
            Defaults to (2, 3, 4).
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Defaults to -1.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='ReLU', inplace=True).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Defaults to False.
        block_cfg (dict): Config dict for the block used to build each
            layer. Defaults to dict(type='RepVGGBlock').
        init_cfg (Union[dict, list[dict]], optional): Initialization config
            dict. Defaults to None.
    Example:
        >>> from mmyolo.models import YOLOv6EfficientRep
        >>> import torch
        >>> model = YOLOv6EfficientRep()
        >>> model.eval()
        >>> inputs = torch.rand(1, 3, 416, 416)
        >>> level_outputs = model(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        ...
        (1, 256, 52, 52)
        (1, 512, 26, 26)
        (1, 1024, 13, 13)
    """
    # From left to right:
    # in_channels, out_channels, num_blocks, use_spp
    arch_settings = {
        'P5': [[64, 128, 6, False], [128, 256, 12, False],
               [256, 512, 18, False], [512, 1024, 6, True]]
    }
    def __init__(self,
                 arch: str = 'P5',
                 plugins: Union[dict, List[dict]] = None,
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 input_channels: int = 3,
                 out_indices: Tuple[int] = (2, 3, 4),
                 frozen_stages: int = -1,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='ReLU', inplace=True),
                 norm_eval: bool = False,
                 block_cfg: ConfigType = dict(type='RepVGGBlock'),
                 init_cfg: OptMultiConfig = None):
        # Stored before super().__init__ because the base class calls
        # build_stem_layer/build_stage_layer during construction.
        self.block_cfg = block_cfg
        super().__init__(
            self.arch_settings[arch],
            deepen_factor,
            widen_factor,
            input_channels=input_channels,
            out_indices=out_indices,
            plugins=plugins,
            frozen_stages=frozen_stages,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            norm_eval=norm_eval,
            init_cfg=init_cfg)
    def build_stem_layer(self) -> nn.Module:
        """Build a stem layer.

        The stem is a single stride-2 block (``RepVGGBlock`` by default)
        mapping the input image to the first stage's channel count.
        """
        block_cfg = self.block_cfg.copy()
        block_cfg.update(
            dict(
                in_channels=self.input_channels,
                out_channels=int(self.arch_setting[0][0] * self.widen_factor),
                kernel_size=3,
                stride=2,
            ))
        return MODELS.build(block_cfg)
    def build_stage_layer(self, stage_idx: int, setting: list) -> list:
        """Build a stage layer.

        Args:
            stage_idx (int): The index of a stage layer.
            setting (list): The architecture setting of a stage layer.
        """
        in_channels, out_channels, num_blocks, use_spp = setting
        in_channels = int(in_channels * self.widen_factor)
        out_channels = int(out_channels * self.widen_factor)
        num_blocks = make_round(num_blocks, self.deepen_factor)
        # Stacked rep-style blocks operating at the stage's output width.
        rep_stage_block = RepStageBlock(
            in_channels=out_channels,
            out_channels=out_channels,
            num_blocks=num_blocks,
            block_cfg=self.block_cfg,
        )
        # Stride-2 block that downsamples into the stage.
        block_cfg = self.block_cfg.copy()
        block_cfg.update(
            dict(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=3,
                stride=2))
        stage = []
        ef_block = nn.Sequential(MODELS.build(block_cfg), rep_stage_block)
        stage.append(ef_block)
        if use_spp:
            # SPPF only on the last stage (see arch_settings).
            spp = SPPFBottleneck(
                in_channels=out_channels,
                out_channels=out_channels,
                kernel_sizes=5,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
            stage.append(spp)
        return stage
    def init_weights(self):
        """Initialize the parameters."""
        if self.init_cfg is None:
            for m in self.modules():
                if isinstance(m, torch.nn.Conv2d):
                    # In order to be consistent with the source code,
                    # reset the Conv2d initialization parameters
                    m.reset_parameters()
        else:
            super().init_weights()
@MODELS.register_module()
class YOLOv6CSPBep(YOLOv6EfficientRep):
    """CSPBep backbone used in YOLOv6.

    Args:
        arch (str): Architecture of BaseDarknet, from {P5, P6}.
            Defaults to P5.
        plugins (list[dict]): List of plugins for stages, each dict contains:
            - cfg (dict, required): Cfg dict to build plugin.
            - stages (tuple[bool], optional): Stages to apply plugin, length
              should be same as 'num_stages'.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        input_channels (int): Number of input image channels. Defaults to 3.
        hidden_ratio (float): Hidden channel expand ratio used by
            ``BepC3StageBlock``. Defaults to 0.5.
        out_indices (Tuple[int]): Output from which stages.
            Defaults to (2, 3, 4).
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Defaults to -1.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Defaults to False.
        block_cfg (dict): Config dict for the block used to build each
            layer. Defaults to dict(type='ConvWrapper').
        init_cfg (Union[dict, list[dict]], optional): Initialization config
            dict. Defaults to None.
    Example:
        >>> from mmyolo.models import YOLOv6CSPBep
        >>> import torch
        >>> model = YOLOv6CSPBep()
        >>> model.eval()
        >>> inputs = torch.rand(1, 3, 416, 416)
        >>> level_outputs = model(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        ...
        (1, 256, 52, 52)
        (1, 512, 26, 26)
        (1, 1024, 13, 13)
    """
    # From left to right:
    # in_channels, out_channels, num_blocks, use_spp
    arch_settings = {
        'P5': [[64, 128, 6, False], [128, 256, 12, False],
               [256, 512, 18, False], [512, 1024, 6, True]]
    }
    def __init__(self,
                 arch: str = 'P5',
                 plugins: Union[dict, List[dict]] = None,
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 input_channels: int = 3,
                 hidden_ratio: float = 0.5,
                 out_indices: Tuple[int] = (2, 3, 4),
                 frozen_stages: int = -1,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 norm_eval: bool = False,
                 block_cfg: ConfigType = dict(type='ConvWrapper'),
                 init_cfg: OptMultiConfig = None):
        # Stored before super().__init__ because build_stage_layer (called
        # during base-class construction) reads it.
        self.hidden_ratio = hidden_ratio
        super().__init__(
            arch=arch,
            deepen_factor=deepen_factor,
            widen_factor=widen_factor,
            input_channels=input_channels,
            out_indices=out_indices,
            plugins=plugins,
            frozen_stages=frozen_stages,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            norm_eval=norm_eval,
            block_cfg=block_cfg,
            init_cfg=init_cfg)
    def build_stage_layer(self, stage_idx: int, setting: list) -> list:
        """Build a stage layer.

        Same layout as the parent, but the per-stage block is a
        ``BepC3StageBlock`` instead of a ``RepStageBlock``.

        Args:
            stage_idx (int): The index of a stage layer.
            setting (list): The architecture setting of a stage layer.
        """
        in_channels, out_channels, num_blocks, use_spp = setting
        in_channels = int(in_channels * self.widen_factor)
        out_channels = int(out_channels * self.widen_factor)
        num_blocks = make_round(num_blocks, self.deepen_factor)
        rep_stage_block = BepC3StageBlock(
            in_channels=out_channels,
            out_channels=out_channels,
            num_blocks=num_blocks,
            hidden_ratio=self.hidden_ratio,
            block_cfg=self.block_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        # Stride-2 block that downsamples into the stage.
        block_cfg = self.block_cfg.copy()
        block_cfg.update(
            dict(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=3,
                stride=2))
        stage = []
        ef_block = nn.Sequential(MODELS.build(block_cfg), rep_stage_block)
        stage.append(ef_block)
        if use_spp:
            spp = SPPFBottleneck(
                in_channels=out_channels,
                out_channels=out_channels,
                kernel_sizes=5,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
            stage.append(spp)
        return stage
| 11,355 | 38.430556 | 78 | py |
mmyolo | mmyolo-main/mmyolo/models/backbones/csp_resnet.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.utils import ConfigType, OptMultiConfig
from mmyolo.models.backbones import BaseBackbone
from mmyolo.models.layers.yolo_bricks import CSPResLayer
from mmyolo.registry import MODELS
@MODELS.register_module()
class PPYOLOECSPResNet(BaseBackbone):
    """CSP-ResNet backbone used in PPYOLOE.

    Args:
        arch (str): Architecture of CSPNeXt, from {P5, P6}.
            Defaults to P5.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        input_channels (int): Number of input image channels. Defaults to 3.
        out_indices (Sequence[int]): Output from which stages.
            Defaults to (2, 3, 4).
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Defaults to -1.
        plugins (list[dict]): List of plugins for stages, each dict contains:
            - cfg (dict, required): Cfg dict to build plugin.
            - stages (tuple[bool], optional): Stages to apply plugin, length
              should be same as 'num_stages'.
        arch_ovewrite (list): Overwrite default arch settings.
            Defaults to None.
        block_cfg (dict): Config dict for block. Defaults to
            dict(type='PPYOLOEBasicBlock', shortcut=True, use_alpha=True)
        norm_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and
            config norm layer. Defaults to dict(type='BN', momentum=0.1,
            eps=1e-5).
        act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        attention_cfg (dict): Config dict for `EffectiveSELayer`.
            Defaults to dict(type='EffectiveSELayer',
            act_cfg=dict(type='HSigmoid')).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        init_cfg (:obj:`ConfigDict` or dict or list[dict] or
            list[:obj:`ConfigDict`]): Initialization config dict.
        use_large_stem (bool): Whether to use large stem layer.
            Defaults to False.
    """
    # From left to right:
    # in_channels, out_channels, num_blocks
    arch_settings = {
        'P5': [[64, 128, 3], [128, 256, 6], [256, 512, 6], [512, 1024, 3]]
    }
    def __init__(self,
                 arch: str = 'P5',
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 input_channels: int = 3,
                 out_indices: Tuple[int] = (2, 3, 4),
                 frozen_stages: int = -1,
                 plugins: Union[dict, List[dict]] = None,
                 arch_ovewrite: dict = None,
                 block_cfg: ConfigType = dict(
                     type='PPYOLOEBasicBlock', shortcut=True, use_alpha=True),
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.1, eps=1e-5),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 attention_cfg: ConfigType = dict(
                     type='EffectiveSELayer', act_cfg=dict(type='HSigmoid')),
                 norm_eval: bool = False,
                 init_cfg: OptMultiConfig = None,
                 use_large_stem: bool = False):
        arch_setting = self.arch_settings[arch]
        if arch_ovewrite:
            arch_setting = arch_ovewrite
        # Unlike the other backbones in this package, width/depth scaling is
        # folded into the arch setting up front, so the stage builder uses
        # these values as-is.
        arch_setting = [[
            int(in_channels * widen_factor),
            int(out_channels * widen_factor),
            round(num_blocks * deepen_factor)
        ] for in_channels, out_channels, num_blocks in arch_setting]
        self.block_cfg = block_cfg
        self.use_large_stem = use_large_stem
        self.attention_cfg = attention_cfg
        super().__init__(
            arch_setting,
            deepen_factor,
            widen_factor,
            input_channels=input_channels,
            out_indices=out_indices,
            plugins=plugins,
            frozen_stages=frozen_stages,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            norm_eval=norm_eval,
            init_cfg=init_cfg)
    def build_stem_layer(self) -> nn.Module:
        """Build a stem layer.

        Either a 3-conv "large" stem (one stride-2 conv followed by two
        stride-1 convs) or the default 2-conv stem.
        """
        if self.use_large_stem:
            stem = nn.Sequential(
                ConvModule(
                    self.input_channels,
                    self.arch_setting[0][0] // 2,
                    3,
                    stride=2,
                    padding=1,
                    act_cfg=self.act_cfg,
                    norm_cfg=self.norm_cfg),
                ConvModule(
                    self.arch_setting[0][0] // 2,
                    self.arch_setting[0][0] // 2,
                    3,
                    stride=1,
                    padding=1,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg),
                ConvModule(
                    self.arch_setting[0][0] // 2,
                    self.arch_setting[0][0],
                    3,
                    stride=1,
                    padding=1,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))
        else:
            stem = nn.Sequential(
                ConvModule(
                    self.input_channels,
                    self.arch_setting[0][0] // 2,
                    3,
                    stride=2,
                    padding=1,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg),
                ConvModule(
                    self.arch_setting[0][0] // 2,
                    self.arch_setting[0][0],
                    3,
                    stride=1,
                    padding=1,
                    norm_cfg=self.norm_cfg,
                    act_cfg=self.act_cfg))
        return stem
    def build_stage_layer(self, stage_idx: int, setting: list) -> list:
        """Build a stage layer.

        Each stage is one ``CSPResLayer`` that downsamples (stride=2) and
        stacks ``num_blocks`` basic blocks.

        Args:
            stage_idx (int): The index of a stage layer.
            setting (list): The architecture setting of a stage layer.
        """
        in_channels, out_channels, num_blocks = setting
        cspres_layer = CSPResLayer(
            in_channels=in_channels,
            out_channels=out_channels,
            num_block=num_blocks,
            block_cfg=self.block_cfg,
            stride=2,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg,
            attention_cfg=self.attention_cfg,
            use_spp=False)
        return [cspres_layer]
| 6,791 | 38.952941 | 78 | py |
mmyolo | mmyolo-main/mmyolo/models/backbones/base_backbone.py | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from typing import List, Sequence, Union
import torch
import torch.nn as nn
from mmcv.cnn import build_plugin_layer
from mmdet.utils import ConfigType, OptMultiConfig
from mmengine.model import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from mmyolo.registry import MODELS
@MODELS.register_module()
class BaseBackbone(BaseModule, metaclass=ABCMeta):
    """BaseBackbone backbone used in YOLO series.

    .. code:: text

        Backbone model structure diagram
        +-----------+
        |   input   |
        +-----------+
              v
        +-----------+
        |   stem    |
        |   layer   |
        +-----------+
              v
        +-----------+
        |   stage   |
        |  layer 1  |
        +-----------+
              v
        +-----------+
        |   stage   |
        |  layer 2  |
        +-----------+
              v
            ......
              v
        +-----------+
        |   stage   |
        |  layer n  |
        +-----------+

        In P5 model, n=4
        In P6 model, n=5

    Args:
        arch_setting (list): Architecture of BaseBackbone.
        plugins (list[dict]): List of plugins for stages, each dict contains:

            - cfg (dict, required): Cfg dict to build plugin.
            - stages (tuple[bool], optional): Stages to apply plugin, length
              should be same as 'num_stages'.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        input_channels: Number of input image channels. Defaults to 3.
        out_indices (Sequence[int]): Output from which stages.
            Defaults to (2, 3, 4).
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Defaults to -1.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Defaults to None.
        act_cfg (dict): Config dict for activation layer.
            Defaults to None.
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Defaults to False.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
    """
    def __init__(self,
                 arch_setting: list,
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 input_channels: int = 3,
                 out_indices: Sequence[int] = (2, 3, 4),
                 frozen_stages: int = -1,
                 plugins: Union[dict, List[dict]] = None,
                 norm_cfg: ConfigType = None,
                 act_cfg: ConfigType = None,
                 norm_eval: bool = False,
                 init_cfg: OptMultiConfig = None):
        super().__init__(init_cfg)
        self.num_stages = len(arch_setting)
        self.arch_setting = arch_setting
        # Valid indices are 0 (the stem) through num_stages inclusive.
        assert set(out_indices).issubset(
            i for i in range(len(arch_setting) + 1))
        if frozen_stages not in range(-1, len(arch_setting) + 1):
            raise ValueError('"frozen_stages" must be in range(-1, '
                             'len(arch_setting) + 1). But received '
                             f'{frozen_stages}')
        self.input_channels = input_channels
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages
        self.widen_factor = widen_factor
        self.deepen_factor = deepen_factor
        self.norm_eval = norm_eval
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        self.plugins = plugins
        self.stem = self.build_stem_layer()
        # Ordered list of child-module names; ``forward`` and
        # ``_freeze_stages`` iterate this list, so order matters.
        self.layers = ['stem']
        for idx, setting in enumerate(arch_setting):
            stage = []
            stage += self.build_stage_layer(idx, setting)
            if plugins is not None:
                # Plugins are appended after the stage's own layers.
                stage += self.make_stage_plugins(plugins, idx, setting)
            self.add_module(f'stage{idx + 1}', nn.Sequential(*stage))
            self.layers.append(f'stage{idx + 1}')
    @abstractmethod
    def build_stem_layer(self):
        """Build a stem layer.

        Subclasses must return an ``nn.Module`` consuming the raw image.
        """
        pass
    @abstractmethod
    def build_stage_layer(self, stage_idx: int, setting: list):
        """Build a stage layer.

        Args:
            stage_idx (int): The index of a stage layer.
            setting (list): The architecture setting of a stage layer.
        """
        pass
    def make_stage_plugins(self, plugins, stage_idx, setting):
        """Make plugins for backbone ``stage_idx`` th stage.

        Currently we support to insert ``context_block``,
        ``empirical_attention_block``, ``nonlocal_block``, ``dropout_block``
        into the backbone.

        An example of plugins format could be:

        Examples:
            >>> plugins=[
            ...     dict(cfg=dict(type='xxx', arg1='xxx'),
            ...          stages=(False, True, True, True)),
            ...     dict(cfg=dict(type='yyy'),
            ...          stages=(True, True, True, True)),
            ... ]
            >>> model = YOLOv5CSPDarknet()
            >>> stage_plugins = model.make_stage_plugins(plugins, 0, setting)
            >>> assert len(stage_plugins) == 1

        Suppose ``stage_idx=0``, the structure of blocks in the stage would be:

        .. code-block:: none

            conv1 -> conv2 -> conv3 -> yyy

        Suppose ``stage_idx=1``, the structure of blocks in the stage would be:

        .. code-block:: none

            conv1 -> conv2 -> conv3 -> xxx -> yyy

        Args:
            plugins (list[dict]): List of plugins cfg to build. The postfix is
                required if multiple same type plugins are inserted.
            stage_idx (int): Index of stage to build
                If stages is missing, the plugin would be applied to all
                stages.
            setting (list): The architecture setting of a stage layer.

        Returns:
            list[nn.Module]: Plugins for current stage
        """
        # TODO: It is not general enough to support any channel and needs
        # to be refactored
        # NOTE(review): assumes setting[1] holds the stage's (unscaled)
        # output channels — true for all current subclasses; verify for new
        # arch settings.
        in_channels = int(setting[1] * self.widen_factor)
        plugin_layers = []
        for plugin in plugins:
            # Copy so popping 'stages' does not mutate the caller's cfg.
            plugin = plugin.copy()
            stages = plugin.pop('stages', None)
            assert stages is None or len(stages) == self.num_stages
            if stages is None or stages[stage_idx]:
                name, layer = build_plugin_layer(
                    plugin['cfg'], in_channels=in_channels)
                plugin_layers.append(layer)
        return plugin_layers
    def _freeze_stages(self):
        """Freeze the parameters of the specified stage so that they are no
        longer updated."""
        if self.frozen_stages >= 0:
            # Freezes the stem plus stages 1..frozen_stages (inclusive).
            for i in range(self.frozen_stages + 1):
                m = getattr(self, self.layers[i])
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False
    def train(self, mode: bool = True):
        """Convert the model into training mode while keep normalization layer
        frozen."""
        super().train(mode)
        # super().train() flips every child back to train mode, so the
        # frozen stages must be re-frozen afterwards.
        self._freeze_stages()
        if mode and self.norm_eval:
            for m in self.modules():
                if isinstance(m, _BatchNorm):
                    m.eval()
    def forward(self, x: torch.Tensor) -> tuple:
        """Forward batch_inputs from the data_preprocessor.

        Returns the feature maps of every layer whose index (0 = stem)
        appears in ``out_indices``.
        """
        outs = []
        for i, layer_name in enumerate(self.layers):
            layer = getattr(self, layer_name)
            x = layer(x)
            if i in self.out_indices:
                outs.append(x)
        return tuple(outs)
| 7,920 | 34.048673 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/backbones/cspnext.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from typing import List, Sequence, Union
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmdet.models.backbones.csp_darknet import CSPLayer
from mmdet.utils import ConfigType, OptConfigType, OptMultiConfig
from mmyolo.registry import MODELS
from ..layers import SPPFBottleneck
from .base_backbone import BaseBackbone
@MODELS.register_module()
class CSPNeXt(BaseBackbone):
    """CSPNeXt backbone used in RTMDet.

    Args:
        arch (str): Architecture of CSPNeXt, from {P5, P6}.
            Defaults to P5.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        input_channels (int): Number of input image channels. Defaults to 3.
        out_indices (Sequence[int]): Output from which stages.
            Defaults to (2, 3, 4).
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Defaults to -1.
        plugins (list[dict]): List of plugins for stages, each dict contains:

            - cfg (dict, required): Cfg dict to build plugin.Defaults to
            - stages (tuple[bool], optional): Stages to apply plugin, length
              should be same as 'num_stages'.
        use_depthwise (bool): Whether to use depthwise separable convolution.
            Defaults to False.
        expand_ratio (float): Ratio to adjust the number of channels of the
            hidden layer. Defaults to 0.5.
        arch_ovewrite (list): Overwrite default arch settings.
            Defaults to None.
        channel_attention (bool): Whether to add channel attention in each
            stage. Defaults to True.
        conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for
            convolution layer. Defaults to None.
        norm_cfg (:obj:`ConfigDict` or dict): Dictionary to construct and
            config norm layer. Defaults to dict(type='BN', requires_grad=True).
        act_cfg (:obj:`ConfigDict` or dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        init_cfg (:obj:`ConfigDict` or dict or list[dict] or
            list[:obj:`ConfigDict`]): Initialization config dict.
    """
    # From left to right:
    # in_channels, out_channels, num_blocks, add_identity, use_spp
    arch_settings = {
        'P5': [[64, 128, 3, True, False], [128, 256, 6, True, False],
               [256, 512, 6, True, False], [512, 1024, 3, False, True]],
        'P6': [[64, 128, 3, True, False], [128, 256, 6, True, False],
               [256, 512, 6, True, False], [512, 768, 3, True, False],
               [768, 1024, 3, False, True]]
    }
    def __init__(
        self,
        arch: str = 'P5',
        deepen_factor: float = 1.0,
        widen_factor: float = 1.0,
        input_channels: int = 3,
        out_indices: Sequence[int] = (2, 3, 4),
        frozen_stages: int = -1,
        plugins: Union[dict, List[dict]] = None,
        use_depthwise: bool = False,
        expand_ratio: float = 0.5,
        arch_ovewrite: dict = None,
        channel_attention: bool = True,
        conv_cfg: OptConfigType = None,
        norm_cfg: ConfigType = dict(type='BN'),
        act_cfg: ConfigType = dict(type='SiLU', inplace=True),
        norm_eval: bool = False,
        init_cfg: OptMultiConfig = dict(
            type='Kaiming',
            layer='Conv2d',
            a=math.sqrt(5),
            distribution='uniform',
            mode='fan_in',
            nonlinearity='leaky_relu')
    ) -> None:
        arch_setting = self.arch_settings[arch]
        if arch_ovewrite:
            arch_setting = arch_ovewrite
        self.channel_attention = channel_attention
        self.use_depthwise = use_depthwise
        # Depthwise-separable convs trade a little accuracy for speed.
        self.conv = DepthwiseSeparableConvModule \
            if use_depthwise else ConvModule
        self.expand_ratio = expand_ratio
        self.conv_cfg = conv_cfg
        super().__init__(
            arch_setting,
            deepen_factor,
            widen_factor,
            input_channels,
            out_indices,
            frozen_stages=frozen_stages,
            plugins=plugins,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            norm_eval=norm_eval,
            init_cfg=init_cfg)
    def build_stem_layer(self) -> nn.Module:
        """Build a stem layer.

        Three 3x3 convs; only the first downsamples (stride 2). The
        intermediate width is half of the first stage's input width, scaled
        by ``widen_factor``.
        """
        # Hoisted: the half-width expression was repeated four times.
        stem_width = int(self.arch_setting[0][0] * self.widen_factor // 2)
        stem = nn.Sequential(
            ConvModule(
                # Fix: honor ``input_channels`` instead of the hard-coded 3
                # (consistent with YOLOv5/YOLOv8 backbones), so non-RGB
                # inputs work.
                self.input_channels,
                stem_width,
                3,
                padding=1,
                stride=2,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg),
            ConvModule(
                stem_width,
                stem_width,
                3,
                padding=1,
                stride=1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg),
            ConvModule(
                stem_width,
                int(self.arch_setting[0][0] * self.widen_factor),
                3,
                padding=1,
                stride=1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg))
        return stem
    def build_stage_layer(self, stage_idx: int, setting: list) -> list:
        """Build a stage layer.

        Args:
            stage_idx (int): The index of a stage layer.
            setting (list): The architecture setting of a stage layer.
        """
        in_channels, out_channels, num_blocks, add_identity, use_spp = setting
        in_channels = int(in_channels * self.widen_factor)
        out_channels = int(out_channels * self.widen_factor)
        # At least one block survives even for tiny deepen factors.
        num_blocks = max(round(num_blocks * self.deepen_factor), 1)
        stage = []
        conv_layer = self.conv(
            in_channels,
            out_channels,
            3,
            stride=2,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        stage.append(conv_layer)
        if use_spp:
            # Note: SPP precedes the CSP layer here (YOLOv5 appends it
            # after) — this ordering matches RTMDet.
            spp = SPPFBottleneck(
                out_channels,
                out_channels,
                kernel_sizes=5,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
            stage.append(spp)
        csp_layer = CSPLayer(
            out_channels,
            out_channels,
            num_blocks=num_blocks,
            add_identity=add_identity,
            use_depthwise=self.use_depthwise,
            use_cspnext_block=True,
            expand_ratio=self.expand_ratio,
            channel_attention=self.channel_attention,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        stage.append(csp_layer)
        return stage
| 7,258 | 37.611702 | 79 | py |
mmyolo | mmyolo-main/mmyolo/models/backbones/csp_darknet.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple, Union
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmdet.models.backbones.csp_darknet import CSPLayer, Focus
from mmdet.utils import ConfigType, OptMultiConfig
from mmyolo.registry import MODELS
from ..layers import CSPLayerWithTwoConv, SPPFBottleneck
from ..utils import make_divisible, make_round
from .base_backbone import BaseBackbone
@MODELS.register_module()
class YOLOv5CSPDarknet(BaseBackbone):
    """CSP-Darknet backbone used in YOLOv5.

    Args:
        arch (str): Architecture of CSP-Darknet, from {P5, P6}.
            Defaults to P5.
        plugins (list[dict]): List of plugins for stages, each dict contains:

            - cfg (dict, required): Cfg dict to build plugin.
            - stages (tuple[bool], optional): Stages to apply plugin, length
              should be same as 'num_stages'.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        input_channels (int): Number of input image channels. Defaults to: 3.
        out_indices (Tuple[int]): Output from which stages.
            Defaults to (2, 3, 4).
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Defaults to -1.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Defaults to dict(type='BN', requires_grad=True).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Defaults to False.
        init_cfg (Union[dict,list[dict]], optional): Initialization config
            dict. Defaults to None.

    Example:
        >>> from mmyolo.models import YOLOv5CSPDarknet
        >>> import torch
        >>> model = YOLOv5CSPDarknet()
        >>> model.eval()
        >>> inputs = torch.rand(1, 3, 416, 416)
        >>> level_outputs = model(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        ...
        (1, 256, 52, 52)
        (1, 512, 26, 26)
        (1, 1024, 13, 13)
    """
    # From left to right:
    # in_channels, out_channels, num_blocks, add_identity, use_spp
    arch_settings = {
        'P5': [[64, 128, 3, True, False], [128, 256, 6, True, False],
               [256, 512, 9, True, False], [512, 1024, 3, True, True]],
        'P6': [[64, 128, 3, True, False], [128, 256, 6, True, False],
               [256, 512, 9, True, False], [512, 768, 3, True, False],
               [768, 1024, 3, True, True]]
    }
    def __init__(self,
                 arch: str = 'P5',
                 plugins: Union[dict, List[dict]] = None,
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 input_channels: int = 3,
                 out_indices: Tuple[int] = (2, 3, 4),
                 frozen_stages: int = -1,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 norm_eval: bool = False,
                 init_cfg: OptMultiConfig = None):
        super().__init__(
            self.arch_settings[arch],
            deepen_factor,
            widen_factor,
            input_channels=input_channels,
            out_indices=out_indices,
            plugins=plugins,
            frozen_stages=frozen_stages,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            norm_eval=norm_eval,
            init_cfg=init_cfg)
    def build_stem_layer(self) -> nn.Module:
        """Build a stem layer.

        A single 6x6 stride-2 conv (the Focus-replacement stem of newer
        YOLOv5 releases).
        """
        return ConvModule(
            self.input_channels,
            make_divisible(self.arch_setting[0][0], self.widen_factor),
            kernel_size=6,
            stride=2,
            padding=2,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
    def build_stage_layer(self, stage_idx: int, setting: list) -> list:
        """Build a stage layer.

        Args:
            stage_idx (int): The index of a stage layer.
            setting (list): The architecture setting of a stage layer.
        """
        in_channels, out_channels, num_blocks, add_identity, use_spp = setting
        in_channels = make_divisible(in_channels, self.widen_factor)
        out_channels = make_divisible(out_channels, self.widen_factor)
        num_blocks = make_round(num_blocks, self.deepen_factor)
        stage = []
        # Stride-2 conv downsamples, then a CSP layer; SPP (if any) comes
        # last — note CSPNeXt uses the opposite SPP/CSP order.
        conv_layer = ConvModule(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        stage.append(conv_layer)
        csp_layer = CSPLayer(
            out_channels,
            out_channels,
            num_blocks=num_blocks,
            add_identity=add_identity,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        stage.append(csp_layer)
        if use_spp:
            spp = SPPFBottleneck(
                out_channels,
                out_channels,
                kernel_sizes=5,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
            stage.append(spp)
        return stage
    def init_weights(self):
        """Initialize the parameters.

        With no explicit ``init_cfg``, falls back to PyTorch's default
        Conv2d initialization to match the upstream YOLOv5 weights.
        """
        if self.init_cfg is None:
            for m in self.modules():
                if isinstance(m, torch.nn.Conv2d):
                    # In order to be consistent with the source code,
                    # reset the Conv2d initialization parameters
                    m.reset_parameters()
        else:
            super().init_weights()
@MODELS.register_module()
class YOLOv8CSPDarknet(BaseBackbone):
    """CSP-Darknet backbone used in YOLOv8.

    Args:
        arch (str): Architecture of CSP-Darknet, from {P5}.
            Defaults to P5.
        last_stage_out_channels (int): Final layer output channel.
            Defaults to 1024.
        plugins (list[dict]): List of plugins for stages, each dict contains:

            - cfg (dict, required): Cfg dict to build plugin.
            - stages (tuple[bool], optional): Stages to apply plugin, length
              should be same as 'num_stages'.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        input_channels (int): Number of input image channels. Defaults to: 3.
        out_indices (Tuple[int]): Output from which stages.
            Defaults to (2, 3, 4).
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Defaults to -1.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Defaults to dict(type='BN', requires_grad=True).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only. Defaults to False.
        init_cfg (Union[dict,list[dict]], optional): Initialization config
            dict. Defaults to None.

    Example:
        >>> from mmyolo.models import YOLOv8CSPDarknet
        >>> import torch
        >>> model = YOLOv8CSPDarknet()
        >>> model.eval()
        >>> inputs = torch.rand(1, 3, 416, 416)
        >>> level_outputs = model(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        ...
        (1, 256, 52, 52)
        (1, 512, 26, 26)
        (1, 1024, 13, 13)
    """
    # From left to right:
    # in_channels, out_channels, num_blocks, add_identity, use_spp
    # the final out_channels will be set according to the param.
    arch_settings = {
        'P5': [[64, 128, 3, True, False], [128, 256, 6, True, False],
               [256, 512, 6, True, False], [512, None, 3, True, True]],
    }
    def __init__(self,
                 arch: str = 'P5',
                 last_stage_out_channels: int = 1024,
                 plugins: Union[dict, List[dict]] = None,
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 input_channels: int = 3,
                 out_indices: Tuple[int] = (2, 3, 4),
                 frozen_stages: int = -1,
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 norm_eval: bool = False,
                 init_cfg: OptMultiConfig = None):
        # Fix: work on a per-instance copy of the arch setting. The previous
        # in-place assignment (``self.arch_settings[arch][-1][1] = ...``)
        # mutated the *class-level* dict, so one instance's
        # ``last_stage_out_channels`` leaked into every instance created
        # afterwards.
        arch_setting = [list(stage) for stage in self.arch_settings[arch]]
        arch_setting[-1][1] = last_stage_out_channels
        super().__init__(
            arch_setting,
            deepen_factor,
            widen_factor,
            input_channels=input_channels,
            out_indices=out_indices,
            plugins=plugins,
            frozen_stages=frozen_stages,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            norm_eval=norm_eval,
            init_cfg=init_cfg)
    def build_stem_layer(self) -> nn.Module:
        """Build a stem layer (a single 3x3 stride-2 conv)."""
        return ConvModule(
            self.input_channels,
            make_divisible(self.arch_setting[0][0], self.widen_factor),
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
    def build_stage_layer(self, stage_idx: int, setting: list) -> list:
        """Build a stage layer.

        Args:
            stage_idx (int): The index of a stage layer.
            setting (list): The architecture setting of a stage layer.
        """
        in_channels, out_channels, num_blocks, add_identity, use_spp = setting
        in_channels = make_divisible(in_channels, self.widen_factor)
        out_channels = make_divisible(out_channels, self.widen_factor)
        num_blocks = make_round(num_blocks, self.deepen_factor)
        stage = []
        conv_layer = ConvModule(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        stage.append(conv_layer)
        # YOLOv8 uses the two-conv CSP variant (C2f) instead of CSPLayer.
        csp_layer = CSPLayerWithTwoConv(
            out_channels,
            out_channels,
            num_blocks=num_blocks,
            add_identity=add_identity,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        stage.append(csp_layer)
        if use_spp:
            spp = SPPFBottleneck(
                out_channels,
                out_channels,
                kernel_sizes=5,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
            stage.append(spp)
        return stage
    def init_weights(self):
        """Initialize the parameters.

        With no explicit ``init_cfg``, falls back to PyTorch's default
        Conv2d initialization to match the upstream YOLOv8 weights.
        """
        if self.init_cfg is None:
            for m in self.modules():
                if isinstance(m, torch.nn.Conv2d):
                    # In order to be consistent with the source code,
                    # reset the Conv2d initialization parameters
                    m.reset_parameters()
        else:
            super().init_weights()
@MODELS.register_module()
class YOLOXCSPDarknet(BaseBackbone):
    """CSP-Darknet backbone used in YOLOX.

    Args:
        arch (str): Architecture of CSP-Darknet, from {P5, P6}.
            Defaults to P5.
        plugins (list[dict]): List of plugins for stages, each dict contains:

            - cfg (dict, required): Cfg dict to build plugin.
            - stages (tuple[bool], optional): Stages to apply plugin, length
              should be same as 'num_stages'.
        deepen_factor (float): Depth multiplier, multiply number of
            blocks in CSP layer by this amount. Defaults to 1.0.
        widen_factor (float): Width multiplier, multiply number of
            channels in each layer by this amount. Defaults to 1.0.
        input_channels (int): Number of input image channels. Defaults to 3.
        out_indices (Tuple[int]): Output from which stages.
            Defaults to (2, 3, 4).
        frozen_stages (int): Stages to be frozen (stop grad and set eval
            mode). -1 means not freezing any parameters. Defaults to -1.
        use_depthwise (bool): Whether to use depthwise separable convolution.
            Defaults to False.
        spp_kernal_sizes: (tuple[int]): Sequential of kernel sizes of SPP
            layers. Defaults to (5, 9, 13).
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Defaults to dict(type='BN', momentum=0.03, eps=0.001).
        act_cfg (dict): Config dict for activation layer.
            Defaults to dict(type='SiLU', inplace=True).
        norm_eval (bool): Whether to set norm layers to eval mode, namely,
            freeze running stats (mean and var). Note: Effect on Batch Norm
            and its variants only.
        init_cfg (Union[dict,list[dict]], optional): Initialization config
            dict. Defaults to None.

    Example:
        >>> from mmyolo.models import YOLOXCSPDarknet
        >>> import torch
        >>> model = YOLOXCSPDarknet()
        >>> model.eval()
        >>> inputs = torch.rand(1, 3, 416, 416)
        >>> level_outputs = model(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        ...
        (1, 256, 52, 52)
        (1, 512, 26, 26)
        (1, 1024, 13, 13)
    """
    # From left to right:
    # in_channels, out_channels, num_blocks, add_identity, use_spp
    arch_settings = {
        'P5': [[64, 128, 3, True, False], [128, 256, 9, True, False],
               [256, 512, 9, True, False], [512, 1024, 3, False, True]],
    }
    def __init__(self,
                 arch: str = 'P5',
                 plugins: Union[dict, List[dict]] = None,
                 deepen_factor: float = 1.0,
                 widen_factor: float = 1.0,
                 input_channels: int = 3,
                 out_indices: Tuple[int] = (2, 3, 4),
                 frozen_stages: int = -1,
                 use_depthwise: bool = False,
                 spp_kernal_sizes: Tuple[int] = (5, 9, 13),
                 norm_cfg: ConfigType = dict(
                     type='BN', momentum=0.03, eps=0.001),
                 act_cfg: ConfigType = dict(type='SiLU', inplace=True),
                 norm_eval: bool = False,
                 init_cfg: OptMultiConfig = None):
        self.use_depthwise = use_depthwise
        self.spp_kernal_sizes = spp_kernal_sizes
        super().__init__(self.arch_settings[arch], deepen_factor, widen_factor,
                         input_channels, out_indices, frozen_stages, plugins,
                         norm_cfg, act_cfg, norm_eval, init_cfg)
    def build_stem_layer(self) -> nn.Module:
        """Build a stem layer (Focus: space-to-depth + conv)."""
        return Focus(
            # Fix: honor ``input_channels`` and the arch setting instead of
            # the hard-coded ``3`` / ``64`` (consistent with the YOLOv5 and
            # YOLOv8 backbones above), so non-RGB inputs and overridden
            # arch settings work.
            self.input_channels,
            make_divisible(self.arch_setting[0][0], self.widen_factor),
            kernel_size=3,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
    def build_stage_layer(self, stage_idx: int, setting: list) -> list:
        """Build a stage layer.

        Args:
            stage_idx (int): The index of a stage layer.
            setting (list): The architecture setting of a stage layer.
        """
        in_channels, out_channels, num_blocks, add_identity, use_spp = setting
        in_channels = make_divisible(in_channels, self.widen_factor)
        out_channels = make_divisible(out_channels, self.widen_factor)
        num_blocks = make_round(num_blocks, self.deepen_factor)
        stage = []
        conv = DepthwiseSeparableConvModule \
            if self.use_depthwise else ConvModule
        conv_layer = conv(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        stage.append(conv_layer)
        if use_spp:
            # YOLOX inserts SPP before the CSP layer (unlike YOLOv5).
            spp = SPPFBottleneck(
                out_channels,
                out_channels,
                kernel_sizes=self.spp_kernal_sizes,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
            stage.append(spp)
        csp_layer = CSPLayer(
            out_channels,
            out_channels,
            num_blocks=num_blocks,
            add_identity=add_identity,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
        stage.append(csp_layer)
        return stage
| 17,158 | 39.091121 | 79 | py |
mmyolo | mmyolo-main/mmyolo/datasets/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Sequence
import numpy as np
import torch
from mmengine.dataset import COLLATE_FUNCTIONS
from ..registry import TASK_UTILS
@COLLATE_FUNCTIONS.register_module()
def yolov5_collate(data_batch: Sequence,
                   use_ms_training: bool = False) -> dict:
    """Rewrite collate_fn to get faster training speed.

    Stacks image tensors and concatenates all ground-truth boxes/labels of
    the batch into a single ``(num_gt, 6)`` tensor of
    ``(batch_idx, label, x1, y1, x2, y2)`` rows.

    Args:
       data_batch (Sequence): Batch of data.
       use_ms_training (bool): Whether to use multi-scale training.
    """
    images = []
    bboxes_labels_list = []
    masks_list = []
    for batch_idx, sample in enumerate(data_batch):
        data_samples = sample['data_samples']
        images.append(sample['inputs'])
        bboxes = data_samples.gt_instances.bboxes.tensor
        labels = data_samples.gt_instances.labels
        if 'masks' in data_samples.gt_instances:
            masks_list.append(
                data_samples.gt_instances.masks.to_tensor(
                    dtype=torch.bool, device=bboxes.device))
        # Prefix every GT row with its sample index within the batch.
        index_col = labels.new_full((len(labels), 1), batch_idx)
        bboxes_labels_list.append(
            torch.cat((index_col, labels[:, None], bboxes), dim=1))
    collated_results = {
        'data_samples': {
            'bboxes_labels': torch.cat(bboxes_labels_list, 0)
        }
    }
    if masks_list:
        collated_results['data_samples']['masks'] = torch.cat(masks_list, 0)
    if use_ms_training:
        # Multi-scale training keeps per-image tensors unstacked.
        collated_results['inputs'] = images
    else:
        collated_results['inputs'] = torch.stack(images, 0)
    return collated_results
@TASK_UTILS.register_module()
class BatchShapePolicy:
    """BatchShapePolicy is only used in the testing phase, which can reduce the
    number of pad pixels during batch inference.

    Args:
        batch_size (int): Single GPU batch size during batch inference.
            Defaults to 32.
        img_size (int): Expected output image size. Defaults to 640.
        size_divisor (int): The minimum size that is divisible
            by size_divisor. Defaults to 32.
        extra_pad_ratio (float): Extra pad ratio. Defaults to 0.5.
    """
    def __init__(self,
                 batch_size: int = 32,
                 img_size: int = 640,
                 size_divisor: int = 32,
                 extra_pad_ratio: float = 0.5):
        self.batch_size = batch_size
        self.img_size = img_size
        self.size_divisor = size_divisor
        self.extra_pad_ratio = extra_pad_ratio
    def __call__(self, data_list: List[dict]) -> List[dict]:
        """Sort data infos by aspect ratio and attach a shared
        ``batch_shape`` to every item so images in one batch pad alike.

        Args:
            data_list (List[dict]): Data infos, each with ``width`` and
                ``height`` keys.

        Returns:
            List[dict]: Re-ordered data infos with ``batch_shape`` set.
        """
        # Fix: an empty dataset previously raised IndexError at
        # ``batch_index[-1]`` below.
        if not data_list:
            return data_list
        image_shapes = []
        for data_info in data_list:
            image_shapes.append((data_info['width'], data_info['height']))
        image_shapes = np.array(image_shapes, dtype=np.float64)
        n = len(image_shapes)  # number of images
        batch_index = np.floor(np.arange(n) / self.batch_size).astype(
            np.int64)  # batch index
        number_of_batches = batch_index[-1] + 1  # number of batches
        aspect_ratio = image_shapes[:, 1] / image_shapes[:, 0]  # aspect ratio
        # Sort so images with similar aspect ratios land in the same batch.
        irect = aspect_ratio.argsort()
        data_list = [data_list[i] for i in irect]
        aspect_ratio = aspect_ratio[irect]
        # Set training image shapes
        shapes = [[1, 1]] * number_of_batches
        for i in range(number_of_batches):
            aspect_ratio_index = aspect_ratio[batch_index == i]
            min_index, max_index = aspect_ratio_index.min(
            ), aspect_ratio_index.max()
            if max_index < 1:
                # Every image in the batch is wider than tall: shrink height.
                shapes[i] = [max_index, 1]
            elif min_index > 1:
                # Every image in the batch is taller than wide: shrink width.
                shapes[i] = [1, 1 / min_index]
        batch_shapes = np.ceil(
            np.array(shapes) * self.img_size / self.size_divisor +
            self.extra_pad_ratio).astype(np.int64) * self.size_divisor
        for i, data_info in enumerate(data_list):
            data_info['batch_shape'] = batch_shapes[batch_index[i]]
        return data_list
| 4,075 | 34.443478 | 79 | py |
mmyolo | mmyolo-main/mmyolo/datasets/transforms/mix_img_transforms.py | # Copyright (c) OpenMMLab. All rights reserved.
import collections
import copy
from abc import ABCMeta, abstractmethod
from typing import Optional, Sequence, Tuple, Union
import mmcv
import numpy as np
from mmcv.transforms import BaseTransform
from mmdet.structures.bbox import autocast_box_type
from mmengine.dataset import BaseDataset
from mmengine.dataset.base_dataset import Compose
from numpy import random
from mmyolo.registry import TRANSFORMS
class BaseMixImageTransform(BaseTransform, metaclass=ABCMeta):
"""A Base Transform of multiple images mixed.
Suitable for training on multiple images mixed data augmentation like
mosaic and mixup.
Cached mosaic transform will random select images from the cache
and combine them into one output image if use_cached is True.
Args:
pre_transform(Sequence[str]): Sequence of transform object or
config dict to be composed. Defaults to None.
prob(float): The transformation probability. Defaults to 1.0.
use_cached (bool): Whether to use cache. Defaults to False.
max_cached_images (int): The maximum length of the cache. The larger
the cache, the stronger the randomness of this transform. As a
rule of thumb, providing 10 caches for each image suffices for
randomness. Defaults to 40.
random_pop (bool): Whether to randomly pop a result from the cache
when the cache is full. If set to False, use FIFO popping method.
Defaults to True.
max_refetch (int): The maximum number of retry iterations for getting
valid results from the pipeline. If the number of iterations is
greater than `max_refetch`, but results is still None, then the
iteration is terminated and raise the error. Defaults to 15.
"""
def __init__(self,
pre_transform: Optional[Sequence[str]] = None,
prob: float = 1.0,
use_cached: bool = False,
max_cached_images: int = 40,
random_pop: bool = True,
max_refetch: int = 15):
self.max_refetch = max_refetch
self.prob = prob
self.use_cached = use_cached
self.max_cached_images = max_cached_images
self.random_pop = random_pop
self.results_cache = []
if pre_transform is None:
self.pre_transform = None
else:
self.pre_transform = Compose(pre_transform)
    @abstractmethod
    def get_indexes(self, dataset: Union[BaseDataset,
                                         list]) -> Union[list, int]:
        """Call function to collect indexes.

        Subclasses decide how many auxiliary images are needed and return
        their indexes into ``dataset``.

        Args:
            dataset (:obj:`Dataset` or list): The dataset or cached list.

        Returns:
            list or int: indexes.
        """
        pass
    @abstractmethod
    def mix_img_transform(self, results: dict) -> dict:
        """Mixed image data transformation.

        Called with ``results['mix_results']`` already populated; subclasses
        implement the actual mixing (e.g. Mosaic, MixUp).

        Args:
            results (dict): Result dict.

        Returns:
            results (dict): Updated result dict.
        """
        pass
@autocast_box_type()
def transform(self, results: dict) -> dict:
    """Data augmentation function.

    The transform steps are as follows:
    1. Randomly generate index list of other images.
    2. Before Mosaic or MixUp need to go through the necessary
       pre_transform, such as MixUp' pre_transform pipeline
       include: 'LoadImageFromFile','LoadAnnotations',
       'Mosaic' and 'RandomAffine'.
    3. Use mix_img_transform function to implement specific
       mix operations.

    Args:
        results (dict): Result dict.

    Returns:
        results (dict): Updated result dict.
    """
    # ``random`` here is ``numpy.random`` (see the module imports), so
    # ``uniform(0, 1)`` draws from [0, 1).
    if random.uniform(0, 1) > self.prob:
        return results

    if self.use_cached:
        # Be careful: deep copying can be very time-consuming
        # if results includes dataset.
        dataset = results.pop('dataset', None)
        self.results_cache.append(copy.deepcopy(results))
        if len(self.results_cache) > self.max_cached_images:
            if self.random_pop:
                # numpy ``randint`` excludes the high endpoint, so the
                # popped index lies in [0, len - 2].
                index = random.randint(0, len(self.results_cache) - 1)
            else:
                # FIFO: drop the oldest cached result.
                index = 0
            self.results_cache.pop(index)

        # Not enough cached samples yet for a 4-image mix; skip this time.
        if len(self.results_cache) <= 4:
            return results
    else:
        assert 'dataset' in results
        # Be careful: deep copying can be very time-consuming
        # if results includes dataset.
        dataset = results.pop('dataset', None)

    for _ in range(self.max_refetch):
        # get index of one or three other images
        if self.use_cached:
            indexes = self.get_indexes(self.results_cache)
        else:
            indexes = self.get_indexes(dataset)

        if not isinstance(indexes, collections.abc.Sequence):
            indexes = [indexes]

        if self.use_cached:
            mix_results = [
                copy.deepcopy(self.results_cache[i]) for i in indexes
            ]
        else:
            # get images information will be used for Mosaic or MixUp
            mix_results = [
                copy.deepcopy(dataset.get_data_info(index))
                for index in indexes
            ]

        if self.pre_transform is not None:
            for i, data in enumerate(mix_results):
                # pre_transform may also require dataset
                data.update({'dataset': dataset})
                # before Mosaic or MixUp need to go through
                # the necessary pre_transform
                _results = self.pre_transform(data)
                _results.pop('dataset')
                mix_results[i] = _results

        if None not in mix_results:
            results['mix_results'] = mix_results
            break
        # Reached only when at least one retrieved result was None and
        # another fetch attempt is about to start.
        print('Repeated calculation')
    else:
        # ``for``/``else``: runs only when the loop never ``break``s,
        # i.e. every refetch attempt failed.
        raise RuntimeError(
            'The loading pipeline of the original dataset'
            ' always return None. Please check the correctness '
            'of the dataset and its pipeline.')

    # Mosaic or MixUp
    results = self.mix_img_transform(results)

    if 'mix_results' in results:
        results.pop('mix_results')
    # Restore the dataset handle popped above for downstream transforms.
    results['dataset'] = dataset

    return results
@TRANSFORMS.register_module()
class Mosaic(BaseMixImageTransform):
    """Mosaic augmentation.

    Given 4 images, mosaic transform combines them into
    one output image. The output image is composed of the parts from each sub-
    image.

    .. code:: text

                        mosaic transform
                           center_x
                +------------------------------+
                |       pad        |           |
                |      +-----------+    pad    |
                |      |           |           |
                |      |  image1   +-----------+
                |      |           |           |
                |      |           |   image2  |
     center_y   |----+-+-----------+-----------+
                |    |   cropped   |           |
                |pad |   image3    |   image4  |
                |    |             |           |
                +----|-------------+-----------+
                     |             |
                     +-------------+

    The mosaic transform steps are as follows:

        1. Choose the mosaic center as the intersections of 4 images
        2. Get the left top image according to the index, and randomly
           sample another 3 images from the custom dataset.
        3. Sub image will be cropped if image is larger than mosaic patch

    Required Keys:

    - img
    - gt_bboxes (BaseBoxes[torch.float32]) (optional)
    - gt_bboxes_labels (np.int64) (optional)
    - gt_ignore_flags (bool) (optional)
    - mix_results (List[dict])

    Modified Keys:

    - img
    - img_shape
    - gt_bboxes (optional)
    - gt_bboxes_labels (optional)
    - gt_ignore_flags (optional)

    Args:
        img_scale (Sequence[int]): Image size after mosaic pipeline of single
            image. The shape order should be (width, height).
            Defaults to (640, 640).
        center_ratio_range (Sequence[float]): Center ratio range of mosaic
            output. Defaults to (0.5, 1.5).
        bbox_clip_border (bool, optional): Whether to clip the objects outside
            the border of the image. In some dataset like MOT17, the gt bboxes
            are allowed to cross the border of images. Therefore, we don't
            need to clip the gt bboxes in these cases. Defaults to True.
        pad_val (int): Pad value. Defaults to 114.
        pre_transform(Sequence[dict]): Sequence of transform object or
            config dict to be composed.
        prob (float): Probability of applying this transformation.
            Defaults to 1.0.
        use_cached (bool): Whether to use cache. Defaults to False.
        max_cached_images (int): The maximum length of the cache. The larger
            the cache, the stronger the randomness of this transform. As a
            rule of thumb, providing 10 caches for each image suffices for
            randomness. Defaults to 40.
        random_pop (bool): Whether to randomly pop a result from the cache
            when the cache is full. If set to False, use FIFO popping method.
            Defaults to True.
        max_refetch (int): The maximum number of retry iterations for getting
            valid results from the pipeline. If the number of iterations is
            greater than `max_refetch`, but results is still None, then the
            iteration is terminated and raise the error. Defaults to 15.
    """

    def __init__(self,
                 img_scale: Tuple[int, int] = (640, 640),
                 center_ratio_range: Tuple[float, float] = (0.5, 1.5),
                 bbox_clip_border: bool = True,
                 pad_val: float = 114.0,
                 pre_transform: Sequence[dict] = None,
                 prob: float = 1.0,
                 use_cached: bool = False,
                 max_cached_images: int = 40,
                 random_pop: bool = True,
                 max_refetch: int = 15):
        assert isinstance(img_scale, tuple)
        assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. ' \
                                 f'got {prob}.'
        if use_cached:
            # Mosaic mixes the current image with 3 others, hence >= 4.
            assert max_cached_images >= 4, 'The length of cache must >= 4, ' \
                                           f'but got {max_cached_images}.'

        super().__init__(
            pre_transform=pre_transform,
            prob=prob,
            use_cached=use_cached,
            max_cached_images=max_cached_images,
            random_pop=random_pop,
            max_refetch=max_refetch)

        self.img_scale = img_scale
        self.center_ratio_range = center_ratio_range
        self.bbox_clip_border = bbox_clip_border
        self.pad_val = pad_val

    def get_indexes(self, dataset: Union[BaseDataset, list]) -> list:
        """Call function to collect indexes.

        Args:
            dataset (:obj:`Dataset` or list): The dataset or cached list.

        Returns:
            list: indexes.
        """
        # ``random`` is ``numpy.random``; ``randint`` excludes the high
        # endpoint, so the 3 indexes fall in [0, len(dataset) - 1].
        indexes = [random.randint(0, len(dataset)) for _ in range(3)]
        return indexes

    def mix_img_transform(self, results: dict) -> dict:
        """Mixed image data transformation.

        Args:
            results (dict): Result dict.

        Returns:
            results (dict): Updated result dict.
        """
        assert 'mix_results' in results
        mosaic_bboxes = []
        mosaic_bboxes_labels = []
        mosaic_ignore_flags = []
        mosaic_masks = []
        with_mask = True if 'gt_masks' in results else False
        # self.img_scale is wh format
        img_scale_w, img_scale_h = self.img_scale

        # The mosaic canvas is twice the target size (one quadrant per image).
        if len(results['img'].shape) == 3:
            mosaic_img = np.full(
                (int(img_scale_h * 2), int(img_scale_w * 2), 3),
                self.pad_val,
                dtype=results['img'].dtype)
        else:
            mosaic_img = np.full((int(img_scale_h * 2), int(img_scale_w * 2)),
                                 self.pad_val,
                                 dtype=results['img'].dtype)

        # mosaic center x, y
        center_x = int(random.uniform(*self.center_ratio_range) * img_scale_w)
        center_y = int(random.uniform(*self.center_ratio_range) * img_scale_h)
        center_position = (center_x, center_y)

        loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right')
        for i, loc in enumerate(loc_strs):
            # The current sample fills the top-left quadrant; the other
            # three come from ``mix_results``.
            if loc == 'top_left':
                results_patch = results
            else:
                results_patch = results['mix_results'][i - 1]

            img_i = results_patch['img']
            h_i, w_i = img_i.shape[:2]
            # keep_ratio resize
            scale_ratio_i = min(img_scale_h / h_i, img_scale_w / w_i)
            img_i = mmcv.imresize(
                img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i)))

            # compute the combine parameters
            paste_coord, crop_coord = self._mosaic_combine(
                loc, center_position, img_i.shape[:2][::-1])
            x1_p, y1_p, x2_p, y2_p = paste_coord
            x1_c, y1_c, x2_c, y2_c = crop_coord

            # crop and paste image
            mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c]

            # adjust coordinate
            gt_bboxes_i = results_patch['gt_bboxes']
            gt_bboxes_labels_i = results_patch['gt_bboxes_labels']
            gt_ignore_flags_i = results_patch['gt_ignore_flags']

            # Annotation shift = paste origin minus crop origin.
            padw = x1_p - x1_c
            padh = y1_p - y1_c
            gt_bboxes_i.rescale_([scale_ratio_i, scale_ratio_i])
            gt_bboxes_i.translate_([padw, padh])
            mosaic_bboxes.append(gt_bboxes_i)
            mosaic_bboxes_labels.append(gt_bboxes_labels_i)
            mosaic_ignore_flags.append(gt_ignore_flags_i)
            if with_mask and results_patch.get('gt_masks', None) is not None:
                gt_masks_i = results_patch['gt_masks']
                gt_masks_i = gt_masks_i.rescale(float(scale_ratio_i))
                # NOTE(review): ``out_shape`` is built as
                # (img_scale_w * 2, img_scale_h * 2) from the (w, h)
                # ``img_scale``; mask ``translate`` out_shape is
                # conventionally (h, w). Equivalent only for square scales
                # -- confirm for non-square ``img_scale``.
                gt_masks_i = gt_masks_i.translate(
                    out_shape=(int(self.img_scale[0] * 2),
                               int(self.img_scale[1] * 2)),
                    offset=padw,
                    direction='horizontal')
                gt_masks_i = gt_masks_i.translate(
                    out_shape=(int(self.img_scale[0] * 2),
                               int(self.img_scale[1] * 2)),
                    offset=padh,
                    direction='vertical')
                mosaic_masks.append(gt_masks_i)

        mosaic_bboxes = mosaic_bboxes[0].cat(mosaic_bboxes, 0)
        mosaic_bboxes_labels = np.concatenate(mosaic_bboxes_labels, 0)
        mosaic_ignore_flags = np.concatenate(mosaic_ignore_flags, 0)
        if self.bbox_clip_border:
            mosaic_bboxes.clip_([2 * img_scale_h, 2 * img_scale_w])
            if with_mask:
                mosaic_masks = mosaic_masks[0].cat(mosaic_masks)
                results['gt_masks'] = mosaic_masks
        else:
            # remove outside bboxes
            inside_inds = mosaic_bboxes.is_inside(
                [2 * img_scale_h, 2 * img_scale_w]).numpy()
            mosaic_bboxes = mosaic_bboxes[inside_inds]
            mosaic_bboxes_labels = mosaic_bboxes_labels[inside_inds]
            mosaic_ignore_flags = mosaic_ignore_flags[inside_inds]
            if with_mask:
                mosaic_masks = mosaic_masks[0].cat(mosaic_masks)[inside_inds]
                results['gt_masks'] = mosaic_masks

        results['img'] = mosaic_img
        results['img_shape'] = mosaic_img.shape
        results['gt_bboxes'] = mosaic_bboxes
        results['gt_bboxes_labels'] = mosaic_bboxes_labels
        results['gt_ignore_flags'] = mosaic_ignore_flags
        return results

    def _mosaic_combine(
            self, loc: str, center_position_xy: Sequence[float],
            img_shape_wh: Sequence[int]) -> Tuple[Tuple[int], Tuple[int]]:
        """Calculate global coordinate of mosaic image and local coordinate of
        cropped sub-image.

        Args:
            loc (str): Index for the sub-image, loc in ('top_left',
                'top_right', 'bottom_left', 'bottom_right').
            center_position_xy (Sequence[float]): Mixing center for 4 images,
                (x, y).
            img_shape_wh (Sequence[int]): Width and height of sub-image

        Returns:
            tuple[tuple[float]]: Corresponding coordinate of pasting and
                cropping

                - paste_coord (tuple): paste corner coordinate in mosaic
                  image.
                - crop_coord (tuple): crop corner coordinate in mosaic image.
        """
        assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right')
        if loc == 'top_left':
            # index0 to top left part of image
            x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \
                             max(center_position_xy[1] - img_shape_wh[1], 0), \
                             center_position_xy[0], \
                             center_position_xy[1]
            crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - (
                y2 - y1), img_shape_wh[0], img_shape_wh[1]
        elif loc == 'top_right':
            # index1 to top right part of image
            x1, y1, x2, y2 = center_position_xy[0], \
                             max(center_position_xy[1] - img_shape_wh[1], 0), \
                             min(center_position_xy[0] + img_shape_wh[0],
                                 self.img_scale[0] * 2), \
                             center_position_xy[1]
            crop_coord = 0, img_shape_wh[1] - (y2 - y1), min(
                img_shape_wh[0], x2 - x1), img_shape_wh[1]
        elif loc == 'bottom_left':
            # index2 to bottom left part of image
            x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \
                             center_position_xy[1], \
                             center_position_xy[0], \
                             min(self.img_scale[1] * 2, center_position_xy[1] +
                                 img_shape_wh[1])
            crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min(
                y2 - y1, img_shape_wh[1])
        else:
            # index3 to bottom right part of image
            x1, y1, x2, y2 = center_position_xy[0], \
                             center_position_xy[1], \
                             min(center_position_xy[0] + img_shape_wh[0],
                                 self.img_scale[0] * 2), \
                             min(self.img_scale[1] * 2, center_position_xy[1] +
                                 img_shape_wh[1])
            crop_coord = 0, 0, min(img_shape_wh[0],
                                   x2 - x1), min(y2 - y1, img_shape_wh[1])
        paste_coord = x1, y1, x2, y2
        return paste_coord, crop_coord

    def __repr__(self) -> str:
        repr_str = self.__class__.__name__
        repr_str += f'(img_scale={self.img_scale}, '
        repr_str += f'center_ratio_range={self.center_ratio_range}, '
        repr_str += f'pad_val={self.pad_val}, '
        repr_str += f'prob={self.prob})'
        return repr_str
@TRANSFORMS.register_module()
class Mosaic9(BaseMixImageTransform):
    """Mosaic9 augmentation.

    Given 9 images, mosaic transform combines them into
    one output image. The output image is composed of the parts from each sub-
    image.

    .. code:: text

                +-------------------------------+------------+
                | pad           |      pad      |            |
                |    +----------+               |            |
                |    |          +---------------+  top_right |
                |    |          |      top      |   image2   |
                |    | top_left |     image1    |            |
                |    |  image8  o--------+------+--------+---+
                |    |          |        |               |   |
                +----+----------+        |     right     |pad|
                |               | center |     image3    |   |
                |     left      | image0 +---------------+---|
                |    image7     |        |               |   |
            +---+-----------+---+--------+               |   |
            |   |  cropped  |            |  bottom_right |pad|
            |   |bottom_left|            |    image4     |   |
            |   |  image6   |   bottom   |               |   |
            +---|-----------+   image5   +---------------+---|
            |    pad        |            |        pad        |
            +---------------+------------+-------------------+

    The mosaic transform steps are as follows:

        1. Get the center image according to the index, and randomly
           sample another 8 images from the custom dataset.
        2. Randomly offset the image after Mosaic

    Required Keys:

    - img
    - gt_bboxes (BaseBoxes[torch.float32]) (optional)
    - gt_bboxes_labels (np.int64) (optional)
    - gt_ignore_flags (bool) (optional)
    - mix_results (List[dict])

    Modified Keys:

    - img
    - img_shape
    - gt_bboxes (optional)
    - gt_bboxes_labels (optional)
    - gt_ignore_flags (optional)

    Args:
        img_scale (Sequence[int]): Image size after mosaic pipeline of single
            image. The shape order should be (width, height).
            Defaults to (640, 640).
        bbox_clip_border (bool, optional): Whether to clip the objects outside
            the border of the image. In some dataset like MOT17, the gt bboxes
            are allowed to cross the border of images. Therefore, we don't
            need to clip the gt bboxes in these cases. Defaults to True.
        pad_val (int): Pad value. Defaults to 114.
        pre_transform(Sequence[dict]): Sequence of transform object or
            config dict to be composed.
        prob (float): Probability of applying this transformation.
            Defaults to 1.0.
        use_cached (bool): Whether to use cache. Defaults to False.
        max_cached_images (int): The maximum length of the cache. The larger
            the cache, the stronger the randomness of this transform. As a
            rule of thumb, providing 5 caches for each image suffices for
            randomness. Defaults to 50.
        random_pop (bool): Whether to randomly pop a result from the cache
            when the cache is full. If set to False, use FIFO popping method.
            Defaults to True.
        max_refetch (int): The maximum number of retry iterations for getting
            valid results from the pipeline. If the number of iterations is
            greater than `max_refetch`, but results is still None, then the
            iteration is terminated and raise the error. Defaults to 15.
    """

    def __init__(self,
                 img_scale: Tuple[int, int] = (640, 640),
                 bbox_clip_border: bool = True,
                 pad_val: Union[float, int] = 114.0,
                 pre_transform: Sequence[dict] = None,
                 prob: float = 1.0,
                 use_cached: bool = False,
                 max_cached_images: int = 50,
                 random_pop: bool = True,
                 max_refetch: int = 15):
        assert isinstance(img_scale, tuple)
        assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. ' \
                                 f'got {prob}.'
        if use_cached:
            # Mosaic9 mixes the current image with 8 others, hence >= 9.
            assert max_cached_images >= 9, 'The length of cache must >= 9, ' \
                                           f'but got {max_cached_images}.'

        super().__init__(
            pre_transform=pre_transform,
            prob=prob,
            use_cached=use_cached,
            max_cached_images=max_cached_images,
            random_pop=random_pop,
            max_refetch=max_refetch)

        self.img_scale = img_scale
        self.bbox_clip_border = bbox_clip_border
        self.pad_val = pad_val

        # intermediate variables
        # ``_mosaic_combine`` is called once per location in a fixed order
        # and reads the shapes recorded by the previous calls.
        self._current_img_shape = [0, 0]
        self._center_img_shape = [0, 0]
        self._previous_img_shape = [0, 0]

    def get_indexes(self, dataset: Union[BaseDataset, list]) -> list:
        """Call function to collect indexes.

        Args:
            dataset (:obj:`Dataset` or list): The dataset or cached list.

        Returns:
            list: indexes.
        """
        # ``random`` is ``numpy.random``; ``randint`` excludes the high
        # endpoint, so the 8 indexes fall in [0, len(dataset) - 1].
        indexes = [random.randint(0, len(dataset)) for _ in range(8)]
        return indexes

    def mix_img_transform(self, results: dict) -> dict:
        """Mixed image data transformation.

        Args:
            results (dict): Result dict.

        Returns:
            results (dict): Updated result dict.
        """
        assert 'mix_results' in results

        mosaic_bboxes = []
        mosaic_bboxes_labels = []
        mosaic_ignore_flags = []

        img_scale_w, img_scale_h = self.img_scale

        # The canvas is 3x the target size; a random 2x crop is taken below.
        if len(results['img'].shape) == 3:
            mosaic_img = np.full(
                (int(img_scale_h * 3), int(img_scale_w * 3), 3),
                self.pad_val,
                dtype=results['img'].dtype)
        else:
            mosaic_img = np.full((int(img_scale_h * 3), int(img_scale_w * 3)),
                                 self.pad_val,
                                 dtype=results['img'].dtype)

        # index = 0 is mean original image
        # len(results['mix_results']) = 8
        loc_strs = ('center', 'top', 'top_right', 'right', 'bottom_right',
                    'bottom', 'bottom_left', 'left', 'top_left')

        results_all = [results, *results['mix_results']]
        for index, results_patch in enumerate(results_all):
            img_i = results_patch['img']
            # keep_ratio resize
            img_i_h, img_i_w = img_i.shape[:2]
            scale_ratio_i = min(img_scale_h / img_i_h, img_scale_w / img_i_w)
            img_i = mmcv.imresize(
                img_i,
                (int(img_i_w * scale_ratio_i), int(img_i_h * scale_ratio_i)))

            paste_coord = self._mosaic_combine(loc_strs[index],
                                               img_i.shape[:2])

            padw, padh = paste_coord[:2]
            # Negative paste origins mean the patch extends past the canvas;
            # clamp to 0 and crop the overhanging part off the patch.
            x1, y1, x2, y2 = (max(x, 0) for x in paste_coord)
            mosaic_img[y1:y2, x1:x2] = img_i[y1 - padh:, x1 - padw:]

            gt_bboxes_i = results_patch['gt_bboxes']
            gt_bboxes_labels_i = results_patch['gt_bboxes_labels']
            gt_ignore_flags_i = results_patch['gt_ignore_flags']
            gt_bboxes_i.rescale_([scale_ratio_i, scale_ratio_i])
            gt_bboxes_i.translate_([padw, padh])
            mosaic_bboxes.append(gt_bboxes_i)
            mosaic_bboxes_labels.append(gt_bboxes_labels_i)
            mosaic_ignore_flags.append(gt_ignore_flags_i)

        # Offset
        offset_x = int(random.uniform(0, img_scale_w))
        offset_y = int(random.uniform(0, img_scale_h))
        mosaic_img = mosaic_img[offset_y:offset_y + 2 * img_scale_h,
                                offset_x:offset_x + 2 * img_scale_w]

        mosaic_bboxes = mosaic_bboxes[0].cat(mosaic_bboxes, 0)
        # Keep annotations aligned with the cropped canvas.
        mosaic_bboxes.translate_([-offset_x, -offset_y])
        mosaic_bboxes_labels = np.concatenate(mosaic_bboxes_labels, 0)
        mosaic_ignore_flags = np.concatenate(mosaic_ignore_flags, 0)

        if self.bbox_clip_border:
            mosaic_bboxes.clip_([2 * img_scale_h, 2 * img_scale_w])
        else:
            # remove outside bboxes
            inside_inds = mosaic_bboxes.is_inside(
                [2 * img_scale_h, 2 * img_scale_w]).numpy()
            mosaic_bboxes = mosaic_bboxes[inside_inds]
            mosaic_bboxes_labels = mosaic_bboxes_labels[inside_inds]
            mosaic_ignore_flags = mosaic_ignore_flags[inside_inds]

        results['img'] = mosaic_img
        results['img_shape'] = mosaic_img.shape
        results['gt_bboxes'] = mosaic_bboxes
        results['gt_bboxes_labels'] = mosaic_bboxes_labels
        results['gt_ignore_flags'] = mosaic_ignore_flags
        return results

    def _mosaic_combine(self, loc: str,
                        img_shape_hw: Tuple[int, int]) -> Tuple[int, ...]:
        """Calculate global coordinate of mosaic image.

        Stateful: relies on ``_center_img_shape`` / ``_previous_img_shape``
        recorded by earlier calls, so locations must be processed in the
        fixed ``loc_strs`` order.

        Args:
            loc (str): Index for the sub-image.
            img_shape_hw (Sequence[int]): Height and width of sub-image

        Returns:
            paste_coord (tuple): paste corner coordinate in mosaic image.
        """
        assert loc in ('center', 'top', 'top_right', 'right', 'bottom_right',
                       'bottom', 'bottom_left', 'left', 'top_left')

        img_scale_w, img_scale_h = self.img_scale

        self._current_img_shape = img_shape_hw
        current_img_h, current_img_w = self._current_img_shape
        previous_img_h, previous_img_w = self._previous_img_shape
        center_img_h, center_img_w = self._center_img_shape

        if loc == 'center':
            self._center_img_shape = self._current_img_shape
            #  xmin, ymin, xmax, ymax
            paste_coord = img_scale_w, \
                img_scale_h, \
                img_scale_w + current_img_w, \
                img_scale_h + current_img_h
        elif loc == 'top':
            paste_coord = img_scale_w, \
                img_scale_h - current_img_h, \
                img_scale_w + current_img_w, \
                img_scale_h
        elif loc == 'top_right':
            paste_coord = img_scale_w + previous_img_w, \
                img_scale_h - current_img_h, \
                img_scale_w + previous_img_w + current_img_w, \
                img_scale_h
        elif loc == 'right':
            paste_coord = img_scale_w + center_img_w, \
                img_scale_h, \
                img_scale_w + center_img_w + current_img_w, \
                img_scale_h + current_img_h
        elif loc == 'bottom_right':
            paste_coord = img_scale_w + center_img_w, \
                img_scale_h + previous_img_h, \
                img_scale_w + center_img_w + current_img_w, \
                img_scale_h + previous_img_h + current_img_h
        elif loc == 'bottom':
            paste_coord = img_scale_w + center_img_w - current_img_w, \
                img_scale_h + center_img_h, \
                img_scale_w + center_img_w, \
                img_scale_h + center_img_h + current_img_h
        elif loc == 'bottom_left':
            paste_coord = img_scale_w + center_img_w - \
                previous_img_w - current_img_w, \
                img_scale_h + center_img_h, \
                img_scale_w + center_img_w - previous_img_w, \
                img_scale_h + center_img_h + current_img_h
        elif loc == 'left':
            paste_coord = img_scale_w - current_img_w, \
                img_scale_h + center_img_h - current_img_h, \
                img_scale_w, \
                img_scale_h + center_img_h
        elif loc == 'top_left':
            paste_coord = img_scale_w - current_img_w, \
                img_scale_h + center_img_h - \
                previous_img_h - current_img_h, \
                img_scale_w, \
                img_scale_h + center_img_h - previous_img_h

        self._previous_img_shape = self._current_img_shape
        #  xmin, ymin, xmax, ymax
        return paste_coord

    def __repr__(self) -> str:
        repr_str = self.__class__.__name__
        repr_str += f'(img_scale={self.img_scale}, '
        repr_str += f'pad_val={self.pad_val}, '
        repr_str += f'prob={self.prob})'
        return repr_str
@TRANSFORMS.register_module()
class YOLOv5MixUp(BaseMixImageTransform):
    """MixUp data augmentation for YOLOv5.

    .. code:: text

    The mixup transform steps are as follows:

        1. Another random image is picked by dataset.
        2. Randomly obtain the fusion ratio from the beta distribution,
           then fuse the target of the original image and mixup image
           through this ratio.

    Required Keys:

    - img
    - gt_bboxes (BaseBoxes[torch.float32]) (optional)
    - gt_bboxes_labels (np.int64) (optional)
    - gt_ignore_flags (bool) (optional)
    - mix_results (List[dict])

    Modified Keys:

    - img
    - img_shape
    - gt_bboxes (optional)
    - gt_bboxes_labels (optional)
    - gt_ignore_flags (optional)

    Args:
        alpha (float): parameter of beta distribution to get mixup ratio.
            Defaults to 32.
        beta (float): parameter of beta distribution to get mixup ratio.
            Defaults to 32.
        pre_transform (Sequence[dict]): Sequence of transform object or
            config dict to be composed.
        prob (float): Probability of applying this transformation.
            Defaults to 1.0.
        use_cached (bool): Whether to use cache. Defaults to False.
        max_cached_images (int): The maximum length of the cache. The larger
            the cache, the stronger the randomness of this transform. As a
            rule of thumb, providing 10 caches for each image suffices for
            randomness. Defaults to 20.
        random_pop (bool): Whether to randomly pop a result from the cache
            when the cache is full. If set to False, use FIFO popping method.
            Defaults to True.
        max_refetch (int): The maximum number of iterations. If the number of
            iterations is greater than `max_refetch`, but gt_bbox is still
            empty, then the iteration is terminated. Defaults to 15.
    """

    def __init__(self,
                 alpha: float = 32.0,
                 beta: float = 32.0,
                 pre_transform: Sequence[dict] = None,
                 prob: float = 1.0,
                 use_cached: bool = False,
                 max_cached_images: int = 20,
                 random_pop: bool = True,
                 max_refetch: int = 15):
        if use_cached:
            # MixUp needs the current image plus one other, hence >= 2.
            assert max_cached_images >= 2, 'The length of cache must >= 2, ' \
                                           f'but got {max_cached_images}.'
        super().__init__(
            pre_transform=pre_transform,
            prob=prob,
            use_cached=use_cached,
            max_cached_images=max_cached_images,
            random_pop=random_pop,
            max_refetch=max_refetch)
        self.alpha = alpha
        self.beta = beta

    def get_indexes(self, dataset: Union[BaseDataset, list]) -> int:
        """Sample the index of the single extra image to mix in.

        Args:
            dataset (:obj:`Dataset` or list): The dataset or cached list.

        Returns:
            int: The sampled index.
        """
        return random.randint(0, len(dataset))

    def mix_img_transform(self, results: dict) -> dict:
        """Blend the current image with the retrieved one.

        Args:
            results (dict): Result dict.

        Returns:
            results (dict): Updated result dict.
        """
        assert 'mix_results' in results

        other = results['mix_results'][0]
        other_img = other['img']
        src_img = results['img']
        # YOLOv5 MixUp requires identically shaped inputs (the retrieved
        # sample goes through the same pre_transform pipeline).
        assert src_img.shape == other_img.shape

        # Fusion ratio from a beta distribution, centred around 0.5.
        ratio = np.random.beta(self.alpha, self.beta)
        blended = src_img * ratio + other_img * (1 - ratio)

        other_bboxes = other['gt_bboxes']
        other_labels = other['gt_bboxes_labels']
        other_ignore = other['gt_ignore_flags']

        # Annotations of both images are simply concatenated.
        merged_bboxes = other_bboxes.cat((results['gt_bboxes'], other_bboxes),
                                         dim=0)
        merged_labels = np.concatenate(
            (results['gt_bboxes_labels'], other_labels), axis=0)
        merged_ignore = np.concatenate(
            (results['gt_ignore_flags'], other_ignore), axis=0)

        if 'gt_masks' in results:
            assert 'gt_masks' in other
            results['gt_masks'] = results['gt_masks'].cat(
                [results['gt_masks'], other['gt_masks']])

        results['img'] = blended.astype(np.uint8)
        results['img_shape'] = blended.shape
        results['gt_bboxes'] = merged_bboxes
        results['gt_bboxes_labels'] = merged_labels
        results['gt_ignore_flags'] = merged_ignore
        return results
@TRANSFORMS.register_module()
class YOLOXMixUp(BaseMixImageTransform):
    """MixUp data augmentation for YOLOX.

    .. code:: text

                         mixup transform
                +---------------+--------------+
                | mixup image   |              |
                |      +--------|--------+     |
                |      |        |        |     |
                +---------------+        |     |
                |      |                 |     |
                |      |      image      |     |
                |      |                 |     |
                |      |                 |     |
                |      +-----------------+     |
                |             pad              |
                +------------------------------+

    The mixup transform steps are as follows:

        1. Another random image is picked by dataset and embedded in
           the top left patch(after padding and resizing)
        2. The target of mixup transform is the weighted average of mixup
           image and origin image.

    Required Keys:

    - img
    - gt_bboxes (BaseBoxes[torch.float32]) (optional)
    - gt_bboxes_labels (np.int64) (optional)
    - gt_ignore_flags (bool) (optional)
    - mix_results (List[dict])

    Modified Keys:

    - img
    - img_shape
    - gt_bboxes (optional)
    - gt_bboxes_labels (optional)
    - gt_ignore_flags (optional)

    Args:
        img_scale (Sequence[int]): Image output size after mixup pipeline.
            The shape order should be (width, height). Defaults to (640, 640).
        ratio_range (Sequence[float]): Scale ratio of mixup image.
            Defaults to (0.5, 1.5).
        flip_ratio (float): Horizontal flip ratio of mixup image.
            Defaults to 0.5.
        pad_val (int): Pad value. Defaults to 114.
        bbox_clip_border (bool, optional): Whether to clip the objects outside
            the border of the image. In some dataset like MOT17, the gt bboxes
            are allowed to cross the border of images. Therefore, we don't
            need to clip the gt bboxes in these cases. Defaults to True.
        pre_transform(Sequence[dict]): Sequence of transform object or
            config dict to be composed.
        prob (float): Probability of applying this transformation.
            Defaults to 1.0.
        use_cached (bool): Whether to use cache. Defaults to False.
        max_cached_images (int): The maximum length of the cache. The larger
            the cache, the stronger the randomness of this transform. As a
            rule of thumb, providing 10 caches for each image suffices for
            randomness. Defaults to 20.
        random_pop (bool): Whether to randomly pop a result from the cache
            when the cache is full. If set to False, use FIFO popping method.
            Defaults to True.
        max_refetch (int): The maximum number of iterations. If the number of
            iterations is greater than `max_refetch`, but gt_bbox is still
            empty, then the iteration is terminated. Defaults to 15.
    """

    def __init__(self,
                 img_scale: Tuple[int, int] = (640, 640),
                 ratio_range: Tuple[float, float] = (0.5, 1.5),
                 flip_ratio: float = 0.5,
                 pad_val: float = 114.0,
                 bbox_clip_border: bool = True,
                 pre_transform: Sequence[dict] = None,
                 prob: float = 1.0,
                 use_cached: bool = False,
                 max_cached_images: int = 20,
                 random_pop: bool = True,
                 max_refetch: int = 15):
        assert isinstance(img_scale, tuple)
        if use_cached:
            # MixUp needs the current image plus one other, hence >= 2.
            assert max_cached_images >= 2, 'The length of cache must >= 2, ' \
                                           f'but got {max_cached_images}.'
        super().__init__(
            pre_transform=pre_transform,
            prob=prob,
            use_cached=use_cached,
            max_cached_images=max_cached_images,
            random_pop=random_pop,
            max_refetch=max_refetch)
        self.img_scale = img_scale
        self.ratio_range = ratio_range
        self.flip_ratio = flip_ratio
        self.pad_val = pad_val
        self.bbox_clip_border = bbox_clip_border

    def get_indexes(self, dataset: Union[BaseDataset, list]) -> int:
        """Call function to collect indexes.

        Args:
            dataset (:obj:`Dataset` or list): The dataset or cached list.

        Returns:
            int: indexes.
        """
        # ``random`` is ``numpy.random``; ``randint`` excludes the high
        # endpoint, so the index falls in [0, len(dataset) - 1].
        return random.randint(0, len(dataset))

    def mix_img_transform(self, results: dict) -> dict:
        """YOLOX MixUp transform function.

        Args:
            results (dict): Result dict.

        Returns:
            results (dict): Updated result dict.
        """
        assert 'mix_results' in results
        assert len(
            results['mix_results']) == 1, 'MixUp only support 2 images now !'

        if results['mix_results'][0]['gt_bboxes'].shape[0] == 0:
            # empty bbox
            return results

        retrieve_results = results['mix_results'][0]
        retrieve_img = retrieve_results['img']

        jit_factor = random.uniform(*self.ratio_range)
        # NOTE(review): the flip happens when the uniform draw EXCEEDS
        # ``flip_ratio``, i.e. with probability ``1 - flip_ratio``; the
        # default 0.5 makes this symmetric -- confirm the inversion is
        # intended for other values.
        is_filp = random.uniform(0, 1) > self.flip_ratio

        # Pad canvas at the configured output size; img_scale is (w, h),
        # numpy shapes are (h, w[, c]).
        if len(retrieve_img.shape) == 3:
            out_img = np.ones((self.img_scale[1], self.img_scale[0], 3),
                              dtype=retrieve_img.dtype) * self.pad_val
        else:
            out_img = np.ones(
                self.img_scale[::-1], dtype=retrieve_img.dtype) * self.pad_val

        # 1. keep_ratio resize
        scale_ratio = min(self.img_scale[1] / retrieve_img.shape[0],
                          self.img_scale[0] / retrieve_img.shape[1])
        retrieve_img = mmcv.imresize(
            retrieve_img, (int(retrieve_img.shape[1] * scale_ratio),
                           int(retrieve_img.shape[0] * scale_ratio)))

        # 2. paste
        out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img

        # 3. scale jit
        # ``scale_ratio`` tracks the jitter so the bbox rescale below stays
        # consistent with the resized image.
        scale_ratio *= jit_factor
        out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor),
                                          int(out_img.shape[0] * jit_factor)))

        # 4. flip
        if is_filp:
            out_img = out_img[:, ::-1, :]

        # 5. random crop
        ori_img = results['img']
        origin_h, origin_w = out_img.shape[:2]
        target_h, target_w = ori_img.shape[:2]
        # NOTE(review): the padded canvas is hard-coded 3-channel -- assumes
        # color input from here on.
        padded_img = np.ones((max(origin_h, target_h), max(
            origin_w, target_w), 3)) * self.pad_val
        padded_img = padded_img.astype(np.uint8)
        padded_img[:origin_h, :origin_w] = out_img

        x_offset, y_offset = 0, 0
        if padded_img.shape[0] > target_h:
            y_offset = random.randint(0, padded_img.shape[0] - target_h)
        if padded_img.shape[1] > target_w:
            x_offset = random.randint(0, padded_img.shape[1] - target_w)
        padded_cropped_img = padded_img[y_offset:y_offset + target_h,
                                        x_offset:x_offset + target_w]

        # 6. adjust bbox
        retrieve_gt_bboxes = retrieve_results['gt_bboxes']
        retrieve_gt_bboxes.rescale_([scale_ratio, scale_ratio])
        if self.bbox_clip_border:
            retrieve_gt_bboxes.clip_([origin_h, origin_w])

        if is_filp:
            retrieve_gt_bboxes.flip_([origin_h, origin_w],
                                     direction='horizontal')

        # 7. filter
        cp_retrieve_gt_bboxes = retrieve_gt_bboxes.clone()
        cp_retrieve_gt_bboxes.translate_([-x_offset, -y_offset])
        if self.bbox_clip_border:
            cp_retrieve_gt_bboxes.clip_([target_h, target_w])

        # 8. mix up
        mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img

        retrieve_gt_bboxes_labels = retrieve_results['gt_bboxes_labels']
        retrieve_gt_ignore_flags = retrieve_results['gt_ignore_flags']

        mixup_gt_bboxes = cp_retrieve_gt_bboxes.cat(
            (results['gt_bboxes'], cp_retrieve_gt_bboxes), dim=0)
        mixup_gt_bboxes_labels = np.concatenate(
            (results['gt_bboxes_labels'], retrieve_gt_bboxes_labels), axis=0)
        mixup_gt_ignore_flags = np.concatenate(
            (results['gt_ignore_flags'], retrieve_gt_ignore_flags), axis=0)
        if not self.bbox_clip_border:
            # remove outside bbox
            inside_inds = mixup_gt_bboxes.is_inside([target_h,
                                                     target_w]).numpy()
            mixup_gt_bboxes = mixup_gt_bboxes[inside_inds]
            mixup_gt_bboxes_labels = mixup_gt_bboxes_labels[inside_inds]
            mixup_gt_ignore_flags = mixup_gt_ignore_flags[inside_inds]

        results['img'] = mixup_img.astype(np.uint8)
        results['img_shape'] = mixup_img.shape
        results['gt_bboxes'] = mixup_gt_bboxes
        results['gt_bboxes_labels'] = mixup_gt_bboxes_labels
        results['gt_ignore_flags'] = mixup_gt_ignore_flags

        return results

    def __repr__(self) -> str:
        repr_str = self.__class__.__name__
        repr_str += f'(img_scale={self.img_scale}, '
        repr_str += f'ratio_range={self.ratio_range}, '
        repr_str += f'flip_ratio={self.flip_ratio}, '
        repr_str += f'pad_val={self.pad_val}, '
        repr_str += f'max_refetch={self.max_refetch}, '
        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
        return repr_str
| 46,505 | 39.404865 | 79 | py |
mmyolo | mmyolo-main/mmyolo/datasets/transforms/transforms.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from copy import deepcopy
from typing import List, Sequence, Tuple, Union
import cv2
import mmcv
import numpy as np
import torch
from mmcv.transforms import BaseTransform, Compose
from mmcv.transforms.utils import cache_randomness
from mmdet.datasets.transforms import LoadAnnotations as MMDET_LoadAnnotations
from mmdet.datasets.transforms import Resize as MMDET_Resize
from mmdet.structures.bbox import (HorizontalBoxes, autocast_box_type,
get_box_type)
from mmdet.structures.mask import PolygonMasks
from numpy import random
from mmyolo.registry import TRANSFORMS
# TODO: Waiting for MMCV support
TRANSFORMS.register_module(module=Compose, force=True)
@TRANSFORMS.register_module()
class YOLOv5KeepRatioResize(MMDET_Resize):
    """Aspect-ratio-preserving resize as used in the YOLOv5 pipeline.

    This transform resizes the input image according to ``scale``.
    Bboxes (if existed) are then resized with the same scale factor.

    Required Keys:

    - img (np.uint8)
    - gt_bboxes (BaseBoxes[torch.float32]) (optional)

    Modified Keys:

    - img (np.uint8)
    - img_shape (tuple)
    - gt_bboxes (optional)
    - scale (float)

    Added Keys:

    - scale_factor (np.float32)

    Args:
        scale (Union[int, Tuple[int, int]]): Images scales for resizing.
    """

    def __init__(self,
                 scale: Union[int, Tuple[int, int]],
                 keep_ratio: bool = True,
                 **kwargs):
        # This transform is only meaningful with aspect ratio preserved.
        assert keep_ratio is True
        super().__init__(scale=scale, keep_ratio=True, **kwargs)

    @staticmethod
    def _get_rescale_ratio(old_size: Tuple[int, int],
                           scale: Union[float, Tuple[int]]) -> float:
        """Calculate the ratio for rescaling.

        Args:
            old_size (tuple[int]): The old size (w, h) of image.
            scale (float | tuple[int]): The scaling factor or maximum size.
                If it is a float number, then the image will be rescaled by
                this factor, else if it is a tuple of 2 integers, then
                the image will be rescaled as large as possible within
                the scale.

        Returns:
            float: The resize ratio.
        """
        # NOTE(review): the caller passes (h, w) despite the docstring's
        # (w, h); the expressions below are symmetric in the two edges,
        # so the returned ratio is the same either way.
        w, h = old_size
        if isinstance(scale, (float, int)):
            if scale <= 0:
                raise ValueError(f'Invalid scale {scale}, must be positive.')
            return scale
        if isinstance(scale, tuple):
            long_edge, short_edge = max(scale), min(scale)
            return min(long_edge / max(h, w), short_edge / min(h, w))
        raise TypeError('Scale must be a number or tuple of int, '
                        f'but got {type(scale)}')

    def _resize_img(self, results: dict):
        """Resize ``results['img']`` keeping the aspect ratio and record the
        effective scale factor."""
        assert self.keep_ratio is True
        image = results.get('img', None)
        if image is None:
            return
        orig_h, orig_w = image.shape[:2]
        ratio = self._get_rescale_ratio((orig_h, orig_w), self.scale)
        if ratio != 1:
            # 'area' interpolation yields better quality when shrinking.
            image = mmcv.imrescale(
                img=image,
                scale=ratio,
                interpolation='area' if ratio < 1 else 'bilinear',
                backend=self.backend)
        # Recompute the ratio from the actual output height: rounding in
        # imrescale can make it differ slightly from the requested ratio.
        actual_ratio = image.shape[0] / orig_h
        results['img'] = image
        results['img_shape'] = image.shape[:2]
        results['scale_factor'] = (actual_ratio, actual_ratio)
@TRANSFORMS.register_module()
class LetterResize(MMDET_Resize):
    """Resize and pad image while meeting stride-multiple constraints.

    Required Keys:

    - img (np.uint8)
    - batch_shape (np.int64) (optional)

    Modified Keys:

    - img (np.uint8)
    - img_shape (tuple)
    - gt_bboxes (optional)

    Added Keys:

    - pad_param (np.float32): [top, bottom, left, right] padding in pixels.

    Args:
        scale (Union[int, Tuple[int, int]]): Images scales for resizing.
        pad_val (dict): Padding value. Defaults to dict(img=0, seg=255).
        use_mini_pad (bool): Whether using minimum rectangle padding.
            Defaults to True
        stretch_only (bool): Whether stretch to the specified size directly.
            Defaults to False
        allow_scale_up (bool): Allow scale up when ratio > 1. Defaults to True
    """

    def __init__(self,
                 scale: Union[int, Tuple[int, int]],
                 pad_val: dict = dict(img=0, mask=0, seg=255),
                 use_mini_pad: bool = False,
                 stretch_only: bool = False,
                 allow_scale_up: bool = True,
                 **kwargs):
        super().__init__(scale=scale, keep_ratio=True, **kwargs)

        # BUGFIX: normalize a scalar ``pad_val`` into the dict form *before*
        # storing it. The previous code stored the raw scalar first, so
        # ``self.pad_val.get('img', 0)`` in ``_resize_img`` raised
        # AttributeError whenever a plain int/float was configured.
        if isinstance(pad_val, (int, float)):
            pad_val = dict(img=pad_val, seg=255)
        assert isinstance(
            pad_val, dict), f'pad_val must be dict, but got {type(pad_val)}'
        self.pad_val = pad_val

        self.use_mini_pad = use_mini_pad
        self.stretch_only = stretch_only
        self.allow_scale_up = allow_scale_up

    def _resize_img(self, results: dict):
        """Resize images with ``results['scale']`` and record padding in
        ``results['pad_param']``."""
        image = results.get('img', None)
        if image is None:
            return

        # Use batch_shape if a batch_shape policy is configured
        if 'batch_shape' in results:
            scale = tuple(results['batch_shape'])  # hw
        else:
            scale = self.scale[::-1]  # wh -> hw

        image_shape = image.shape[:2]  # height, width

        # Scale ratio (new / old)
        ratio = min(scale[0] / image_shape[0], scale[1] / image_shape[1])

        # only scale down, do not scale up (for better test mAP)
        if not self.allow_scale_up:
            ratio = min(ratio, 1.0)

        ratio = [ratio, ratio]  # float -> (float, float) for (height, width)

        # compute the best size of the image
        no_pad_shape = (int(round(image_shape[0] * ratio[0])),
                        int(round(image_shape[1] * ratio[1])))

        # padding height & width
        padding_h, padding_w = [
            scale[0] - no_pad_shape[0], scale[1] - no_pad_shape[1]
        ]
        if self.use_mini_pad:
            # minimum rectangle padding: pad only up to the next multiple
            # of the 32-pixel stride instead of to the full target shape.
            padding_w, padding_h = np.mod(padding_w, 32), np.mod(padding_h, 32)

        elif self.stretch_only:
            # stretch to the specified size directly (no padding at all)
            padding_h, padding_w = 0.0, 0.0
            no_pad_shape = (scale[0], scale[1])
            ratio = [scale[0] / image_shape[0],
                     scale[1] / image_shape[1]]  # height, width ratios

        if image_shape != no_pad_shape:
            # compare with no resize and padding size
            image = mmcv.imresize(
                image, (no_pad_shape[1], no_pad_shape[0]),
                interpolation=self.interpolation,
                backend=self.backend)

        scale_factor = (ratio[1], ratio[0])  # mmcv scale factor is (w, h)

        if 'scale_factor' in results:
            results['scale_factor_origin'] = results['scale_factor']
        results['scale_factor'] = scale_factor

        # padding: split as evenly as possible; the -0.1 bias puts the odd
        # pixel on the bottom/right side.
        top_padding, left_padding = int(round(padding_h // 2 - 0.1)), int(
            round(padding_w // 2 - 0.1))
        bottom_padding = padding_h - top_padding
        right_padding = padding_w - left_padding

        padding_list = [
            top_padding, bottom_padding, left_padding, right_padding
        ]
        if top_padding != 0 or bottom_padding != 0 or \
                left_padding != 0 or right_padding != 0:

            pad_val = self.pad_val.get('img', 0)
            if isinstance(pad_val, int) and image.ndim == 3:
                pad_val = tuple(pad_val for _ in range(image.shape[2]))

            image = mmcv.impad(
                img=image,
                padding=(padding_list[2], padding_list[0], padding_list[3],
                         padding_list[1]),
                pad_val=pad_val,
                padding_mode='constant')

        results['img'] = image
        results['img_shape'] = image.shape
        if 'pad_param' in results:
            results['pad_param_origin'] = results['pad_param'] * \
                                          np.repeat(ratio, 2)
        results['pad_param'] = np.array(padding_list, dtype=np.float32)

    def _resize_masks(self, results: dict):
        """Resize masks with ``results['scale']`` and shift them by the
        padding offsets so they stay aligned with the padded image."""
        if results.get('gt_masks', None) is None:
            return

        gt_masks = results['gt_masks']
        assert isinstance(
            gt_masks, PolygonMasks
        ), f'Only supports PolygonMasks, but got {type(gt_masks)}'

        # resize the gt_masks
        gt_mask_h = results['gt_masks'].height * results['scale_factor'][1]
        gt_mask_w = results['gt_masks'].width * results['scale_factor'][0]
        gt_masks = results['gt_masks'].resize(
            (int(round(gt_mask_h)), int(round(gt_mask_w))))

        # translate by the left/top padding computed in ``_resize_img``
        top_padding, _, left_padding, _ = results['pad_param']
        if int(left_padding) != 0:
            gt_masks = gt_masks.translate(
                out_shape=results['img_shape'][:2],
                offset=int(left_padding),
                direction='horizontal')
        if int(top_padding) != 0:
            gt_masks = gt_masks.translate(
                out_shape=results['img_shape'][:2],
                offset=int(top_padding),
                direction='vertical')
        results['gt_masks'] = gt_masks

    def _resize_bboxes(self, results: dict):
        """Resize bounding boxes with ``results['scale_factor']`` and shift
        them into the padded image."""
        if results.get('gt_bboxes', None) is None:
            return
        results['gt_bboxes'].rescale_(results['scale_factor'])

        if len(results['pad_param']) != 4:
            return
        # pad_param is [top, bottom, left, right]; translate by (left, top)
        results['gt_bboxes'].translate_(
            (results['pad_param'][2], results['pad_param'][0]))

        if self.clip_object_border:
            results['gt_bboxes'].clip_(results['img_shape'])

    def transform(self, results: dict) -> dict:
        # Fold any pre-existing scale_factor / pad_param (e.g. from an
        # earlier resize in the pipeline) into the values this transform
        # produced, so downstream consumers see the combined mapping.
        results = super().transform(results)
        if 'scale_factor_origin' in results:
            scale_factor_origin = results.pop('scale_factor_origin')
            results['scale_factor'] = (results['scale_factor'][0] *
                                       scale_factor_origin[0],
                                       results['scale_factor'][1] *
                                       scale_factor_origin[1])
        if 'pad_param_origin' in results:
            pad_param_origin = results.pop('pad_param_origin')
            results['pad_param'] += pad_param_origin
        return results
# TODO: Check if it can be merged with mmdet.YOLOXHSVRandomAug
@TRANSFORMS.register_module()
class YOLOv5HSVRandomAug(BaseTransform):
    """Apply HSV augmentation to image sequentially.

    Required Keys:

    - img

    Modified Keys:

    - img

    Args:
        hue_delta ([int, float]): delta of hue. Defaults to 0.015.
        saturation_delta ([int, float]): delta of saturation. Defaults to 0.7.
        value_delta ([int, float]): delta of value. Defaults to 0.4.
    """

    def __init__(self,
                 hue_delta: Union[int, float] = 0.015,
                 saturation_delta: Union[int, float] = 0.7,
                 value_delta: Union[int, float] = 0.4):
        self.hue_delta = hue_delta
        self.saturation_delta = saturation_delta
        self.value_delta = value_delta

    def transform(self, results: dict) -> dict:
        """The HSV augmentation transform function.

        Args:
            results (dict): The result dict.

        Returns:
            dict: The result dict.
        """
        # Random multiplicative gains around 1 for (hue, saturation, value).
        gains = random.uniform(-1, 1, 3) * \
            [self.hue_delta, self.saturation_delta, self.value_delta] + 1
        hue, sat, val = cv2.split(
            cv2.cvtColor(results['img'], cv2.COLOR_BGR2HSV))
        # Build per-channel lookup tables once, then apply via cv2.LUT.
        table = np.arange(0, 256, dtype=gains.dtype)
        lut_hue = ((table * gains[0]) % 180).astype(np.uint8)  # hue wraps
        lut_sat = np.clip(table * gains[1], 0, 255).astype(np.uint8)
        lut_val = np.clip(table * gains[2], 0, 255).astype(np.uint8)
        augmented = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat),
                               cv2.LUT(val, lut_val)))
        results['img'] = cv2.cvtColor(augmented, cv2.COLOR_HSV2BGR)
        return results

    def __repr__(self) -> str:
        return (f'{self.__class__.__name__}'
                f'(hue_delta={self.hue_delta}, '
                f'saturation_delta={self.saturation_delta}, '
                f'value_delta={self.value_delta})')
@TRANSFORMS.register_module()
class LoadAnnotations(MMDET_LoadAnnotations):
    """Because the yolo series does not need to consider ignore bboxes for the
    time being, in order to speed up the pipeline, it can be excluded in
    advance."""

    def __init__(self,
                 mask2bbox: bool = False,
                 poly2mask: bool = False,
                 **kwargs) -> None:
        # When True, gt_bboxes are regenerated from the polygon masks in
        # ``transform`` instead of being read from the bbox annotations.
        self.mask2bbox = mask2bbox
        assert not poly2mask, 'Does not support BitmapMasks considering ' \
                              'that bitmap consumes more memory.'
        super().__init__(poly2mask=poly2mask, **kwargs)
        if self.mask2bbox:
            assert self.with_mask, 'Using mask2bbox requires ' \
                                   'with_mask is True.'
        # Per-instance keep/drop flags filled by ``_load_masks`` and used by
        # ``_update_mask_ignore_data`` to keep labels/bboxes aligned with
        # the retained masks.
        self._mask_ignore_flag = None

    def transform(self, results: dict) -> dict:
        """Function to load multiple types annotations.

        Args:
            results (dict): Result dict from :obj:``mmengine.BaseDataset``.

        Returns:
            dict: The dict contains loaded bounding box, label and
            semantic segmentation.
        """
        if self.mask2bbox:
            self._load_masks(results)
            if self.with_label:
                self._load_labels(results)
                self._update_mask_ignore_data(results)
            # Derive horizontal boxes from the retained polygon masks.
            gt_bboxes = results['gt_masks'].get_bboxes(dst_type='hbox')
            results['gt_bboxes'] = gt_bboxes
        else:
            results = super().transform(results)
            self._update_mask_ignore_data(results)
        return results

    def _update_mask_ignore_data(self, results: dict) -> None:
        # Drop labels/bboxes whose mask was filtered out by ``_load_masks``
        # (length mismatch with gt_masks signals that filtering happened),
        # so that all annotation arrays stay the same length.
        if 'gt_masks' not in results:
            return

        if 'gt_bboxes_labels' in results and len(
                results['gt_bboxes_labels']) != len(results['gt_masks']):
            assert len(results['gt_bboxes_labels']) == len(
                self._mask_ignore_flag)
            results['gt_bboxes_labels'] = results['gt_bboxes_labels'][
                self._mask_ignore_flag]

        if 'gt_bboxes' in results and len(results['gt_bboxes']) != len(
                results['gt_masks']):
            assert len(results['gt_bboxes']) == len(self._mask_ignore_flag)
            results['gt_bboxes'] = results['gt_bboxes'][self._mask_ignore_flag]

    def _load_bboxes(self, results: dict):
        """Private function to load bounding box annotations.

        Note: BBoxes with ignore_flag of 1 is not considered.

        Args:
            results (dict): Result dict from :obj:``mmengine.BaseDataset``.

        Returns:
            dict: The dict contains loaded bounding box annotations.
        """
        gt_bboxes = []
        gt_ignore_flags = []
        # Instances flagged as ignored are dropped entirely (YOLO-series
        # pipelines do not train on ignore regions).
        for instance in results.get('instances', []):
            if instance['ignore_flag'] == 0:
                gt_bboxes.append(instance['bbox'])
                gt_ignore_flags.append(instance['ignore_flag'])
        results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool)

        if self.box_type is None:
            results['gt_bboxes'] = np.array(
                gt_bboxes, dtype=np.float32).reshape((-1, 4))
        else:
            _, box_type_cls = get_box_type(self.box_type)
            results['gt_bboxes'] = box_type_cls(gt_bboxes, dtype=torch.float32)

    def _load_labels(self, results: dict):
        """Private function to load label annotations.

        Note: BBoxes with ignore_flag of 1 is not considered.

        Args:
            results (dict): Result dict from :obj:``mmengine.BaseDataset``.

        Returns:
            dict: The dict contains loaded label annotations.
        """
        gt_bboxes_labels = []
        for instance in results.get('instances', []):
            if instance['ignore_flag'] == 0:
                gt_bboxes_labels.append(instance['bbox_label'])
        results['gt_bboxes_labels'] = np.array(
            gt_bboxes_labels, dtype=np.int64)

    def _load_masks(self, results: dict) -> None:
        """Private function to load mask annotations.

        Args:
            results (dict): Result dict from :obj:``mmengine.BaseDataset``.
        """
        gt_masks = []
        gt_ignore_flags = []
        # 1 = instance kept, 0 = instance dropped; one entry per
        # non-ignored instance, consumed by ``_update_mask_ignore_data``.
        self._mask_ignore_flag = []
        for instance in results.get('instances', []):
            if instance['ignore_flag'] == 0:
                if 'mask' in instance:
                    gt_mask = instance['mask']
                    if isinstance(gt_mask, list):
                        # Keep only well-formed polygons: even coordinate
                        # count and at least 3 points (6 values).
                        gt_mask = [
                            np.array(polygon) for polygon in gt_mask
                            if len(polygon) % 2 == 0 and len(polygon) >= 6
                        ]
                        if len(gt_mask) == 0:
                            # ignore
                            self._mask_ignore_flag.append(0)
                        else:
                            gt_masks.append(gt_mask)
                            gt_ignore_flags.append(instance['ignore_flag'])
                            self._mask_ignore_flag.append(1)
                    else:
                        raise NotImplementedError(
                            'Only supports mask annotations in polygon '
                            'format currently')
                else:
                    # TODO: Actually, gt with bbox and without mask needs
                    # to be retained
                    self._mask_ignore_flag.append(0)
        self._mask_ignore_flag = np.array(self._mask_ignore_flag, dtype=bool)
        results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool)

        h, w = results['ori_shape']
        gt_masks = PolygonMasks([mask for mask in gt_masks], h, w)
        results['gt_masks'] = gt_masks

    def __repr__(self) -> str:
        repr_str = self.__class__.__name__
        repr_str += f'(with_bbox={self.with_bbox}, '
        repr_str += f'with_label={self.with_label}, '
        repr_str += f'with_mask={self.with_mask}, '
        repr_str += f'with_seg={self.with_seg}, '
        repr_str += f'mask2bbox={self.mask2bbox}, '
        repr_str += f'poly2mask={self.poly2mask}, '
        repr_str += f"imdecode_backend='{self.imdecode_backend}', "
        repr_str += f'file_client_args={self.file_client_args})'
        return repr_str
@TRANSFORMS.register_module()
class YOLOv5RandomAffine(BaseTransform):
    """Random affine transform data augmentation in YOLOv5 and YOLOv8. It is
    different from the implementation in YOLOX.

    This operation randomly generates affine transform matrix which including
    rotation, translation, shear and scaling transforms.
    If you set use_mask_refine == True, the code will use the masks
    annotation to refine the bbox.
    Our implementation is slightly different from the official. In COCO
    dataset, a gt may have multiple mask tags.  The official YOLOv5
    annotation file already combines the masks that an object has,
    but our code takes into account the fact that an object has multiple masks.

    Required Keys:

    - img
    - gt_bboxes (BaseBoxes[torch.float32]) (optional)
    - gt_bboxes_labels (np.int64) (optional)
    - gt_ignore_flags (bool) (optional)
    - gt_masks (PolygonMasks) (optional)

    Modified Keys:

    - img
    - img_shape
    - gt_bboxes (optional)
    - gt_bboxes_labels (optional)
    - gt_ignore_flags (optional)
    - gt_masks (PolygonMasks) (optional)

    Args:
        max_rotate_degree (float): Maximum degrees of rotation transform.
            Defaults to 10.
        max_translate_ratio (float): Maximum ratio of translation.
            Defaults to 0.1.
        scaling_ratio_range (tuple[float]): Min and max ratio of
            scaling transform. Defaults to (0.5, 1.5).
        max_shear_degree (float): Maximum degrees of shear
            transform. Defaults to 2.
        border (tuple[int]): Distance from width and height sides of input
            image to adjust output shape. Only used in mosaic dataset.
            Defaults to (0, 0).
        border_val (tuple[int]): Border padding values of 3 channels.
            Defaults to (114, 114, 114).
        bbox_clip_border (bool, optional): Whether to clip the objects outside
            the border of the image. In some dataset like MOT17, the gt bboxes
            are allowed to cross the border of images. Therefore, we don't
            need to clip the gt bboxes in these cases. Defaults to True.
        min_bbox_size (float): Width and height threshold to filter bboxes.
            If the height or width of a box is smaller than this value, it
            will be removed. Defaults to 2.
        min_area_ratio (float): Threshold of area ratio between
            original bboxes and wrapped bboxes. If smaller than this value,
            the box will be removed. Defaults to 0.1.
        use_mask_refine (bool): Whether to refine bbox by mask.
        max_aspect_ratio (float): Aspect ratio of width and height
            threshold to filter bboxes. If max(h/w, w/h) larger than this
            value, the box will be removed. Defaults to 20.
        resample_num (int): Number of poly to resample to.
    """

    def __init__(self,
                 max_rotate_degree: float = 10.0,
                 max_translate_ratio: float = 0.1,
                 scaling_ratio_range: Tuple[float, float] = (0.5, 1.5),
                 max_shear_degree: float = 2.0,
                 border: Tuple[int, int] = (0, 0),
                 border_val: Tuple[int, int, int] = (114, 114, 114),
                 bbox_clip_border: bool = True,
                 min_bbox_size: int = 2,
                 min_area_ratio: float = 0.1,
                 use_mask_refine: bool = False,
                 max_aspect_ratio: float = 20.,
                 resample_num: int = 1000):
        assert 0 <= max_translate_ratio <= 1
        assert scaling_ratio_range[0] <= scaling_ratio_range[1]
        assert scaling_ratio_range[0] > 0
        self.max_rotate_degree = max_rotate_degree
        self.max_translate_ratio = max_translate_ratio
        self.scaling_ratio_range = scaling_ratio_range
        self.max_shear_degree = max_shear_degree
        self.border = border
        self.border_val = border_val
        self.bbox_clip_border = bbox_clip_border
        self.min_bbox_size = min_bbox_size
        self.min_area_ratio = min_area_ratio
        self.use_mask_refine = use_mask_refine
        self.max_aspect_ratio = max_aspect_ratio
        self.resample_num = resample_num

    @autocast_box_type()
    def transform(self, results: dict) -> dict:
        """The YOLOv5 random affine transform function.

        Args:
            results (dict): The result dict.

        Returns:
            dict: The result dict.
        """
        img = results['img']
        # self.border is wh format
        height = img.shape[0] + self.border[1] * 2
        width = img.shape[1] + self.border[0] * 2

        # Note: Different from YOLOX
        # Shift the origin to the image center so that rotation/shear/scale
        # are applied about the center rather than the top-left corner.
        center_matrix = np.eye(3, dtype=np.float32)
        center_matrix[0, 2] = -img.shape[1] / 2
        center_matrix[1, 2] = -img.shape[0] / 2

        warp_matrix, scaling_ratio = self._get_random_homography_matrix(
            height, width)
        warp_matrix = warp_matrix @ center_matrix

        img = cv2.warpPerspective(
            img,
            warp_matrix,
            dsize=(width, height),
            borderValue=self.border_val)
        results['img'] = img
        results['img_shape'] = img.shape
        img_h, img_w = img.shape[:2]

        bboxes = results['gt_bboxes']
        num_bboxes = len(bboxes)
        if num_bboxes:
            orig_bboxes = bboxes.clone()
            if self.use_mask_refine and 'gt_masks' in results:
                # If the dataset has annotations of mask,
                # the mask will be used to refine bbox.
                gt_masks = results['gt_masks']

                gt_masks_resample = self.resample_masks(gt_masks)
                gt_masks = self.warp_mask(gt_masks_resample, warp_matrix,
                                          img_h, img_w)

                # refine bboxes by masks
                bboxes = gt_masks.get_bboxes(dst_type='hbox')
                # filter bboxes outside image
                valid_index = self.filter_gt_bboxes(orig_bboxes,
                                                    bboxes).numpy()
                results['gt_masks'] = gt_masks[valid_index]
            else:
                bboxes.project_(warp_matrix)
                if self.bbox_clip_border:
                    bboxes.clip_([height, width])

                # filter bboxes
                # Rescale originals so the area-ratio comparison in
                # ``filter_gt_bboxes`` is done at the same scale.
                orig_bboxes.rescale_([scaling_ratio, scaling_ratio])

                # Be careful: valid_index must convert to numpy,
                # otherwise it will raise out of bounds when len(valid_index)=1
                valid_index = self.filter_gt_bboxes(orig_bboxes,
                                                    bboxes).numpy()
                if 'gt_masks' in results:
                    results['gt_masks'] = PolygonMasks(
                        results['gt_masks'].masks, img_h, img_w)

            results['gt_bboxes'] = bboxes[valid_index]
            results['gt_bboxes_labels'] = results['gt_bboxes_labels'][
                valid_index]
            results['gt_ignore_flags'] = results['gt_ignore_flags'][
                valid_index]

        return results

    @staticmethod
    def warp_poly(poly: np.ndarray, warp_matrix: np.ndarray, img_w: int,
                  img_h: int) -> np.ndarray:
        """Function to warp one mask and filter points outside image.

        Args:
            poly (np.ndarray): Segmentation annotation with shape (n, ) and
                with format (x1, y1, x2, y2, ...).
            warp_matrix (np.ndarray): Affine transformation matrix.
                Shape: (3, 3).
            img_w (int): Width of output image.
            img_h (int): Height of output image.
        """
        # TODO: Current logic may cause retained masks unusable for
        # semantic segmentation training, which is same as official
        # implementation.
        poly = poly.reshape((-1, 2))
        # Append a homogeneous coordinate so the 3x3 matrix can be applied.
        poly = np.concatenate((poly, np.ones(
            (len(poly), 1), dtype=poly.dtype)),
                              axis=-1)

        # transform poly
        poly = poly @ warp_matrix.T
        # Perspective divide back to 2-D coordinates.
        poly = poly[:, :2] / poly[:, 2:3]

        # filter point outside image
        x, y = poly.T
        valid_ind_point = (x >= 0) & (y >= 0) & (x <= img_w) & (y <= img_h)
        return poly[valid_ind_point].reshape(-1)

    def warp_mask(self, gt_masks: PolygonMasks, warp_matrix: np.ndarray,
                  img_w: int, img_h: int) -> PolygonMasks:
        """Warp masks by warp_matrix and retain masks inside image after
        warping.

        Args:
            gt_masks (PolygonMasks): Annotations of semantic segmentation.
            warp_matrix (np.ndarray): Affine transformation matrix.
                Shape: (3, 3).
            img_w (int): Width of output image.
            img_h (int): Height of output image.

        Returns:
            PolygonMasks: Masks after warping.
        """
        masks = gt_masks.masks
        new_masks = []
        for poly_per_obj in masks:
            warpped_poly_per_obj = []
            # One gt may have multiple masks.
            for poly in poly_per_obj:
                valid_poly = self.warp_poly(poly, warp_matrix, img_w, img_h)
                if len(valid_poly):
                    warpped_poly_per_obj.append(valid_poly.reshape(-1))
            # If all the masks are invalid,
            # add [0, 0, 0, 0, 0, 0,] here.
            if not warpped_poly_per_obj:
                # This will be filtered in function `filter_gt_bboxes`.
                warpped_poly_per_obj = [
                    np.zeros(6, dtype=poly_per_obj[0].dtype)
                ]
            new_masks.append(warpped_poly_per_obj)

        gt_masks = PolygonMasks(new_masks, img_h, img_w)
        return gt_masks

    def resample_masks(self, gt_masks: PolygonMasks) -> PolygonMasks:
        """Function to resample each mask annotation with shape (2 * n, ) to
        shape (resample_num * 2, ).

        Args:
            gt_masks (PolygonMasks): Annotations of semantic segmentation.
        """
        masks = gt_masks.masks
        new_masks = []
        for poly_per_obj in masks:
            resample_poly_per_obj = []
            for poly in poly_per_obj:
                poly = poly.reshape((-1, 2))  # xy
                # Close the polygon by repeating the first vertex, then
                # linearly interpolate to a fixed number of points.
                poly = np.concatenate((poly, poly[0:1, :]), axis=0)
                x = np.linspace(0, len(poly) - 1, self.resample_num)
                xp = np.arange(len(poly))
                poly = np.concatenate([
                    np.interp(x, xp, poly[:, i]) for i in range(2)
                ]).reshape(2, -1).T.reshape(-1)
                resample_poly_per_obj.append(poly)
            new_masks.append(resample_poly_per_obj)
        return PolygonMasks(new_masks, gt_masks.height, gt_masks.width)

    def filter_gt_bboxes(self, origin_bboxes: HorizontalBoxes,
                         wrapped_bboxes: HorizontalBoxes) -> torch.Tensor:
        """Filter gt bboxes.

        Args:
            origin_bboxes (HorizontalBoxes): Origin bboxes.
            wrapped_bboxes (HorizontalBoxes): Wrapped bboxes

        Returns:
            dict: The result dict.
        """
        origin_w = origin_bboxes.widths
        origin_h = origin_bboxes.heights
        wrapped_w = wrapped_bboxes.widths
        wrapped_h = wrapped_bboxes.heights
        # 1e-16 guards against division by zero on degenerate boxes.
        aspect_ratio = np.maximum(wrapped_w / (wrapped_h + 1e-16),
                                  wrapped_h / (wrapped_w + 1e-16))

        wh_valid_idx = (wrapped_w > self.min_bbox_size) & \
                       (wrapped_h > self.min_bbox_size)
        area_valid_idx = wrapped_w * wrapped_h / (origin_w * origin_h +
                                                  1e-16) > self.min_area_ratio
        aspect_ratio_valid_idx = aspect_ratio < self.max_aspect_ratio
        return wh_valid_idx & area_valid_idx & aspect_ratio_valid_idx

    @cache_randomness
    def _get_random_homography_matrix(self, height: int,
                                      width: int) -> Tuple[np.ndarray, float]:
        """Get random homography matrix.

        Args:
            height (int): Image height.
            width (int): Image width.

        Returns:
            Tuple[np.ndarray, float]: The result of warp_matrix and
            scaling_ratio.
        """
        # Rotation
        rotation_degree = random.uniform(-self.max_rotate_degree,
                                         self.max_rotate_degree)
        rotation_matrix = self._get_rotation_matrix(rotation_degree)

        # Scaling
        scaling_ratio = random.uniform(self.scaling_ratio_range[0],
                                       self.scaling_ratio_range[1])
        scaling_matrix = self._get_scaling_matrix(scaling_ratio)

        # Shear
        x_degree = random.uniform(-self.max_shear_degree,
                                  self.max_shear_degree)
        y_degree = random.uniform(-self.max_shear_degree,
                                  self.max_shear_degree)
        shear_matrix = self._get_shear_matrix(x_degree, y_degree)

        # Translation
        trans_x = random.uniform(0.5 - self.max_translate_ratio,
                                 0.5 + self.max_translate_ratio) * width
        trans_y = random.uniform(0.5 - self.max_translate_ratio,
                                 0.5 + self.max_translate_ratio) * height
        translate_matrix = self._get_translation_matrix(trans_x, trans_y)
        # Matrix product applies right-to-left:
        # scale -> rotate -> shear -> translate.
        warp_matrix = (
            translate_matrix @ shear_matrix @ rotation_matrix @ scaling_matrix)
        return warp_matrix, scaling_ratio

    @staticmethod
    def _get_rotation_matrix(rotate_degrees: float) -> np.ndarray:
        """Get rotation matrix.

        Args:
            rotate_degrees (float): Rotate degrees.

        Returns:
            np.ndarray: The rotation matrix.
        """
        radian = math.radians(rotate_degrees)
        rotation_matrix = np.array(
            [[np.cos(radian), -np.sin(radian), 0.],
             [np.sin(radian), np.cos(radian), 0.], [0., 0., 1.]],
            dtype=np.float32)
        return rotation_matrix

    @staticmethod
    def _get_scaling_matrix(scale_ratio: float) -> np.ndarray:
        """Get scaling matrix.

        Args:
            scale_ratio (float): Scale ratio.

        Returns:
            np.ndarray: The scaling matrix.
        """
        scaling_matrix = np.array(
            [[scale_ratio, 0., 0.], [0., scale_ratio, 0.], [0., 0., 1.]],
            dtype=np.float32)
        return scaling_matrix

    @staticmethod
    def _get_shear_matrix(x_shear_degrees: float,
                          y_shear_degrees: float) -> np.ndarray:
        """Get shear matrix.

        Args:
            x_shear_degrees (float): X shear degrees.
            y_shear_degrees (float): Y shear degrees.

        Returns:
            np.ndarray: The shear matrix.
        """
        x_radian = math.radians(x_shear_degrees)
        y_radian = math.radians(y_shear_degrees)
        shear_matrix = np.array([[1, np.tan(x_radian), 0.],
                                 [np.tan(y_radian), 1, 0.], [0., 0., 1.]],
                                dtype=np.float32)
        return shear_matrix

    @staticmethod
    def _get_translation_matrix(x: float, y: float) -> np.ndarray:
        """Get translation matrix.

        Args:
            x (float): X translation.
            y (float): Y translation.

        Returns:
            np.ndarray: The translation matrix.
        """
        translation_matrix = np.array([[1, 0., x], [0., 1, y], [0., 0., 1.]],
                                      dtype=np.float32)
        return translation_matrix

    def __repr__(self) -> str:
        repr_str = self.__class__.__name__
        repr_str += f'(max_rotate_degree={self.max_rotate_degree}, '
        repr_str += f'max_translate_ratio={self.max_translate_ratio}, '
        repr_str += f'scaling_ratio_range={self.scaling_ratio_range}, '
        repr_str += f'max_shear_degree={self.max_shear_degree}, '
        repr_str += f'border={self.border}, '
        repr_str += f'border_val={self.border_val}, '
        repr_str += f'bbox_clip_border={self.bbox_clip_border})'
        return repr_str
@TRANSFORMS.register_module()
class PPYOLOERandomDistort(BaseTransform):
    """Random hue, saturation, contrast and brightness distortion.

    Required Keys:

    - img

    Modified Keys:

    - img (np.float32)

    Args:
        hue_cfg (dict): Hue settings. Defaults to dict(min=-18,
            max=18, prob=0.5).
        saturation_cfg (dict): Saturation settings. Defaults to dict(
            min=0.5, max=1.5, prob=0.5).
        contrast_cfg (dict): Contrast settings. Defaults to dict(
            min=0.5, max=1.5, prob=0.5).
        brightness_cfg (dict): Brightness settings. Defaults to dict(
            min=0.5, max=1.5, prob=0.5).
        num_distort_func (int): The number of distort function. Defaults
            to 4.
    """

    def __init__(self,
                 hue_cfg: dict = dict(min=-18, max=18, prob=0.5),
                 saturation_cfg: dict = dict(min=0.5, max=1.5, prob=0.5),
                 contrast_cfg: dict = dict(min=0.5, max=1.5, prob=0.5),
                 brightness_cfg: dict = dict(min=0.5, max=1.5, prob=0.5),
                 num_distort_func: int = 4):
        self.hue_cfg = hue_cfg
        self.saturation_cfg = saturation_cfg
        self.contrast_cfg = contrast_cfg
        self.brightness_cfg = brightness_cfg
        self.num_distort_func = num_distort_func
        assert 0 < self.num_distort_func <= 4, \
            'num_distort_func must > 0 and <= 4'
        # Every configured probability must be a valid probability value.
        for cfg in (self.hue_cfg, self.saturation_cfg, self.contrast_cfg,
                    self.brightness_cfg):
            assert 0. <= cfg['prob'] <= 1., 'prob must >=0 and <=1'

    def transform_hue(self, results):
        """Transform hue randomly."""
        if random.uniform(0., 1.) >= self.hue_cfg['prob']:
            return results
        img = results['img']
        delta = random.uniform(self.hue_cfg['min'], self.hue_cfg['max'])
        u, w = np.cos(delta * np.pi), np.sin(delta * np.pi)
        # Rotate about the luminance axis of the YIQ color space.
        delta_iq = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]])
        rgb2yiq_matrix = np.array([[0.114, 0.587, 0.299],
                                   [-0.321, -0.274, 0.596],
                                   [0.311, -0.523, 0.211]])
        yiq2rgb_matric = np.array([[1.0, -1.107, 1.705], [1.0, -0.272, -0.647],
                                   [1.0, 0.956, 0.621]])
        t = np.dot(np.dot(yiq2rgb_matric, delta_iq), rgb2yiq_matrix).T
        results['img'] = np.dot(img, t)
        return results

    def transform_saturation(self, results):
        """Transform saturation randomly."""
        if random.uniform(0., 1.) >= self.saturation_cfg['prob']:
            return results
        img = results['img']
        delta = random.uniform(self.saturation_cfg['min'],
                               self.saturation_cfg['max'])

        # Blend the image with its grayscale version:
        # out = delta * img + (1 - delta) * gray.
        gray = (img * np.array([[[0.114, 0.587, 0.299]]],
                               dtype=np.float32)).sum(
                                   axis=2, keepdims=True)
        gray *= (1.0 - delta)
        img *= delta
        img += gray
        results['img'] = img
        return results

    def transform_contrast(self, results):
        """Transform contrast randomly."""
        if random.uniform(0., 1.) >= self.contrast_cfg['prob']:
            return results
        delta = random.uniform(self.contrast_cfg['min'],
                               self.contrast_cfg['max'])
        results['img'] *= delta
        return results

    def transform_brightness(self, results):
        """Transform brightness randomly."""
        if random.uniform(0., 1.) >= self.brightness_cfg['prob']:
            return results
        delta = random.uniform(self.brightness_cfg['min'],
                               self.brightness_cfg['max'])
        results['img'] += delta
        return results

    def transform(self, results: dict) -> dict:
        """The hue, saturation, contrast and brightness distortion function.

        Args:
            results (dict): The result dict.

        Returns:
            dict: The result dict.
        """
        results['img'] = results['img'].astype(np.float32)

        candidates = [
            self.transform_brightness, self.transform_contrast,
            self.transform_saturation, self.transform_hue
        ]
        # Shuffle and keep the first ``num_distort_func`` distortions.
        for distort in random.permutation(candidates)[:self.num_distort_func]:
            results = distort(results)
        return results

    def __repr__(self) -> str:
        return (f'{self.__class__.__name__}'
                f'(hue_cfg={self.hue_cfg}, '
                f'saturation_cfg={self.saturation_cfg}, '
                f'contrast_cfg={self.contrast_cfg}, '
                f'brightness_cfg={self.brightness_cfg}, '
                f'num_distort_func={self.num_distort_func})')
@TRANSFORMS.register_module()
class PPYOLOERandomCrop(BaseTransform):
    """Random crop the img and bboxes. Different thresholds are used in PPYOLOE
    to judge whether the clipped image meets the requirements. This
    implementation is different from the implementation of RandomCrop in mmdet.

    Required Keys:

    - img
    - gt_bboxes (BaseBoxes[torch.float32]) (optional)
    - gt_bboxes_labels (np.int64) (optional)
    - gt_ignore_flags (bool) (optional)

    Modified Keys:

    - img
    - img_shape
    - gt_bboxes (optional)
    - gt_bboxes_labels (optional)
    - gt_ignore_flags (optional)

    Added Keys:
    - pad_param (np.float32)

    Args:
        aspect_ratio (List[float]): Aspect ratio of cropped region. Default to
            [.5, 2].
        thresholds (List[float]): Iou thresholds for deciding a valid bbox crop
            in [min, max] format. Defaults to [.0, .1, .3, .5, .7, .9].
        scaling (List[float]): Ratio between a cropped region and the original
            image in [min, max] format. Default to [.3, 1.].
        num_attempts (int): Number of tries for each threshold before
            giving up. Default to 50.
        allow_no_crop (bool): Allow return without actually cropping them.
            Default to True.
        cover_all_box (bool): Ensure all bboxes are covered in the final crop.
            Default to False.
    """

    def __init__(self,
                 aspect_ratio: List[float] = [.5, 2.],
                 thresholds: List[float] = [.0, .1, .3, .5, .7, .9],
                 scaling: List[float] = [.3, 1.],
                 num_attempts: int = 50,
                 allow_no_crop: bool = True,
                 cover_all_box: bool = False):
        self.aspect_ratio = aspect_ratio
        self.thresholds = thresholds
        self.scaling = scaling
        self.num_attempts = num_attempts
        self.allow_no_crop = allow_no_crop
        self.cover_all_box = cover_all_box

    def _crop_data(self, results: dict, crop_box: Tuple[int, int, int, int],
                   valid_inds: np.ndarray) -> Union[dict, None]:
        """Function to randomly crop images, bounding boxes, masks, semantic
        segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.
            crop_box (Tuple[int, int, int, int]): Expected absolute coordinates
                for cropping, (x1, y1, x2, y2).
            valid_inds (np.ndarray): The indexes of gt that needs to be
                retained.

        Returns:
            results (Union[dict, None]): Randomly cropped results, 'img_shape'
                key in result dict is updated according to crop size. None will
                be returned when there is no valid bbox after cropping.
        """
        # crop the image
        img = results['img']
        crop_x1, crop_y1, crop_x2, crop_y2 = crop_box
        img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
        results['img'] = img
        img_shape = img.shape
        results['img_shape'] = img.shape

        # crop bboxes accordingly and clip to the image boundary
        if results.get('gt_bboxes', None) is not None:
            bboxes = results['gt_bboxes']
            # Shift boxes into the cropped coordinate frame, then clip.
            bboxes.translate_([-crop_x1, -crop_y1])
            bboxes.clip_(img_shape[:2])
            results['gt_bboxes'] = bboxes[valid_inds]

            if results.get('gt_ignore_flags', None) is not None:
                results['gt_ignore_flags'] = \
                    results['gt_ignore_flags'][valid_inds]

            if results.get('gt_bboxes_labels', None) is not None:
                results['gt_bboxes_labels'] = \
                    results['gt_bboxes_labels'][valid_inds]

            if results.get('gt_masks', None) is not None:
                results['gt_masks'] = results['gt_masks'][
                    valid_inds.nonzero()[0]].crop(
                        np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))

        # crop semantic seg
        if results.get('gt_seg_map', None) is not None:
            results['gt_seg_map'] = results['gt_seg_map'][crop_y1:crop_y2,
                                                          crop_x1:crop_x2]
        return results

    @autocast_box_type()
    def transform(self, results: dict) -> Union[dict, None]:
        """The random crop transform function.

        Args:
            results (dict): The result dict.

        Returns:
            dict: The result dict.
        """
        if results.get('gt_bboxes', None) is None or len(
                results['gt_bboxes']) == 0:
            return results

        orig_img_h, orig_img_w = results['img'].shape[:2]
        gt_bboxes = results['gt_bboxes']

        thresholds = list(self.thresholds)
        if self.allow_no_crop:
            # 'no_crop' acts as a sentinel choice: if it is drawn first,
            # the image is returned unchanged.
            thresholds.append('no_crop')
        random.shuffle(thresholds)

        for thresh in thresholds:
            # Determine the coordinates for cropping
            if thresh == 'no_crop':
                return results

            found = False
            for i in range(self.num_attempts):
                crop_h, crop_w = self._get_crop_size((orig_img_h, orig_img_w))
                if self.aspect_ratio is None:
                    # Without an explicit aspect-ratio range, reject crops
                    # whose h/w ratio falls outside [0.5, 2.0].
                    if crop_h / crop_w < 0.5 or crop_h / crop_w > 2.0:
                        continue

                # get image crop_box
                margin_h = max(orig_img_h - crop_h, 0)
                margin_w = max(orig_img_w - crop_w, 0)
                offset_h, offset_w = self._rand_offset((margin_h, margin_w))
                crop_y1, crop_y2 = offset_h, offset_h + crop_h
                crop_x1, crop_x2 = offset_w, offset_w + crop_w

                crop_box = [crop_x1, crop_y1, crop_x2, crop_y2]
                # Calculate the iou between gt_bboxes and crop_boxes
                iou = self._iou_matrix(gt_bboxes,
                                       np.array([crop_box], dtype=np.float32))
                # If the maximum value of the iou is less than thresh,
                # the current crop_box is considered invalid.
                if iou.max() < thresh:
                    continue

                # If cover_all_box == True and the minimum value of
                # the iou is less than thresh, the current crop_box
                # is considered invalid.
                if self.cover_all_box and iou.min() < thresh:
                    continue

                # Get which gt_bboxes to keep after cropping.
                valid_inds = self._get_valid_inds(
                    gt_bboxes, np.array(crop_box, dtype=np.float32))
                if valid_inds.size > 0:
                    found = True
                    break

            if found:
                results = self._crop_data(results, crop_box, valid_inds)
                return results
        return results

    @cache_randomness
    def _rand_offset(self, margin: Tuple[int, int]) -> Tuple[int, int]:
        """Randomly generate crop offset.

        Args:
            margin (Tuple[int, int]): The upper bound for the offset generated
                randomly.

        Returns:
            Tuple[int, int]: The random offset for the crop.
        """
        margin_h, margin_w = margin
        offset_h = np.random.randint(0, margin_h + 1)
        offset_w = np.random.randint(0, margin_w + 1)
        return (offset_h, offset_w)

    @cache_randomness
    def _get_crop_size(self, image_size: Tuple[int, int]) -> Tuple[int, int]:
        """Randomly generates the crop size based on `image_size`.

        Args:
            image_size (Tuple[int, int]): (h, w).

        Returns:
            crop_size (Tuple[int, int]): (crop_h, crop_w) in absolute pixels.
        """
        h, w = image_size
        scale = random.uniform(*self.scaling)
        if self.aspect_ratio is not None:
            min_ar, max_ar = self.aspect_ratio
            # Sample an aspect ratio compatible with the sampled scale so
            # that both resulting edge scales stay inside [0, 1].
            aspect_ratio = random.uniform(
                max(min_ar, scale**2), min(max_ar, scale**-2))
            h_scale = scale / np.sqrt(aspect_ratio)
            w_scale = scale * np.sqrt(aspect_ratio)
        else:
            h_scale = random.uniform(*self.scaling)
            w_scale = random.uniform(*self.scaling)
        crop_h = h * h_scale
        crop_w = w * w_scale
        return int(crop_h), int(crop_w)

    def _iou_matrix(self,
                    gt_bbox: HorizontalBoxes,
                    crop_bbox: np.ndarray,
                    eps: float = 1e-10) -> np.ndarray:
        """Calculate iou between gt and image crop box.

        Args:
            gt_bbox (HorizontalBoxes): Ground truth bounding boxes.
            crop_bbox (np.ndarray): Image crop coordinates in
                [x1, y1, x2, y2] format.
            eps (float): Default to 1e-10.

        Return:
            (np.ndarray): IoU.
        """
        gt_bbox = gt_bbox.tensor.numpy()
        lefttop = np.maximum(gt_bbox[:, np.newaxis, :2], crop_bbox[:, :2])
        rightbottom = np.minimum(gt_bbox[:, np.newaxis, 2:], crop_bbox[:, 2:])

        overlap = np.prod(
            rightbottom - lefttop,
            axis=2) * (lefttop < rightbottom).all(axis=2)
        # Bug fix: each box area must be computed from that box's own
        # corners. The previous code used ``gt_bbox[:, 2:] - crop_bbox[:, :2]``
        # for *both* areas, which is not the area of either box and skews
        # the IoU used to validate crops.
        area_gt_bbox = np.prod(gt_bbox[:, 2:] - gt_bbox[:, :2], axis=1)
        area_crop_bbox = np.prod(crop_bbox[:, 2:] - crop_bbox[:, :2], axis=1)
        area_o = (area_gt_bbox[:, np.newaxis] + area_crop_bbox - overlap)
        return overlap / (area_o + eps)

    def _get_valid_inds(self, gt_bbox: HorizontalBoxes,
                        img_crop_bbox: np.ndarray) -> np.ndarray:
        """Get which Bboxes to keep at the current cropping coordinates.

        A gt box is kept when (a) its center lies inside the crop and
        (b) its clipped version still has positive width and height.

        Args:
            gt_bbox (HorizontalBoxes): Ground truth bounding boxes.
            img_crop_bbox (np.ndarray): Image crop coordinates in
                [x1, y1, x2, y2] format.

        Returns:
            (np.ndarray): Valid indexes.
        """
        cropped_box = gt_bbox.tensor.numpy().copy()
        gt_bbox = gt_bbox.tensor.numpy().copy()

        # Clip gt boxes to the crop window, then shift into crop coordinates.
        cropped_box[:, :2] = np.maximum(gt_bbox[:, :2], img_crop_bbox[:2])
        cropped_box[:, 2:] = np.minimum(gt_bbox[:, 2:], img_crop_bbox[2:])
        cropped_box[:, :2] -= img_crop_bbox[:2]
        cropped_box[:, 2:] -= img_crop_bbox[:2]

        centers = (gt_bbox[:, :2] + gt_bbox[:, 2:]) / 2
        valid = np.logical_and(img_crop_bbox[:2] <= centers,
                               centers < img_crop_bbox[2:]).all(axis=1)
        valid = np.logical_and(
            valid, (cropped_box[:, :2] < cropped_box[:, 2:]).all(axis=1))

        return np.where(valid)[0]

    def __repr__(self) -> str:
        repr_str = self.__class__.__name__
        repr_str += f'(aspect_ratio={self.aspect_ratio}, '
        repr_str += f'thresholds={self.thresholds}, '
        repr_str += f'scaling={self.scaling}, '
        repr_str += f'num_attempts={self.num_attempts}, '
        repr_str += f'allow_no_crop={self.allow_no_crop}, '
        repr_str += f'cover_all_box={self.cover_all_box})'
        return repr_str
@TRANSFORMS.register_module()
class YOLOv5CopyPaste(BaseTransform):
    """Copy-Paste used in YOLOv5 and YOLOv8.

    This transform randomly copies some objects in the image to the mirror
    position of the image. It is different from the `CopyPaste` in mmdet.

    Required Keys:

    - img (np.uint8)
    - gt_bboxes (BaseBoxes[torch.float32])
    - gt_bboxes_labels (np.int64) (optional)
    - gt_ignore_flags (bool) (optional)
    - gt_masks (PolygonMasks) (optional)

    Modified Keys:

    - img
    - gt_bboxes
    - gt_bboxes_labels (np.int64) (optional)
    - gt_ignore_flags (optional)
    - gt_masks (optional)

    Args:
        ioa_thresh (float): Ioa thresholds for deciding valid bbox.
            Defaults to 0.3.
        prob (float): Probability of choosing objects.
            Defaults to 0.5.
    """

    def __init__(self, ioa_thresh: float = 0.3, prob: float = 0.5):
        self.ioa_thresh = ioa_thresh
        self.prob = prob

    @autocast_box_type()
    def transform(self, results: dict) -> Union[dict, None]:
        """The YOLOv5 and YOLOv8 Copy-Paste transform function.

        Instance masks are required: each selected object is mirrored
        horizontally and pasted at the mirror position, and its (flipped)
        bbox/mask/label/flag are appended to the ground truth.

        Args:
            results (dict): The result dict.

        Returns:
            dict: The result dict.
        """
        # Copy-Paste needs per-instance masks; without them it is a no-op.
        if len(results.get('gt_masks', [])) == 0:
            return results
        gt_masks = results['gt_masks']
        assert isinstance(gt_masks, PolygonMasks),\
            'only support type of PolygonMasks,' \
            ' but get type: %s' % type(gt_masks)
        gt_bboxes = results['gt_bboxes']
        gt_bboxes_labels = results.get('gt_bboxes_labels', None)
        img = results['img']
        img_h, img_w = img.shape[:2]

        # calculate ioa
        gt_bboxes_flip = deepcopy(gt_bboxes)
        gt_bboxes_flip.flip_(img.shape)

        # Only objects whose mirrored bbox overlaps every existing bbox by
        # less than ioa_thresh are candidates, to avoid heavy occlusion.
        ioa = self.bbox_ioa(gt_bboxes_flip, gt_bboxes)
        indexes = torch.nonzero((ioa < self.ioa_thresh).all(1))[:, 0]
        n = len(indexes)
        # Randomly sample ``prob`` of the candidates without replacement.
        valid_inds = random.choice(
            indexes, size=round(self.prob * n), replace=False)
        if len(valid_inds) == 0:
            return results

        if gt_bboxes_labels is not None:
            # prepare labels
            gt_bboxes_labels = np.concatenate(
                (gt_bboxes_labels, gt_bboxes_labels[valid_inds]), axis=0)

        # prepare bboxes
        copypaste_bboxes = gt_bboxes_flip[valid_inds]
        gt_bboxes = gt_bboxes.cat([gt_bboxes, copypaste_bboxes])

        # prepare images
        copypaste_gt_masks = gt_masks[valid_inds]
        copypaste_gt_masks_flip = copypaste_gt_masks.flip()
        # convert poly format to bitmap format
        # example: poly: [[array(0.0, 0.0, 10.0, 0.0, 10.0, 10.0, 0.0, 10.0]]
        # -> bitmap: a mask with shape equal to (1, img_h, img_w)
        # # type1 low speed
        # copypaste_gt_masks_bitmap = copypaste_gt_masks.to_ndarray()
        # copypaste_mask = np.sum(copypaste_gt_masks_bitmap, axis=0) > 0

        # type2: rasterize the selected polygons directly with OpenCV,
        # which is faster than converting every mask to a bitmap.
        copypaste_mask = np.zeros((img_h, img_w), dtype=np.uint8)
        for poly in copypaste_gt_masks.masks:
            poly = [i.reshape((-1, 1, 2)).astype(np.int32) for i in poly]
            cv2.drawContours(copypaste_mask, poly, -1, (1, ), cv2.FILLED)

        copypaste_mask = copypaste_mask.astype(bool)

        # copy objects, and paste to the mirror position of the image
        copypaste_mask_flip = mmcv.imflip(
            copypaste_mask, direction='horizontal')
        copypaste_img = mmcv.imflip(img, direction='horizontal')
        img[copypaste_mask_flip] = copypaste_img[copypaste_mask_flip]

        # prepare masks
        gt_masks = copypaste_gt_masks.cat([gt_masks, copypaste_gt_masks_flip])

        if 'gt_ignore_flags' in results:
            # prepare gt_ignore_flags
            gt_ignore_flags = results['gt_ignore_flags']
            gt_ignore_flags = np.concatenate(
                [gt_ignore_flags, gt_ignore_flags[valid_inds]], axis=0)
            results['gt_ignore_flags'] = gt_ignore_flags

        results['img'] = img
        results['gt_bboxes'] = gt_bboxes
        if gt_bboxes_labels is not None:
            results['gt_bboxes_labels'] = gt_bboxes_labels
        results['gt_masks'] = gt_masks

        return results

    @staticmethod
    def bbox_ioa(gt_bboxes_flip: HorizontalBoxes,
                 gt_bboxes: HorizontalBoxes,
                 eps: float = 1e-7) -> np.ndarray:
        """Calculate ioa between gt_bboxes_flip and gt_bboxes.

        IoA = intersection area / area of the second box (``gt_bboxes``),
        i.e. how much of each original box is covered by a flipped box.

        Args:
            gt_bboxes_flip (HorizontalBoxes): Flipped ground truth
                bounding boxes.
            gt_bboxes (HorizontalBoxes): Ground truth bounding boxes.
            eps (float): Defaults to 1e-7.

        Return:
            (Tensor): Ioa.
        """
        gt_bboxes_flip = gt_bboxes_flip.tensor
        gt_bboxes = gt_bboxes.tensor

        # Get the coordinates of bounding boxes
        b1_x1, b1_y1, b1_x2, b1_y2 = gt_bboxes_flip.T
        b2_x1, b2_y1, b2_x2, b2_y2 = gt_bboxes.T

        # Intersection area
        inter_area = (torch.minimum(b1_x2[:, None],
                                    b2_x2) - torch.maximum(b1_x1[:, None],
                                                           b2_x1)).clip(0) * \
                     (torch.minimum(b1_y2[:, None],
                                    b2_y2) - torch.maximum(b1_y1[:, None],
                                                           b2_y1)).clip(0)

        # box2 area
        box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps

        # Intersection over box2 area
        return inter_area / box2_area

    def __repr__(self) -> str:
        repr_str = self.__class__.__name__
        repr_str += f'(ioa_thresh={self.ioa_thresh},'
        repr_str += f'prob={self.prob})'
        return repr_str
@TRANSFORMS.register_module()
class RemoveDataElement(BaseTransform):
    """Remove unnecessary data element in results.

    Args:
        keys (Union[str, Sequence[str]]): Keys need to be removed.
    """

    def __init__(self, keys: Union[str, Sequence[str]]):
        # A bare string is promoted to a one-element list; any other
        # sequence is stored as given.
        if isinstance(keys, str):
            self.keys = [keys]
        else:
            self.keys = keys

    def transform(self, results: dict) -> dict:
        """Drop every configured key from ``results``; absent keys are
        silently ignored."""
        for unwanted in self.keys:
            if unwanted in results:
                del results[unwanted]
        return results

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}(keys={self.keys})'
@TRANSFORMS.register_module()
class RegularizeRotatedBox(BaseTransform):
    """Regularize rotated boxes.

    Due to the angle periodicity, one rotated box can be represented in
    many different (x, y, w, h, t). To make each rotated box unique,
    ``regularize_boxes`` will take the remainder of the angle divided by
    180 degrees.

    For convenience, three angle_version can be used here:

    - 'oc': OpenCV Definition. Has the same box representation as
      ``cv2.minAreaRect`` the angle ranges in [-90, 0).
    - 'le90': Long Edge Definition (90). the angle ranges in [-90, 90).
      The width is always longer than the height.
    - 'le135': Long Edge Definition (135). the angle ranges in [-45, 135).
      The width is always longer than the height.

    Required Keys:

    - gt_bboxes (RotatedBoxes[torch.float32])

    Modified Keys:

    - gt_bboxes

    Args:
        angle_version (str): Angle version. Can only be 'oc',
            'le90', or 'le135'. Defaults to 'le90'.
    """

    def __init__(self, angle_version='le90') -> None:
        self.angle_version = angle_version
        # mmrotate is an optional dependency, only required when rotated
        # boxes are actually used.
        try:
            from mmrotate.structures.bbox import RotatedBoxes
        except ImportError:
            raise ImportError(
                'Please run "mim install -r requirements/mmrotate.txt" '
                'to install mmrotate first for rotated detection.')
        self.box_type = RotatedBoxes

    def transform(self, results: dict) -> dict:
        """Canonicalize the angle representation of ``gt_bboxes``."""
        boxes = results['gt_bboxes']
        assert isinstance(boxes, self.box_type)
        results['gt_bboxes'] = self.box_type(
            boxes.regularize_boxes(self.angle_version))
        return results
| 59,261 | 37.037227 | 79 | py |
mmyolo | mmyolo-main/mmyolo/deploy/object_detection.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Callable, Dict, Optional
import torch
from mmdeploy.codebase.base import CODEBASE, MMCodebase
from mmdeploy.codebase.mmdet.deploy import ObjectDetection
from mmdeploy.utils import Codebase, Task
from mmengine import Config
from mmengine.registry import Registry
MMYOLO_TASK = Registry('mmyolo_tasks')
@CODEBASE.register_module(Codebase.MMYOLO.value)
class MMYOLO(MMCodebase):
    """MMYOLO codebase class."""

    # Registry holding the deploy tasks of this codebase (``MMYOLO_TASK``,
    # defined at module level above).
    task_registry = MMYOLO_TASK

    @classmethod
    def register_deploy_modules(cls):
        """Register all rewriters for mmdet.

        MMYOLO reuses mmdet's mmdeploy rewriters; importing the modules
        below triggers their registration as a side effect.
        """
        import mmdeploy.codebase.mmdet.models  # noqa: F401
        import mmdeploy.codebase.mmdet.ops  # noqa: F401
        import mmdeploy.codebase.mmdet.structures  # noqa: F401

    @classmethod
    def register_all_modules(cls):
        """Register all modules.

        Registers the deploy rewriters, then all mmyolo and mmdet modules.
        NOTE(review): the True/False arguments are presumably the
        ``init_default_scope`` flag (mmyolo becomes the default scope,
        mmdet does not) — confirm against the setup_env signatures.
        """
        from mmdet.utils.setup_env import \
            register_all_modules as register_all_modules_mmdet

        from mmyolo.utils.setup_env import \
            register_all_modules as register_all_modules_mmyolo

        cls.register_deploy_modules()
        register_all_modules_mmyolo(True)
        register_all_modules_mmdet(False)
def _get_dataset_metainfo(model_cfg: Config):
    """Get metainfo of dataset.

    Looks up the dataset class referenced by the config's dataloaders
    (test first, then val, then train) and returns its metainfo.

    Args:
        model_cfg Config: Input model Config object.

    Returns:
        list[str]: A list of string specifying names of different class.
    """
    from mmyolo import datasets  # noqa
    from mmyolo.registry import DATASETS

    module_dict = DATASETS.module_dict
    for loader_name in [
            'test_dataloader', 'val_dataloader', 'train_dataloader'
    ]:
        if loader_name not in model_cfg:
            continue
        dataset_cfg = model_cfg[loader_name].dataset
        dataset_cls = module_dict.get(dataset_cfg.type, None)
        if dataset_cls is None:
            continue
        # Prefer metainfo loaded from the config, fall back to the class
        # attribute when loading yields nothing.
        load_meta = getattr(dataset_cls, '_load_metainfo', None)
        if isinstance(load_meta, Callable):
            meta = load_meta(dataset_cfg.get('metainfo', None))
            if meta is not None:
                return meta
        if hasattr(dataset_cls, 'METAINFO'):
            return dataset_cls.METAINFO
    return None
@MMYOLO_TASK.register_module(Task.OBJECT_DETECTION.value)
class YOLOObjectDetection(ObjectDetection):
    """YOLO Object Detection task."""

    def get_visualizer(self, name: str, save_dir: str):
        """Get visualizer.

        Builds the base visualizer and, when the config's dataset exposes
        metainfo (class names etc.), attaches it so predictions are
        rendered with the right labels.

        Args:
            name (str): Name of visualizer.
            save_dir (str): Directory to save visualization results.

        Returns:
            Visualizer: A visualizer instance.
        """
        from mmdet.visualization import DetLocalVisualizer  # noqa: F401,F403
        metainfo = _get_dataset_metainfo(self.model_cfg)
        visualizer = super().get_visualizer(name, save_dir)
        if metainfo is not None:
            visualizer.dataset_meta = metainfo
        return visualizer

    def build_pytorch_model(self,
                            model_checkpoint: Optional[str] = None,
                            cfg_options: Optional[Dict] = None,
                            **kwargs) -> torch.nn.Module:
        """Initialize torch model.

        Args:
            model_checkpoint (str): The checkpoint file of torch model,
                defaults to `None`.
            cfg_options (dict): Optional config key-pair parameters.

        Returns:
            nn.Module: An initialized torch model generated by other OpenMMLab
                codebases.
        """
        from copy import deepcopy

        from mmengine.model import revert_sync_batchnorm
        from mmengine.registry import MODELS

        from mmyolo.utils import switch_to_deploy

        model = deepcopy(self.model_cfg.model)
        # Merge legacy 'preprocess_cfg' with the newer 'data_preprocessor'
        # section; the latter wins on key conflicts.
        preprocess_cfg = deepcopy(self.model_cfg.get('preprocess_cfg', {}))
        preprocess_cfg.update(
            deepcopy(self.model_cfg.get('data_preprocessor', {})))
        model.setdefault('data_preprocessor', preprocess_cfg)
        model = MODELS.build(model)
        if model_checkpoint is not None:
            from mmengine.runner.checkpoint import load_checkpoint
            load_checkpoint(model, model_checkpoint, map_location=self.device)

        # SyncBN cannot run on a single device at export time; convert back
        # to regular BN, then switch reparameterizable modules to their
        # inference (deploy) form.
        model = revert_sync_batchnorm(model)
        switch_to_deploy(model)
        model = model.to(self.device)
        model.eval()
        return model
| 4,523 | 33.015038 | 78 | py |
mmyolo | mmyolo-main/mmyolo/deploy/models/layers/bbox_nms.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdeploy.core import mark
from torch import Tensor
def _efficient_nms(
    boxes: Tensor,
    scores: Tensor,
    max_output_boxes_per_class: int = 1000,
    iou_threshold: float = 0.5,
    score_threshold: float = 0.05,
    pre_top_k: int = -1,
    keep_top_k: int = 100,
    box_coding: int = 0,
):
    """Wrapper for `efficient_nms` with TensorRT.

    NOTE(review): ``max_output_boxes_per_class`` and ``pre_top_k`` are
    accepted (presumably for signature parity with ``multiclass_nms``) but
    are not forwarded to the TensorRT op; ``keep_top_k`` is passed as the
    plugin's ``max_output_boxes``.

    Args:
        boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4].
        scores (Tensor): The detection scores of shape
            [N, num_boxes, num_classes].
        max_output_boxes_per_class (int): Maximum number of output
            boxes per class of nms. Defaults to 1000. Unused here.
        iou_threshold (float): IOU threshold of nms. Defaults to 0.5.
        score_threshold (float): score threshold of nms.
            Defaults to 0.05.
        pre_top_k (int): Number of top K boxes to keep before nms.
            Defaults to -1. Unused here.
        keep_top_k (int): Number of top K boxes to keep after nms.
            Defaults to 100.
        box_coding (int): Bounding boxes format for nms.
            Defaults to 0 means [x, y, w, h].
            Set to 1 means [x1, y1 ,x2, y2].

    Returns:
        tuple[Tensor, Tensor]: (dets, labels), `dets` of shape [N, num_det, 5]
            and `labels` of shape [N, num_det].
    """
    # The TRT plugin expects a 4-D box tensor; insert a class axis when
    # boxes are class-agnostic (3-D).
    boxes = boxes if boxes.dim() == 4 else boxes.unsqueeze(2)
    _, det_boxes, det_scores, labels = TRTEfficientNMSop.apply(
        boxes, scores, -1, box_coding, iou_threshold, keep_top_k, '1', 0,
        score_threshold)
    dets = torch.cat([det_boxes, det_scores.unsqueeze(2)], -1)

    # retain shape info
    batch_size = boxes.size(0)

    dets_shape = dets.shape
    label_shape = labels.shape
    dets = dets.reshape([batch_size, *dets_shape[1:]])
    labels = labels.reshape([batch_size, *label_shape[1:]])
    return dets, labels
@mark('efficient_nms', inputs=['boxes', 'scores'], outputs=['dets', 'labels'])
def efficient_nms(*args, **kwargs):
    """Wrapper function for `_efficient_nms`.

    The ``@mark`` decorator labels the boxes/scores inputs and dets/labels
    outputs so mmdeploy can locate this op in the exported graph.
    """
    return _efficient_nms(*args, **kwargs)
class TRTEfficientNMSop(torch.autograd.Function):
    """Efficient NMS op for TensorRT.

    ``forward`` only fabricates tensors of the correct shapes/dtypes so the
    op can be traced during ONNX export; the actual NMS is performed by the
    TensorRT ``EfficientNMS_TRT`` plugin that ``symbolic`` emits.
    """

    @staticmethod
    def forward(
        ctx,
        boxes,
        scores,
        background_class=-1,
        box_coding=0,
        iou_threshold=0.45,
        max_output_boxes=100,
        plugin_version='1',
        score_activation=0,
        score_threshold=0.25,
    ):
        """Forward function of TRTEfficientNMSop.

        Returns placeholder (random) tensors shaped like the plugin's
        outputs: (num_det, det_boxes, det_scores, det_classes). Values are
        meaningless — only shapes/dtypes matter for tracing.
        """
        batch_size, num_boxes, num_classes = scores.shape
        num_det = torch.randint(
            0, max_output_boxes, (batch_size, 1), dtype=torch.int32)
        det_boxes = torch.randn(batch_size, max_output_boxes, 4)
        det_scores = torch.randn(batch_size, max_output_boxes)
        det_classes = torch.randint(
            0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32)
        return num_det, det_boxes, det_scores, det_classes

    @staticmethod
    def symbolic(g,
                 boxes,
                 scores,
                 background_class=-1,
                 box_coding=0,
                 iou_threshold=0.45,
                 max_output_boxes=100,
                 plugin_version='1',
                 score_activation=0,
                 score_threshold=0.25):
        """Symbolic function of TRTEfficientNMSop.

        Emits a ``TRT::EfficientNMS_TRT`` node into the ONNX graph; the
        ``_i``/``_f``/``_s`` suffixes encode int/float/string attributes.
        """
        out = g.op(
            'TRT::EfficientNMS_TRT',
            boxes,
            scores,
            background_class_i=background_class,
            box_coding_i=box_coding,
            iou_threshold_f=iou_threshold,
            max_output_boxes_i=max_output_boxes,
            plugin_version_s=plugin_version,
            score_activation_i=score_activation,
            score_threshold_f=score_threshold,
            outputs=4)
        nums, boxes, scores, classes = out
        return nums, boxes, scores, classes
| 3,931 | 33.491228 | 78 | py |
mmyolo | mmyolo-main/mmyolo/deploy/models/dense_heads/yolov5_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from functools import partial
from typing import List, Optional, Tuple
import torch
from mmdeploy.codebase.mmdet import get_post_processing_params
from mmdeploy.codebase.mmdet.models.layers import multiclass_nms
from mmdeploy.core import FUNCTION_REWRITER
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmyolo.deploy.models.layers import efficient_nms
from mmyolo.models.dense_heads import YOLOv5Head
def yolov5_bbox_decoder(priors: Tensor, bbox_preds: Tensor,
                        stride: int) -> Tensor:
    """Decode YOLOv5-style box regressions against corner-form priors.

    Args:
        priors (Tensor): Prior boxes in (x1, y1, x2, y2) corner form.
        bbox_preds (Tensor): Raw box regressions; the sigmoid is applied
            inside this function.
        stride (int): Stride of the feature map.

    Returns:
        Tensor: Decoded boxes in (cx, cy, w, h) form.
    """
    preds = bbox_preds.sigmoid()

    # Prior centers and sizes derived from the corner representation.
    prior_cx = (priors[..., 0] + priors[..., 2]) * 0.5
    prior_cy = (priors[..., 1] + priors[..., 3]) * 0.5
    prior_w = priors[..., 2] - priors[..., 0]
    prior_h = priors[..., 3] - priors[..., 1]

    # YOLOv5 decoding: center offsets span (-stride, stride); width/height
    # multipliers span (0, 4) times the prior size.
    cx = (preds[..., 0] - 0.5) * 2 * stride + prior_cx
    cy = (preds[..., 1] - 0.5) * 2 * stride + prior_cy
    w = (preds[..., 2] * 2)**2 * prior_w
    h = (preds[..., 3] * 2)**2 * prior_h

    return torch.stack([cx, cy, w, h], dim=-1)
@FUNCTION_REWRITER.register_rewriter(
    func_name='mmyolo.models.dense_heads.yolov5_head.'
    'YOLOv5Head.predict_by_feat')
def yolov5_head__predict_by_feat(self,
                                 cls_scores: List[Tensor],
                                 bbox_preds: List[Tensor],
                                 objectnesses: Optional[List[Tensor]] = None,
                                 batch_img_metas: Optional[List[dict]] = None,
                                 cfg: Optional[ConfigDict] = None,
                                 rescale: bool = False,
                                 with_nms: bool = True) -> Tuple[InstanceData]:
    """Transform a batch of output features extracted by the head into
    bbox results.

    Deploy-time rewriter: replaces the eager implementation with an
    export-friendly one that optionally routes NMS through the TensorRT
    ``EfficientNMS`` plugin (``use_efficientnms`` in the deploy config).

    Args:
        cls_scores (list[Tensor]): Classification scores for all
            scale levels, each is a 4D-tensor, has shape
            (batch_size, num_priors * num_classes, H, W).
        bbox_preds (list[Tensor]): Box energies / deltas for all
            scale levels, each is a 4D-tensor, has shape
            (batch_size, num_priors * 4, H, W).
        objectnesses (list[Tensor], Optional): Score factor for
            all scale level, each is a 4D-tensor, has shape
            (batch_size, 1, H, W).
        batch_img_metas (list[dict], Optional): Batch image meta info.
            Defaults to None.
        cfg (ConfigDict, optional): Test / postprocessing
            configuration, if None, test_cfg would be used.
            Defaults to None.
        rescale (bool): If True, return boxes in original image space.
            Defaults to False.
        with_nms (bool): If True, do nms before return boxes.
            Defaults to True.

    Returns:
        tuple[Tensor, Tensor]: The first item is an (N, num_box, 5) tensor,
            where 5 represent (tl_x, tl_y, br_x, br_y, score), N is batch
            size and the score between 0 and 1. The shape of the second
            tensor in the tuple is (N, num_box), and each element
            represents the class label of the corresponding box.
    """
    ctx = FUNCTION_REWRITER.get_context()
    detector_type = type(self)
    deploy_cfg = ctx.cfg
    use_efficientnms = deploy_cfg.get('use_efficientnms', False)
    dtype = cls_scores[0].dtype
    device = cls_scores[0].device
    bbox_decoder = self.bbox_coder.decode
    nms_func = multiclass_nms
    if use_efficientnms:
        # Plain YOLOv5 decodes to (cx, cy, w, h) with its dedicated
        # decoder (plugin box_coding=0); subclasses keep their own
        # bbox_coder and use the plugin's default box format.
        if detector_type is YOLOv5Head:
            nms_func = partial(efficient_nms, box_coding=0)
            bbox_decoder = yolov5_bbox_decoder
        else:
            nms_func = efficient_nms

    assert len(cls_scores) == len(bbox_preds)
    cfg = self.test_cfg if cfg is None else cfg
    cfg = copy.deepcopy(cfg)

    num_imgs = cls_scores[0].shape[0]
    featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]

    mlvl_priors = self.prior_generator.grid_priors(
        featmap_sizes, dtype=dtype, device=device)

    flatten_priors = torch.cat(mlvl_priors)

    # One stride value per prior, aligned with the flattened prior tensor.
    mlvl_strides = [
        flatten_priors.new_full(
            (featmap_size[0] * featmap_size[1] * self.num_base_priors, ),
            stride)
        for featmap_size, stride in zip(featmap_sizes, self.featmap_strides)
    ]
    flatten_stride = torch.cat(mlvl_strides)

    # flatten cls_scores, bbox_preds and objectness
    flatten_cls_scores = [
        cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.num_classes)
        for cls_score in cls_scores
    ]
    cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()

    flatten_bbox_preds = [
        bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
        for bbox_pred in bbox_preds
    ]
    flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)

    if objectnesses is not None:
        # Fold objectness into the per-class scores.
        flatten_objectness = [
            objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1)
            for objectness in objectnesses
        ]
        flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid()
        cls_scores = cls_scores * (flatten_objectness.unsqueeze(-1))

    scores = cls_scores

    bboxes = bbox_decoder(flatten_priors[None], flatten_bbox_preds,
                          flatten_stride)

    if not with_nms:
        return bboxes, scores

    # NMS hyper-parameters: model test_cfg takes precedence, deploy-config
    # post-processing params are the fallback.
    post_params = get_post_processing_params(deploy_cfg)
    max_output_boxes_per_class = post_params.max_output_boxes_per_class
    iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold)
    score_threshold = cfg.get('score_thr', post_params.score_threshold)
    pre_top_k = post_params.pre_top_k
    keep_top_k = cfg.get('max_per_img', post_params.keep_top_k)

    return nms_func(bboxes, scores, max_output_boxes_per_class, iou_threshold,
                    score_threshold, pre_top_k, keep_top_k)
@FUNCTION_REWRITER.register_rewriter(
    func_name='mmyolo.models.dense_heads.yolov5_head.'
    'YOLOv5Head.predict',
    backend='rknn')
def yolov5_head__predict__rknn(self, x: Tuple[Tensor], *args,
                               **kwargs) -> Tuple[Tensor, Tensor, Tensor]:
    """RKNN rewriter for ``YOLOv5Head.predict``: return the raw head
    outputs with no post-processing.

    Args:
        x (tuple[Tensor]): Multi-level features from the
            upstream network, each is a 4D-tensor.
    """
    return self(x)
@FUNCTION_REWRITER.register_rewriter(
    func_name='mmyolo.models.dense_heads.yolov5_head.'
    'YOLOv5HeadModule.forward',
    backend='rknn')
def yolov5_head_module__forward__rknn(
        self, x: Tensor, *args, **kwargs) -> Tuple[Tensor, Tensor, Tensor]:
    """RKNN rewriter: apply each level's prediction conv to its feature
    map and return the per-level raw outputs."""
    return [self.convs_pred[idx](feat) for idx, feat in enumerate(x)]
| 7,242 | 37.121053 | 79 | py |
mmyolo | mmyolo-main/mmyolo/engine/optimizers/yolov5_optim_constructor.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch.nn as nn
from mmengine.dist import get_world_size
from mmengine.logging import print_log
from mmengine.model import is_model_wrapper
from mmengine.optim import OptimWrapper
from mmyolo.registry import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIM_WRAPPERS,
OPTIMIZERS)
@OPTIM_WRAPPER_CONSTRUCTORS.register_module()
class YOLOv5OptimizerConstructor:
    """YOLOv5 constructor for optimizers.

    It has the following functions:

        - divides the optimizer parameters into 3 groups:
        Conv, Bias and BN

        - support `weight_decay` parameter adaption based on
        `batch_size_per_gpu`

    Args:
        optim_wrapper_cfg (dict): The config dict of the optimizer wrapper.
            Positional fields are

                - ``type``: class name of the OptimizerWrapper
                - ``optimizer``: The configuration of optimizer.

            Optional fields are

                - any arguments of the corresponding optimizer wrapper type,
                  e.g., accumulative_counts, clip_grad, etc.

            The positional fields of ``optimizer`` are

                - `type`: class name of the optimizer.

            Optional fields are

                - any arguments of the corresponding optimizer type, e.g.,
                  lr, weight_decay, momentum, etc.

        paramwise_cfg (dict, optional): Parameter-wise options. Must include
            `base_total_batch_size` if not None. If the total input batch
            is smaller than `base_total_batch_size`, the `weight_decay`
            parameter will be kept unchanged, otherwise linear scaling.

    Example:
        >>> model = torch.nn.modules.Conv1d(1, 1, 1)
        >>> optim_wrapper_cfg = dict(
        >>>     dict(type='OptimWrapper', optimizer=dict(type='SGD', lr=0.01,
        >>>         momentum=0.9, weight_decay=0.0001, batch_size_per_gpu=16))
        >>> paramwise_cfg = dict(base_total_batch_size=64)
        >>> optim_wrapper_builder = YOLOv5OptimizerConstructor(
        >>>     optim_wrapper_cfg, paramwise_cfg)
        >>> optim_wrapper = optim_wrapper_builder(model)
    """

    def __init__(self,
                 optim_wrapper_cfg: dict,
                 paramwise_cfg: Optional[dict] = None):
        # Default reference batch size used for weight-decay scaling.
        if paramwise_cfg is None:
            paramwise_cfg = {'base_total_batch_size': 64}
        assert 'base_total_batch_size' in paramwise_cfg

        if not isinstance(optim_wrapper_cfg, dict):
            raise TypeError('optimizer_cfg should be a dict',
                            f'but got {type(optim_wrapper_cfg)}')
        assert 'optimizer' in optim_wrapper_cfg, (
            '`optim_wrapper_cfg` must contain "optimizer" config')

        self.optim_wrapper_cfg = optim_wrapper_cfg
        # The optimizer sub-config is popped out and built separately.
        self.optimizer_cfg = self.optim_wrapper_cfg.pop('optimizer')
        self.base_total_batch_size = paramwise_cfg['base_total_batch_size']

    def __call__(self, model: nn.Module) -> OptimWrapper:
        # Unwrap DDP-style wrappers so module traversal sees the real model.
        if is_model_wrapper(model):
            model = model.module
        optimizer_cfg = self.optimizer_cfg.copy()
        weight_decay = optimizer_cfg.pop('weight_decay', 0)

        if 'batch_size_per_gpu' in optimizer_cfg:
            batch_size_per_gpu = optimizer_cfg.pop('batch_size_per_gpu')
            # No scaling if total_batch_size is less than
            # base_total_batch_size, otherwise linear scaling.
            total_batch_size = get_world_size() * batch_size_per_gpu
            accumulate = max(
                round(self.base_total_batch_size / total_batch_size), 1)
            scale_factor = total_batch_size * \
                accumulate / self.base_total_batch_size

            if scale_factor != 1:
                weight_decay *= scale_factor
                print_log(f'Scaled weight_decay to {weight_decay}', 'current')

        # Three groups: [0] conv/linear weights (decayed),
        # [1] norm-layer weights (no decay), [2] biases (no decay).
        params_groups = [], [], []

        for v in model.modules():
            if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
                params_groups[2].append(v.bias)
            # Includes SyncBatchNorm
            if isinstance(v, nn.modules.batchnorm._NormBase):
                params_groups[1].append(v.weight)
            elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
                params_groups[0].append(v.weight)

        # Note: Make sure bias is in the last parameter group
        optimizer_cfg['params'] = []
        # conv
        optimizer_cfg['params'].append({
            'params': params_groups[0],
            'weight_decay': weight_decay
        })
        # bn
        optimizer_cfg['params'].append({'params': params_groups[1]})
        # bias
        optimizer_cfg['params'].append({'params': params_groups[2]})

        print_log(
            'Optimizer groups: %g .bias, %g conv.weight, %g other' %
            (len(params_groups[2]), len(params_groups[0]), len(
                params_groups[1])), 'current')
        del params_groups

        optimizer = OPTIMIZERS.build(optimizer_cfg)
        optim_wrapper = OPTIM_WRAPPERS.build(
            self.optim_wrapper_cfg, default_args=dict(optimizer=optimizer))
        return optim_wrapper
| 5,201 | 38.112782 | 78 | py |
mmyolo | mmyolo-main/mmyolo/engine/optimizers/yolov7_optim_wrapper_constructor.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch.nn as nn
from mmengine.dist import get_world_size
from mmengine.logging import print_log
from mmengine.model import is_model_wrapper
from mmengine.optim import OptimWrapper
from mmyolo.models.dense_heads.yolov7_head import ImplicitA, ImplicitM
from mmyolo.registry import (OPTIM_WRAPPER_CONSTRUCTORS, OPTIM_WRAPPERS,
OPTIMIZERS)
# TODO: Consider merging into YOLOv5OptimizerConstructor
@OPTIM_WRAPPER_CONSTRUCTORS.register_module()
class YOLOv7OptimWrapperConstructor:
    """YOLOv7 constructor for optimizer wrappers.

    It has the following functions:

        - divides the optimizer parameters into 3 groups:
          Conv, Bias and BN/ImplicitA/ImplicitM
        - support `weight_decay` parameter adaption based on
          `batch_size_per_gpu`

    Args:
        optim_wrapper_cfg (dict): The config dict of the optimizer wrapper.
            Positional fields are

                - ``type``: class name of the OptimizerWrapper
                - ``optimizer``: The configuration of optimizer.

            Optional fields are

                - any arguments of the corresponding optimizer wrapper type,
                  e.g., accumulative_counts, clip_grad, etc.

            The positional fields of ``optimizer`` are

                - `type`: class name of the optimizer.

            Optional fields are

                - any arguments of the corresponding optimizer type, e.g.,
                  lr, weight_decay, momentum, etc.
        paramwise_cfg (dict, optional): Parameter-wise options. Must include
            `base_total_batch_size` if not None. If the total input batch
            is smaller than `base_total_batch_size`, the `weight_decay`
            parameter will be kept unchanged, otherwise linear scaling.

    Example:
        >>> model = torch.nn.modules.Conv1d(1, 1, 1)
        >>> optim_wrapper_cfg = dict(
        >>>     dict(type='OptimWrapper', optimizer=dict(type='SGD', lr=0.01,
        >>>         momentum=0.9, weight_decay=0.0001, batch_size_per_gpu=16))
        >>> paramwise_cfg = dict(base_total_batch_size=64)
        >>> optim_wrapper_builder = YOLOv7OptimWrapperConstructor(
        >>>     optim_wrapper_cfg, paramwise_cfg)
        >>> optim_wrapper = optim_wrapper_builder(model)
    """

    def __init__(self,
                 optim_wrapper_cfg: dict,
                 paramwise_cfg: Optional[dict] = None):
        # Default to the reference batch size used by the YOLOv7 recipe.
        if paramwise_cfg is None:
            paramwise_cfg = {'base_total_batch_size': 64}
        assert 'base_total_batch_size' in paramwise_cfg

        if not isinstance(optim_wrapper_cfg, dict):
            raise TypeError('optimizer_cfg should be a dict',
                            f'but got {type(optim_wrapper_cfg)}')
        assert 'optimizer' in optim_wrapper_cfg, (
            '`optim_wrapper_cfg` must contain "optimizer" config')

        self.optim_wrapper_cfg = optim_wrapper_cfg
        # The optimizer config is popped out and built separately once the
        # per-group `params` have been assembled in `__call__`.
        self.optimizer_cfg = self.optim_wrapper_cfg.pop('optimizer')
        self.base_total_batch_size = paramwise_cfg['base_total_batch_size']

    def __call__(self, model: nn.Module) -> OptimWrapper:
        """Build an :obj:`OptimWrapper` with 3 parameter groups for ``model``."""
        if is_model_wrapper(model):
            model = model.module
        optimizer_cfg = self.optimizer_cfg.copy()
        weight_decay = optimizer_cfg.pop('weight_decay', 0)

        if 'batch_size_per_gpu' in optimizer_cfg:
            batch_size_per_gpu = optimizer_cfg.pop('batch_size_per_gpu')
            # No scaling if total_batch_size is less than
            # base_total_batch_size, otherwise linear scaling.
            total_batch_size = get_world_size() * batch_size_per_gpu
            accumulate = max(
                round(self.base_total_batch_size / total_batch_size), 1)
            scale_factor = total_batch_size * \
                accumulate / self.base_total_batch_size

            if scale_factor != 1:
                weight_decay *= scale_factor
                print_log(f'Scaled weight_decay to {weight_decay}', 'current')

        # params_groups[0]: no-decay params (ImplicitA/ImplicitM, norm weights)
        # params_groups[1]: decayed params (conv/linear weights)
        # params_groups[2]: biases (no decay)
        params_groups = [], [], []
        for v in model.modules():
            # no decay
            # Caution: Coupling with model
            if isinstance(v, (ImplicitA, ImplicitM)):
                params_groups[0].append(v.implicit)
            elif isinstance(v, nn.modules.batchnorm._NormBase):
                params_groups[0].append(v.weight)
            # apply decay
            elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
                params_groups[1].append(v.weight)  # apply decay

            # biases, no decay
            if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
                params_groups[2].append(v.bias)

        # Note: Make sure bias is in the last parameter group
        optimizer_cfg['params'] = []
        # conv
        optimizer_cfg['params'].append({
            'params': params_groups[1],
            'weight_decay': weight_decay
        })
        # bn ...
        optimizer_cfg['params'].append({'params': params_groups[0]})
        # bias
        optimizer_cfg['params'].append({'params': params_groups[2]})

        print_log(
            'Optimizer groups: %g .bias, %g conv.weight, %g other' %
            (len(params_groups[2]), len(params_groups[1]), len(
                params_groups[0])), 'current')
        del params_groups

        optimizer = OPTIMIZERS.build(optimizer_cfg)
        optim_wrapper = OPTIM_WRAPPERS.build(
            self.optim_wrapper_cfg, default_args=dict(optimizer=optimizer))
        return optim_wrapper
| 5,576 | 38.835714 | 78 | py |
mmyolo | mmyolo-main/mmyolo/utils/large_image.py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence, Tuple
import torch
from mmcv.ops import batched_nms
from mmdet.structures import DetDataSample, SampleList
from mmengine.structures import InstanceData
def shift_rbboxes(bboxes: torch.Tensor, offset: Sequence[int]):
    """Translate rotated bboxes by ``offset``.

    Args:
        bboxes (Tensor): Rotated boxes of shape (n, 5) in
            (x, y, w, h, a) format.
        offset (Sequence[int]): (dx, dy) translation offsets.

    Returns:
        Tensor: A new tensor whose box centers are shifted; widths,
        heights and angles are left untouched.
    """
    result = bboxes.clone()
    result[:, :2] += result.new_tensor(offset)
    return result
def shift_predictions(det_data_samples: SampleList,
                      offsets: Sequence[Tuple[int, int]],
                      src_image_shape: Tuple[int, int]) -> SampleList:
    """Map patch-level predictions back onto the original large image.

    Args:
        det_data_samples (List[:obj:`DetDataSample`]): Per-patch results.
        offsets (Sequence[Tuple[int, int]]): Top-left corner of each patch
            inside the source image.
        src_image_shape (Tuple[int, int]): A (height, width) tuple of the
            source image, needed for mask shifting.

    Returns:
        :obj:`InstanceData`: All shifted instances concatenated together.
    """
    try:
        from sahi.slicing import shift_bboxes, shift_masks
    except ImportError:
        raise ImportError('Please run "pip install -U sahi" '
                          'to install sahi first for large image inference.')

    assert len(det_data_samples) == len(
        offsets), 'The `results` should has the ' 'same length with `offsets`.'
    shifted = []
    for sample, offset in zip(det_data_samples, offsets):
        instances = sample.pred_instances.clone()
        box_dim = instances.bboxes.size(-1)
        if box_dim == 4:
            # Horizontal bboxes
            instances.bboxes = shift_bboxes(instances.bboxes, offset)
        elif box_dim == 5:
            # Rotated bboxes
            instances.bboxes = shift_rbboxes(instances.bboxes, offset)
        else:
            raise NotImplementedError
        # Masks are shifted too, when present on the sample.
        if 'masks' in sample:
            instances.masks = shift_masks(instances.masks, offset,
                                          src_image_shape)
        shifted.append(instances.clone())
    return InstanceData.cat(shifted)
def merge_results_by_nms(results: SampleList, offsets: Sequence[Tuple[int,
                                                                      int]],
                         src_image_shape: Tuple[int, int],
                         nms_cfg: dict) -> DetDataSample:
    """Fuse per-patch detections into a single result with batched NMS.

    Args:
        results (List[:obj:`DetDataSample`]): A list of patch results.
        offsets (Sequence[Tuple[int, int]]): Positions of the left top points
            of patches.
        src_image_shape (Tuple[int, int]): A (height, width) tuple of the
            large image.
        nms_cfg (dict): NMS type and parameters such as `iou_threshold`.

    Returns:
        :obj:`DetDataSample`: merged results.
    """
    shifted = shift_predictions(results, offsets, src_image_shape)

    # NMS is class-aware: `idxs` keeps boxes of different labels apart.
    _, keep_idxs = batched_nms(
        boxes=shifted.bboxes,
        scores=shifted.scores,
        idxs=shifted.labels,
        nms_cfg=nms_cfg)

    merged = results[0].clone()
    merged.pred_instances = shifted[keep_idxs]
    return merged
| 3,871 | 36.230769 | 79 | py |
mmyolo | mmyolo-main/mmyolo/utils/misc.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import urllib
import numpy as np
import torch
from mmengine.utils import scandir
from prettytable import PrettyTable
from mmyolo.models import RepVGGBlock
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif',
'.tiff', '.webp')
def switch_to_deploy(model):
    """Fuse every re-parameterizable block of ``model`` for deployment."""
    for module in model.modules():
        if isinstance(module, RepVGGBlock):
            module.switch_to_deploy()
    print('Switch model to deploy modality.')
def auto_arrange_images(image_list: list, image_column: int = 2) -> np.ndarray:
    """Auto arrange image to image_column x N row.

    All images must share the same shape; missing grid cells are padded
    with white placeholder images.

    Args:
        image_list (list): cv2 image list.
        image_column (int): Arrange to N column. Default: 2.

    Return:
        (np.ndarray): image_column x N row merge image
    """
    img_count = len(image_list)
    if img_count <= image_column:
        # A single row is enough; no padding needed.
        image_show = np.concatenate(image_list, axis=1)
    else:
        # Use ceiling division so every image gets a grid cell.
        # Bugfix: the previous `round(img_count / image_column)` used
        # banker's rounding and could under-count rows (e.g. 5 images in
        # 2 columns -> round(2.5) == 2 rows -> only 4 cells, silently
        # dropping the 5th image).
        image_row = (img_count + image_column - 1) // image_column
        fill_img_list = [np.ones(image_list[0].shape, dtype=np.uint8) * 255
                         ] * (
                             image_row * image_column - img_count)
        image_list.extend(fill_img_list)
        merge_imgs_col = []
        for i in range(image_row):
            start_col = image_column * i
            end_col = image_column * (i + 1)
            merge_col = np.hstack(image_list[start_col:end_col])
            merge_imgs_col.append(merge_col)

        # merge to one image
        image_show = np.vstack(merge_imgs_col)
    return image_show
def get_file_list(source_root: str) -> [list, dict]:
    """Collect all source image paths from a directory, URL or single file.

    Args:
        source_root (str): image or video source path

    Return:
        source_file_path_list (list): A list for all source file.
        source_type (dict): Source type: file or url or dir.
    """
    is_dir = os.path.isdir(source_root)
    is_url = source_root.startswith(('http:/', 'https:/'))
    is_file = os.path.splitext(source_root)[-1].lower() in IMG_EXTENSIONS

    source_file_path_list = []
    if is_dir:
        # Recursively gather every image below the directory.
        source_file_path_list = [
            os.path.join(source_root, file)
            for file in scandir(source_root, IMG_EXTENSIONS, recursive=True)
        ]
    elif is_url:
        # Download the remote file into the current working directory.
        filename = os.path.basename(
            urllib.parse.unquote(source_root).split('?')[0])
        file_save_path = os.path.join(os.getcwd(), filename)
        print(f'Downloading source file to {file_save_path}')
        torch.hub.download_url_to_file(source_root, file_save_path)
        source_file_path_list = [file_save_path]
    elif is_file:
        # A single local image file.
        source_file_path_list = [source_root]
    else:
        print('Cannot find image file.')

    source_type = dict(is_dir=is_dir, is_url=is_url, is_file=is_file)
    return source_file_path_list, source_type
def show_data_classes(data_classes):
    """When printing an error, all class names of the dataset."""
    print('\n\nThe name of the class contained in the dataset:')
    data_classes_info = PrettyTable()
    data_classes_info.title = 'Information of dataset class'
    # List Print Settings
    # If the quantity is too large, 25 rows will be displayed in each column
    if len(data_classes) < 25:
        data_classes_info.add_column('Class name', data_classes)
    else:
        # Bugfix: the old condition `len % 25 != 0 and len > 25` skipped
        # datasets whose class count is an exact multiple of 25
        # (25, 50, 75, ...), printing an empty table. Ceiling division
        # covers every case >= 25.
        col_num = (len(data_classes) + 24) // 25
        data_name_list = list(data_classes)
        # Pad with empty strings so every column holds exactly 25 entries.
        data_name_list += [''] * (col_num * 25 - len(data_classes))
        for i in range(0, len(data_name_list), 25):
            data_classes_info.add_column('Class name',
                                         data_name_list[i:i + 25])

    # Align display data to the left
    data_classes_info.align['Class name'] = 'l'
    print(data_classes_info)
def is_metainfo_lower(cfg):
    """Assert that every custom ``metainfo`` key in the config is lowercase."""

    def judge_keys(dataloader_cfg):
        # Drill down through nested `dataset` wrappers to the leaf config.
        while 'dataset' in dataloader_cfg:
            dataloader_cfg = dataloader_cfg['dataset']
        if 'metainfo' not in dataloader_cfg:
            return
        all_keys = dataloader_cfg['metainfo'].keys()
        assert all(str(k).islower() for k in all_keys), \
            f'The keys in dataset metainfo must be all lowercase, but got {all_keys}. ' \
            f'Please refer to https://github.com/open-mmlab/mmyolo/blob/e62c8c4593/configs/yolov5/yolov5_s-v61_syncbn_fast_1xb4-300e_balloon.py#L8'  # noqa

    for loader_key in ('train_dataloader', 'val_dataloader', 'test_dataloader'):
        judge_keys(cfg.get(loader_key, {}))
| 4,932 | 35.813433 | 175 | py |
mmyolo | mmyolo-main/mmyolo/utils/boxam_utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import bisect
import copy
import warnings
from pathlib import Path
from typing import Callable, List, Optional, Tuple, Union
import cv2
import numpy as np
import torch
import torch.nn as nn
import torchvision
from mmcv.transforms import Compose
from mmdet.evaluation import get_classes
from mmdet.utils import ConfigType
from mmengine.config import Config
from mmengine.registry import init_default_scope
from mmengine.runner import load_checkpoint
from mmengine.structures import InstanceData
from torch import Tensor
from mmyolo.registry import MODELS
try:
from pytorch_grad_cam import (AblationCAM, AblationLayer,
ActivationsAndGradients)
from pytorch_grad_cam import GradCAM as Base_GradCAM
from pytorch_grad_cam import GradCAMPlusPlus as Base_GradCAMPlusPlus
from pytorch_grad_cam.base_cam import BaseCAM
from pytorch_grad_cam.utils.image import scale_cam_image, show_cam_on_image
from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection
except ImportError:
pass
def init_detector(
    config: Union[str, Path, Config],
    checkpoint: Optional[str] = None,
    palette: str = 'coco',
    device: str = 'cuda:0',
    cfg_options: Optional[dict] = None,
) -> nn.Module:
    """Initialize a detector from config file.

    Args:
        config (str, :obj:`Path`, or :obj:`mmengine.Config`): Config file path,
            :obj:`Path`, or the config object.
        checkpoint (str, optional): Checkpoint path. If left as None, the model
            will not load any weights.
        palette (str): Color palette used for visualization. If palette
            is stored in checkpoint, use checkpoint's palette first, otherwise
            use externally passed palette. Currently, supports 'coco', 'voc',
            'citys' and 'random'. Defaults to coco.
        device (str): The device where the anchors will be put on.
            Defaults to cuda:0.
        cfg_options (dict, optional): Options to override some settings in
            the used config.

    Returns:
        nn.Module: The constructed detector.
    """
    if isinstance(config, (str, Path)):
        config = Config.fromfile(config)
    elif not isinstance(config, Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')
    if cfg_options is not None:
        config.merge_from_dict(cfg_options)
    elif 'init_cfg' in config.model.backbone:
        # Avoid loading the backbone's pretrained weights: the full
        # checkpoint (if given) is loaded below anyway.
        # NOTE(review): like mmdet's `init_detector`, this is an `elif`,
        # so init_cfg is only cleared when no `cfg_options` are passed.
        config.model.backbone.init_cfg = None

    # only change this
    # grad based method requires train_cfg
    # config.model.train_cfg = None
    init_default_scope(config.get('default_scope', 'mmyolo'))

    model = MODELS.build(config.model)
    if checkpoint is not None:
        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
        # Weights converted from elsewhere may not have meta fields.
        checkpoint_meta = checkpoint.get('meta', {})
        # save the dataset_meta in the model for convenience
        if 'dataset_meta' in checkpoint_meta:
            # mmdet 3.x, all keys should be lowercase
            model.dataset_meta = {
                k.lower(): v
                for k, v in checkpoint_meta['dataset_meta'].items()
            }
        elif 'CLASSES' in checkpoint_meta:
            # < mmdet 3.x
            classes = checkpoint_meta['CLASSES']
            model.dataset_meta = {'classes': classes, 'palette': palette}
        else:
            # Fall back to COCO class names when the checkpoint has no meta.
            warnings.simplefilter('once')
            warnings.warn(
                'dataset_meta or class names are not saved in the '
                'checkpoint\'s meta data, use COCO classes by default.')
            model.dataset_meta = {
                'classes': get_classes('coco'),
                'palette': palette
            }
    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()
    return model
def reshape_transform(feats: Union[Tensor, List[Tensor]],
                      max_shape: Tuple[int, int] = (20, 20),
                      is_need_grad: bool = False):
    """Resize (multi-level) feature maps to one spatial size and
    concatenate them along the channel dimension.

    Absolute values are taken before resizing so activation magnitude is
    kept regardless of sign. A ``-1`` in ``max_shape`` means "use the
    largest spatial size found among the inputs".
    """
    if len(max_shape) == 1:
        max_shape = max_shape * 2

    if isinstance(feats, torch.Tensor):
        feats = [feats]
    elif is_need_grad:
        raise NotImplementedError('The `grad_base` method does not '
                                  'support output multi-activation layers')

    max_h = max(im.shape[-2] for im in feats)
    max_w = max(im.shape[-1] for im in feats)
    if -1 in max_shape:
        target_shape = (max_h, max_w)
    else:
        # Never upsample beyond the largest input feature map.
        target_shape = (min(max_h, max_shape[0]), min(max_w, max_shape[1]))

    resized = [
        torch.nn.functional.interpolate(
            torch.abs(feat), target_shape, mode='bilinear') for feat in feats
    ]
    return torch.cat(resized, dim=1)
class BoxAMDetectorWrapper(nn.Module):
    """Wrap the mmdet model class to facilitate handling of non-tensor
    situations during inference.

    The wrapper holds an initialized detector plus a test pipeline, and
    exposes a single ``__call__`` that either computes losses (for
    grad-based AM methods) or runs a normal prediction step, depending on
    the ``is_need_loss`` flag.
    """

    def __init__(self,
                 cfg: ConfigType,
                 checkpoint: str,
                 score_thr: float,
                 device: str = 'cuda:0'):
        super().__init__()
        self.cfg = cfg
        self.device = device
        self.score_thr = score_thr
        self.checkpoint = checkpoint
        self.detector = init_detector(self.cfg, self.checkpoint, device=device)

        # Inputs are ndarrays (not files), so swap the first loading step.
        pipeline_cfg = copy.deepcopy(self.cfg.test_dataloader.dataset.pipeline)
        pipeline_cfg[0].type = 'mmdet.LoadImageFromNDArray'

        # Annotation loading is unnecessary at inference time; drop it.
        new_test_pipeline = []
        for pipeline in pipeline_cfg:
            if not pipeline['type'].endswith('LoadAnnotations'):
                new_test_pipeline.append(pipeline)
        self.test_pipeline = Compose(new_test_pipeline)

        self.is_need_loss = False
        self.input_data = None
        self.image = None

    def need_loss(self, is_need_loss: bool):
        """Grad-based methods require loss."""
        self.is_need_loss = is_need_loss

    def set_input_data(self,
                       image: np.ndarray,
                       pred_instances: Optional[InstanceData] = None):
        """Set the input data to be used in the next step.

        For the loss path, predicted instances are fed back in as pseudo
        ground truth; for the predict path the image alone suffices.
        """
        self.image = image

        if self.is_need_loss:
            assert pred_instances is not None
            pred_instances = pred_instances.numpy()
            data = dict(
                img=self.image,
                img_id=0,
                gt_bboxes=pred_instances.bboxes,
                gt_bboxes_labels=pred_instances.labels)
            data = self.test_pipeline(data)
        else:
            data = dict(img=self.image, img_id=0)
            data = self.test_pipeline(data)
            # Pseudo-collate into a batch of one.
            data['inputs'] = [data['inputs']]
            data['data_samples'] = [data['data_samples']]
        self.input_data = data

    def __call__(self, *args, **kwargs):
        assert self.input_data is not None
        if self.is_need_loss:
            # Maybe this is a direction that can be optimized
            # self.detector.init_weights()

            # Loss computation needs the head in training mode.
            self.detector.bbox_head.head_module.training = True
            if hasattr(self.detector.bbox_head, 'featmap_sizes'):
                # Prevent the model algorithm error when calculating loss
                self.detector.bbox_head.featmap_sizes = None

            data_ = {}
            data_['inputs'] = [self.input_data['inputs']]
            data_['data_samples'] = [self.input_data['data_samples']]
            data = self.detector.data_preprocessor(data_, training=False)
            loss = self.detector._run_forward(data, mode='loss')

            if hasattr(self.detector.bbox_head, 'featmap_sizes'):
                # Reset cached featmap sizes again after the forward pass.
                self.detector.bbox_head.featmap_sizes = None
            return [loss]
        else:
            self.detector.bbox_head.head_module.training = False
            with torch.no_grad():
                results = self.detector.test_step(self.input_data)
                return results
class BoxAMDetectorVisualizer:
    """Box AM visualization class.

    Builds the requested CAM method over the wrapped detector and renders
    the resulting activation maps together with the detected boxes.
    """

    def __init__(self,
                 method_class,
                 model: nn.Module,
                 target_layers: List,
                 reshape_transform: Optional[Callable] = None,
                 is_need_grad: bool = False,
                 extra_params: Optional[dict] = None):
        self.target_layers = target_layers
        self.reshape_transform = reshape_transform
        self.is_need_grad = is_need_grad

        if method_class.__name__ == 'AblationCAM':
            # AblationCAM takes extra construction parameters.
            batch_size = extra_params.get('batch_size', 1)
            ratio_channels_to_ablate = extra_params.get(
                'ratio_channels_to_ablate', 1.)
            self.cam = AblationCAM(
                model,
                target_layers,
                use_cuda=True if 'cuda' in model.device else False,
                reshape_transform=reshape_transform,
                batch_size=batch_size,
                ablation_layer=extra_params['ablation_layer'],
                ratio_channels_to_ablate=ratio_channels_to_ablate)
        else:
            self.cam = method_class(
                model,
                target_layers,
                use_cuda=True if 'cuda' in model.device else False,
                reshape_transform=reshape_transform,
            )
        if self.is_need_grad:
            # Grad-based methods re-attach hooks per call via
            # `switch_activations_and_grads`; release the initial hooks.
            self.cam.activations_and_grads.release()

        self.classes = model.detector.dataset_meta['classes']
        # One random RGB color per class for box drawing.
        self.COLORS = np.random.uniform(0, 255, size=(len(self.classes), 3))

    def switch_activations_and_grads(self, model) -> None:
        """In the grad-based method, we need to switch
        ``ActivationsAndGradients`` layer, otherwise an error will occur."""
        self.cam.model = model

        if self.is_need_grad is True:
            self.cam.activations_and_grads = ActivationsAndGradients(
                model, self.target_layers, self.reshape_transform)
            self.is_need_grad = False
        else:
            self.cam.activations_and_grads.release()
            self.is_need_grad = True

    def __call__(self, img, targets, aug_smooth=False, eigen_smooth=False):
        # HWC uint8 image -> NCHW tensor expected by pytorch_grad_cam.
        img = torch.from_numpy(img)[None].permute(0, 3, 1, 2)
        return self.cam(img, targets, aug_smooth, eigen_smooth)[0, :]

    def show_am(self,
                image: np.ndarray,
                pred_instance: InstanceData,
                grayscale_am: np.ndarray,
                with_norm_in_bboxes: bool = False):
        """Normalize the AM to be in the range [0, 1] inside every bounding
        boxes, and zero outside of the bounding boxes."""
        boxes = pred_instance.bboxes
        labels = pred_instance.labels

        if with_norm_in_bboxes is True:
            boxes = boxes.astype(np.int32)
            renormalized_am = np.zeros(grayscale_am.shape, dtype=np.float32)
            images = []
            for x1, y1, x2, y2 in boxes:
                # Re-scale the AM per box, zeroing everything outside it.
                img = renormalized_am * 0
                img[y1:y2, x1:x2] = scale_cam_image(
                    [grayscale_am[y1:y2, x1:x2].copy()])[0]
                images.append(img)

            renormalized_am = np.max(np.float32(images), axis=0)
            renormalized_am = scale_cam_image([renormalized_am])[0]
        else:
            renormalized_am = grayscale_am

        am_image_renormalized = show_cam_on_image(
            image / 255, renormalized_am, use_rgb=False)

        image_with_bounding_boxes = self._draw_boxes(
            boxes, labels, am_image_renormalized, pred_instance.get('scores'))
        return image_with_bounding_boxes

    def _draw_boxes(self,
                    boxes: List,
                    labels: List,
                    image: np.ndarray,
                    scores: Optional[List] = None):
        """draw boxes on image."""
        for i, box in enumerate(boxes):
            label = labels[i]
            color = self.COLORS[label]
            cv2.rectangle(image, (int(box[0]), int(box[1])),
                          (int(box[2]), int(box[3])), color, 2)
            if scores is not None:
                score = scores[i]
                text = str(self.classes[label]) + ': ' + str(
                    round(score * 100, 1))
            else:
                text = self.classes[label]

            cv2.putText(
                image,
                text, (int(box[0]), int(box[1] - 5)),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                color,
                1,
                lineType=cv2.LINE_AA)
        return image
class DetAblationLayer(AblationLayer):
    """Det AblationLayer.

    Extends ``AblationLayer`` to handle multi-level (FPN-style) activation
    lists in addition to single tensors.
    """

    def __init__(self):
        super().__init__()
        # For list inputs this becomes a list of repeated per-level tensors.
        self.activations = None

    def set_next_batch(self, input_batch_index, activations,
                       num_channels_to_ablate):
        """Extract the next batch member from activations, and repeat it
        num_channels_to_ablate times."""
        if isinstance(activations, torch.Tensor):
            # Single-tensor case is handled by the base class.
            return super().set_next_batch(input_batch_index, activations,
                                          num_channels_to_ablate)

        self.activations = []
        for activation in activations:
            activation = activation[
                input_batch_index, :, :, :].clone().unsqueeze(0)
            self.activations.append(
                activation.repeat(num_channels_to_ablate, 1, 1, 1))

    def __call__(self, x):
        """Go over the activation indices to be ablated, stored in
        self.indices."""
        result = self.activations

        if isinstance(result, torch.Tensor):
            return super().__call__(x)

        # Channel indices are global across pyramid levels; the cumulative
        # sum maps a global index back to (level, channel-within-level).
        channel_cumsum = np.cumsum([r.shape[1] for r in result])
        num_channels_to_ablate = result[0].size(0)  # batch
        for i in range(num_channels_to_ablate):
            pyramid_layer = bisect.bisect_right(channel_cumsum,
                                                self.indices[i])
            if pyramid_layer > 0:
                index_in_pyramid_layer = self.indices[i] - channel_cumsum[
                    pyramid_layer - 1]
            else:
                index_in_pyramid_layer = self.indices[i]
            # Suppress the chosen channel with a large negative constant.
            result[pyramid_layer][i, index_in_pyramid_layer, :, :] = -1000
        return result
class DetBoxScoreTarget:
    """Det Score calculation class.

    In the case of the grad-free method, the calculation method is that
    for every original detected bounding box specified in "bboxes",
    assign a score on how the current bounding boxes match it,

        1. In Bbox IoU
        2. In the classification score.
        3. In Mask IoU if ``segms`` exist.

    If there is not a large enough overlap, or the category changed,
    assign a score of 0. The total score is the sum of all the box scores.

    In the case of the grad-based method, the calculation method is
    the sum of losses after excluding a specific key.
    """

    def __init__(self,
                 pred_instance: InstanceData,
                 match_iou_thr: float = 0.5,
                 device: str = 'cuda:0',
                 ignore_loss_params: Optional[List] = None):
        self.focal_bboxes = pred_instance.bboxes
        self.focal_labels = pred_instance.labels
        self.match_iou_thr = match_iou_thr
        self.device = device
        self.ignore_loss_params = ignore_loss_params
        if ignore_loss_params is not None:
            assert isinstance(self.ignore_loss_params, list)

    def __call__(self, results):
        output = torch.tensor([0.], device=self.device)

        if 'loss_cls' in results:
            # grad-based method
            # results is dict
            for loss_key, loss_value in results.items():
                if 'loss' not in loss_key:
                    continue
                # Bugfix: `ignore_loss_params` defaults to None, and
                # `loss_key in None` would raise TypeError. Only apply
                # the ignore filter when a list was actually provided.
                if self.ignore_loss_params is not None and \
                        loss_key in self.ignore_loss_params:
                    continue
                if isinstance(loss_value, list):
                    output += sum(loss_value)
                else:
                    output += loss_value
            return output
        else:
            # grad-free method
            # results is DetDataSample
            pred_instances = results.pred_instances
            if len(pred_instances) == 0:
                return output
            pred_bboxes = pred_instances.bboxes
            pred_scores = pred_instances.scores
            pred_labels = pred_instances.labels
            for focal_box, focal_label in zip(self.focal_bboxes,
                                              self.focal_labels):
                # Best-matching prediction for this focal box.
                ious = torchvision.ops.box_iou(focal_box[None],
                                               pred_bboxes[..., :4])
                index = ious.argmax()
                if ious[0, index] > self.match_iou_thr and pred_labels[
                        index] == focal_label:
                    # TODO: Adaptive adjustment of weights based on algorithms
                    score = ious[0, index] + pred_scores[index]
                    output = output + score
            return output
class SpatialBaseCAM(BaseCAM):
    """CAM that maintains spatial information.

    Gradients are often averaged over the spatial dimension in CAM
    visualization for classification, but this is unreasonable in detection
    tasks. There is no need to average the gradients in the detection task.
    """

    def get_cam_image(self,
                      input_tensor: torch.Tensor,
                      target_layer: torch.nn.Module,
                      targets: List[torch.nn.Module],
                      activations: torch.Tensor,
                      grads: torch.Tensor,
                      eigen_smooth: bool = False) -> np.ndarray:
        # Per-location weights (no spatial averaging) times activations.
        weights = self.get_cam_weights(input_tensor, target_layer, targets,
                                       activations, grads)
        weighted_activations = weights * activations
        if eigen_smooth:
            # Project onto the first principal component for smoothing.
            cam = get_2d_projection(weighted_activations)
        else:
            # Collapse the channel dimension to a single spatial map.
            cam = weighted_activations.sum(axis=1)
        return cam
class GradCAM(SpatialBaseCAM, Base_GradCAM):
    """Gradients are no longer averaged over the spatial dimension."""

    def get_cam_weights(self, input_tensor, target_layer, target_category,
                        activations, grads):
        # Use the raw per-location gradients directly as weights.
        return grads
class GradCAMPlusPlus(SpatialBaseCAM, Base_GradCAMPlusPlus):
    """Gradients are no longer averaged over the spatial dimension."""

    def get_cam_weights(self, input_tensor, target_layers, target_category,
                        activations, grads):
        grads_power_2 = grads**2
        grads_power_3 = grads_power_2 * grads
        # Equation 19 in https://arxiv.org/abs/1710.11063
        sum_activations = np.sum(activations, axis=(2, 3))
        # Small epsilon avoids division by zero in the alpha denominator.
        eps = 0.000001
        aij = grads_power_2 / (
            2 * grads_power_2 +
            sum_activations[:, :, None, None] * grads_power_3 + eps)
        # Now bring back the ReLU from eq.7 in the paper,
        # And zero out aijs where the activations are 0
        aij = np.where(grads != 0, aij, 0)

        weights = np.maximum(grads, 0) * aij
        return weights
| 19,429 | 36.875244 | 79 | py |
DALLE-pytorch | DALLE-pytorch-main/train_dalle.py | import argparse
from pathlib import Path
import time
from glob import glob
import os
import shutil
import torch
import wandb # Quit early if user doesn't have wandb installed.
from torch.nn.utils import clip_grad_norm_
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from dalle_pytorch import __version__
from dalle_pytorch import OpenAIDiscreteVAE, VQGanVAE, DiscreteVAE, DALLE
from dalle_pytorch import distributed_utils
from dalle_pytorch.loader import TextImageDataset
from dalle_pytorch.tokenizer import tokenizer, HugTokenizer, ChineseTokenizer, YttmTokenizer
# libraries needed for webdataset support
import webdataset as wds
from torchvision import transforms as T
from PIL import Image
from io import BytesIO
# argument parsing
parser = argparse.ArgumentParser()

# Exactly one of a trained VAE or a partially trained DALL-E may be given.
group = parser.add_mutually_exclusive_group(required=False)

group.add_argument('--vae_path', type=str,
                   help='path to your trained discrete VAE')

group.add_argument('--dalle_path', type=str,
                   help='path to your partially trained DALL-E')

parser.add_argument('--vqgan_model_path', type=str, default = None,
                    help='path to your trained VQGAN weights. This should be a .ckpt file. (only valid when taming option is enabled)')

parser.add_argument('--vqgan_config_path', type=str, default = None,
                    help='path to your trained VQGAN config. This should be a .yaml file. (only valid when taming option is enabled)')

parser.add_argument('--image_text_folder', type=str, required=True,
                    help='path to your folder of images and text for learning the DALL-E')

parser.add_argument('--wds', type = str, default='',
                    help = 'Comma separated list of WebDataset (1) image and (2) text column names. Must contain 2 values, e.g. img,cap.')

parser.add_argument('--truncate_captions', dest='truncate_captions', action='store_true',
                    help='Captions passed in which exceed the max token length will be truncated if this is set.')

parser.add_argument('--random_resize_crop_lower_ratio', dest='resize_ratio', type=float, default=0.75,
                    help='Random resized crop lower ratio')

# Tokenizer selection flags (mutually independent booleans).
parser.add_argument('--chinese', dest='chinese', action='store_true')

parser.add_argument('--taming', dest='taming', action='store_true')

parser.add_argument('--hug', dest='hug', action='store_true')

parser.add_argument('--bpe_path', type=str,
                    help='path to your BPE json file')

parser.add_argument('--dalle_output_file_name', type=str, default = "dalle",
                    help='output_file_name')

parser.add_argument('--fp16', action='store_true',
                    help='(experimental) - Enable DeepSpeed 16 bit precision. Reduces VRAM.')

parser.add_argument('--amp', action='store_true',
                    help='Apex "O1" automatic mixed precision. More stable than 16 bit precision. Can\'t be used in conjunction with deepspeed zero stages 1-3.')

parser.add_argument('--wandb_name', default='dalle_train_transformer',
                    help='Name W&B will use when saving results.\ne.g. `--wandb_name "coco2017-full-sparse"`')

parser.add_argument('--wandb_entity', default=None,
                    help='(optional) Name of W&B team/entity to log to.')

parser.add_argument('--stable_softmax', dest='stable_softmax', action='store_true',
                    help='Prevent values from becoming too large during softmax. Helps with stability in fp16 and Mixture of Quantization training.')

# Adds the distributed-backend options (deepspeed/horovod) to the parser.
parser = distributed_utils.wrap_arg_parser(parser)

train_group = parser.add_argument_group('Training settings')

train_group.add_argument('--flops_profiler', dest = 'flops_profiler', action='store_true', help = 'Exits after printing detailed flops/runtime analysis of forward/backward')

train_group.add_argument('--epochs', default = 20, type = int, help = 'Number of epochs')

train_group.add_argument('--save_every_n_steps', default = 1000, type = int, help = 'Save a checkpoint every n steps')

train_group.add_argument('--keep_n_checkpoints', default = None, type = int, help = '(Careful) Deletes old deepspeed checkpoints if there are more than n')

train_group.add_argument('--batch_size', default = 4, type = int, help = 'Batch size')

train_group.add_argument('--ga_steps', default = 1, type = int, help = 'Number of steps to accumulate gradients across per each iteration. DeepSpeed only.')

train_group.add_argument('--learning_rate', default = 3e-4, type = float, help = 'Learning rate')

train_group.add_argument('--clip_grad_norm', default = 0.5, type = float, help = 'Clip gradient norm')

train_group.add_argument('--lr_decay', dest = 'lr_decay', action = 'store_true')

model_group = parser.add_argument_group('Model settings')

model_group.add_argument('--dim', default = 512, type = int, help = 'Model dimension')

model_group.add_argument('--text_seq_len', default = 256, type = int, help = 'Text sequence length')

model_group.add_argument('--depth', default = 2, type = int, help = 'Model depth')

model_group.add_argument('--heads', default = 8, type = int, help = 'Model number of heads')

model_group.add_argument('--dim_head', default = 64, type = int, help = 'Model head dimension')

# NOTE(review): these two dropout flags are registered on `train_group`
# (not `model_group`), and the `--attn_dropout` help text says
# 'Feed forward dropout.' although it controls attention dropout.
train_group.add_argument('--ff_dropout', default = 0.0, type = float, help = 'Feed forward dropout.')

train_group.add_argument('--attn_dropout', default = 0.0, type = float, help = 'Feed forward dropout.')

model_group.add_argument('--reversible', dest = 'reversible', action='store_true')

model_group.add_argument('--loss_img_weight', default = 7, type = int, help = 'Image loss weight')

model_group.add_argument('--attn_types', default = 'full', type = str, help = 'comma separated list of attention types. attention type can be: full or sparse or axial_row or axial_col or conv_like.')

model_group.add_argument('--shift_tokens', help = 'Use the shift tokens feature', action = 'store_true')

model_group.add_argument('--rotary_emb', help = 'Use rotary embeddings', action = 'store_true')

model_group.add_argument('--shared_attn_ids', default = None, type = str, help = 'Comma separated list of shared attention layer ids. Default: sharing is disabled')

model_group.add_argument('--shared_ff_ids', default = None, type = str, help = 'Comma separated list of shared feed forward layer ids. Default: sharing is disabled')

model_group.add_argument('--share_input_output_emb', help = 'Share input and output embeddings', action = 'store_true')

args = parser.parse_args()
# helpers
def exists(val):
    """True when *val* carries a value (i.e. is not None)."""
    return not (val is None)
def get_trainable_params(model):
    """Collect every parameter of *model* that still requires gradients."""
    trainable = []
    for param in model.parameters():
        if param.requires_grad:
            trainable.append(param)
    return trainable
def cp_path_to_dir(cp_path, tag):
    """Convert a checkpoint path to a directory path with `tag` inserted.

    If `cp_path` already names a directory it is returned unchanged;
    otherwise ``model.pt`` becomes ``model-<tag>-cp``.
    """
    path = cp_path if isinstance(cp_path, Path) else Path(cp_path)
    if path.is_dir():
        return path
    stem_path = path.parent / path.stem
    return Path(f'{stem_path}-{tag}-cp')
# constants
WEBDATASET_IMAGE_TEXT_COLUMNS = tuple(args.wds.split(','))
ENABLE_WEBDATASET = True if len(WEBDATASET_IMAGE_TEXT_COLUMNS) == 2 else False
DALLE_OUTPUT_FILE_NAME = args.dalle_output_file_name + ".pt"
VAE_PATH = args.vae_path
VQGAN_MODEL_PATH = args.vqgan_model_path
VQGAN_CONFIG_PATH = args.vqgan_config_path
DALLE_PATH = args.dalle_path
RESUME = exists(DALLE_PATH)
EPOCHS = args.epochs
BATCH_SIZE = args.batch_size
LEARNING_RATE = args.learning_rate
GRAD_CLIP_NORM = args.clip_grad_norm
LR_DECAY = args.lr_decay
SAVE_EVERY_N_STEPS = args.save_every_n_steps
KEEP_N_CHECKPOINTS = args.keep_n_checkpoints
MODEL_DIM = args.dim
TEXT_SEQ_LEN = args.text_seq_len
DEPTH = args.depth
HEADS = args.heads
DIM_HEAD = args.dim_head
REVERSIBLE = args.reversible
LOSS_IMG_WEIGHT = args.loss_img_weight
FF_DROPOUT = args.ff_dropout
ATTN_DROPOUT = args.attn_dropout
STABLE = args.stable_softmax
SHIFT_TOKENS = args.shift_tokens
ROTARY_EMB = args.rotary_emb
ATTN_TYPES = tuple(args.attn_types.split(','))
SHARED_ATTN_IDS = tuple(args.shared_attn_ids.split(',')) if exists(args.shared_attn_ids) else None
SHARED_FF_IDS = tuple(args.shared_ff_ids.split(',')) if exists(args.shared_ff_ids) else None
SHARE_INPUT_OUTPUT_EMB = args.share_input_output_emb
DEEPSPEED_CP_AUX_FILENAME = 'auxiliary.pt'
if not ENABLE_WEBDATASET:
    # quit early if you used the wrong folder name
    assert Path(args.image_text_folder).exists(), f'The path {args.image_text_folder} was not found.'
else:
    # Resolve the WebDataset source: a directory of .tar(.gz) shards, an
    # http(s)/GCS url streamed through an external tool, or a single tar file.
    if Path(args.image_text_folder).is_dir():
        DATASET = [str(p) for p in Path(args.image_text_folder).glob("**/*") if ".tar" in str(p).lower()] # .name
        assert len(DATASET) > 0, 'The directory ({}) does not contain any WebDataset/.tar files.'.format(args.image_text_folder)
        print('Found {} WebDataset .tar(.gz) file(s) under given path {}!'.format(len(DATASET), args.image_text_folder))
    elif ('http://' in args.image_text_folder.lower()) or ('https://' in args.image_text_folder.lower()):
        DATASET = f"pipe:curl -L -s {args.image_text_folder} || true"
        # BUGFIX: DATASET is a single pipe-command string here, so
        # len(DATASET) counted characters, not links; print the url instead.
        print('Found http(s) link under given path: {}!'.format(args.image_text_folder))
    elif 'gs://' in args.image_text_folder.lower():
        DATASET = f"pipe:gsutil cat {args.image_text_folder} || true"
        # BUGFIX: same as above — DATASET is a string, not a list of links.
        print('Found GCS link under given path: {}!'.format(args.image_text_folder))
    elif '.tar' in args.image_text_folder:
        DATASET = args.image_text_folder
        print('Found WebDataset .tar(.gz) file under given path {}!'.format(args.image_text_folder))
    else:
        raise Exception('No folder, no .tar(.gz) and no url pointing to tar files provided under {}.'.format(args.image_text_folder))
# initialize distributed backend
distr_backend = distributed_utils.set_backend_from_args(args)
distr_backend.initialize()
using_deepspeed = \
distributed_utils.using_backend(distributed_utils.DeepSpeedBackend)
is_root = distr_backend.is_root_worker()
# tokenizer
if exists(args.bpe_path):
klass = HugTokenizer if args.hug else YttmTokenizer
tokenizer = klass(args.bpe_path)
elif args.chinese:
tokenizer = ChineseTokenizer()
# reconstitute vae
if RESUME:
dalle_path = Path(DALLE_PATH)
if using_deepspeed:
cp_dir = cp_path_to_dir(dalle_path, 'ds')
assert cp_dir.is_dir(), \
f'DeepSpeed checkpoint directory {cp_dir} not found'
dalle_path = cp_dir / DEEPSPEED_CP_AUX_FILENAME
else:
assert dalle_path.exists(), 'DALL-E model file does not exist'
loaded_obj = torch.load(str(dalle_path), map_location='cpu')
dalle_params, vae_params, weights = loaded_obj['hparams'], loaded_obj['vae_params'], loaded_obj['weights']
opt_state = loaded_obj.get('opt_state')
scheduler_state = loaded_obj.get('scheduler_state')
if vae_params is not None:
vae = DiscreteVAE(**vae_params)
elif args.taming:
vae = VQGanVAE(VQGAN_MODEL_PATH, VQGAN_CONFIG_PATH)
else:
vae = OpenAIDiscreteVAE()
resume_epoch = loaded_obj.get('epoch', 0)
else:
if exists(VAE_PATH):
vae_path = Path(VAE_PATH)
assert vae_path.exists(), 'VAE model file does not exist'
assert not vae_path.is_dir(), \
('Cannot load VAE model from directory; please use a '
'standard *.pt checkpoint. '
'Currently, merging a DeepSpeed-partitioned VAE into a DALLE '
'model is not supported.')
loaded_obj = torch.load(str(vae_path))
vae_params, weights = loaded_obj['hparams'], loaded_obj['weights']
vae = DiscreteVAE(**vae_params)
vae.load_state_dict(weights)
else:
if is_root:
print('using pretrained VAE for encoding images to tokens')
vae_params = None
if args.taming:
vae = VQGanVAE(VQGAN_MODEL_PATH, VQGAN_CONFIG_PATH)
else:
vae = OpenAIDiscreteVAE()
dalle_params = dict(
num_text_tokens=tokenizer.vocab_size,
text_seq_len=TEXT_SEQ_LEN,
dim=MODEL_DIM,
depth=DEPTH,
heads=HEADS,
dim_head=DIM_HEAD,
reversible=REVERSIBLE,
loss_img_weight=LOSS_IMG_WEIGHT,
attn_types=ATTN_TYPES,
ff_dropout=FF_DROPOUT,
attn_dropout=ATTN_DROPOUT,
stable=STABLE,
shift_tokens=SHIFT_TOKENS,
rotary_emb=ROTARY_EMB,
shared_attn_ids=SHARED_ATTN_IDS,
shared_ff_ids=SHARED_FF_IDS,
share_input_output_emb=SHARE_INPUT_OUTPUT_EMB,
)
resume_epoch = 0
IMAGE_SIZE = vae.image_size
CHANNELS = vae.channels
TRANSPARENT = CHANNELS == 4
IMAGE_MODE = 'RGBA' if CHANNELS == 4 else 'RGB'
# configure OpenAI VAE for float16s
if isinstance(vae, OpenAIDiscreteVAE) and args.fp16:
vae.enc.blocks.output.conv.use_float16 = True
# helpers
def group_weight(model):
    """Split *model*'s parameters into weight-decay / no-weight-decay groups.

    Bias and norm parameters inside the transformer are exempted from weight
    decay; all other transformer parameters are decayed.  The assert requires
    every parameter name to contain 'transformer' — parameters outside the
    transformer would be silently skipped and trip the assert.
    """
    group_decay, group_no_decay = [], []
    for name, param in model.named_parameters():
        if 'transformer' not in name:
            continue
        if 'bias' in name or 'norm' in name:
            group_no_decay.append(param)
        else:
            group_decay.append(param)
    assert len(list(model.parameters())) == len(group_decay) + len(group_no_decay)
    return [dict(params=group_decay), dict(params=group_no_decay, weight_decay=.0)]
# create dataset and dataloader
is_shuffle = not distributed_utils.using_backend(distributed_utils.HorovodBackend)
imagepreproc = T.Compose([
T.Lambda(lambda img: img.convert(IMAGE_MODE)
if img.mode != IMAGE_MODE else img),
T.RandomResizedCrop(IMAGE_SIZE,
scale=(args.resize_ratio, 1.),
ratio=(1., 1.)),
T.ToTensor(),
])
def imagetransform(b):
    # Decode raw image bytes (as delivered by WebDataset) into a PIL image.
    return Image.open(BytesIO(b))
def tokenize(s):
    # Decode the raw caption bytes and tokenize to a fixed-length
    # TEXT_SEQ_LEN tensor; squeeze(0) drops the batch dim the tokenizer adds.
    return tokenizer.tokenize(
        s.decode('utf-8'),
        TEXT_SEQ_LEN,
        truncate_text=args.truncate_captions).squeeze(0)
if ENABLE_WEBDATASET:
    DATASET_SIZE = int(1e9) # You need to set a nominal length for the Dataset in order to avoid warnings from DataLoader
    myimg, mycap = WEBDATASET_IMAGE_TEXT_COLUMNS
    image_text_mapping = {
        myimg: imagetransform,
        mycap: tokenize
    }
    image_mapping = {
        myimg: imagepreproc
    }
    def filter_dataset(item): # For e.g. C@H which (rarely) has no caption available.
        # Drop samples lacking either the caption or the image column.
        if mycap not in item:
            return False
        if myimg not in item:
            return False
        return True
    w_dataset = wds.WebDataset(DATASET, handler=wds.warn_and_continue)
    filtered_dataset = w_dataset.select(filter_dataset)
    # BUGFIX: use integer floor division for the per-worker batch size.
    # `/` yields a float (e.g. 8.0, or 10.67 for non-divisible world sizes),
    # which is not a valid batch size for webdataset's batched().
    ds = filtered_dataset.map_dict(**image_text_mapping).map_dict(**image_mapping).to_tuple(mycap, myimg).batched(BATCH_SIZE // distr_backend.get_world_size(), partial=True)
else:
    ds = TextImageDataset(
        args.image_text_folder,
        text_len=TEXT_SEQ_LEN,
        image_size=IMAGE_SIZE,
        transparent=TRANSPARENT,
        resize_ratio=args.resize_ratio,
        truncate_captions=args.truncate_captions,
        tokenizer=tokenizer,
        shuffle=is_shuffle,
    )
    assert len(ds) > 0, 'dataset is empty'
if is_root:
if not ENABLE_WEBDATASET:
print(f'{len(ds)} image-text pairs found for training')
# data sampler
data_sampler = None
if not is_shuffle:
data_sampler = torch.utils.data.distributed.DistributedSampler(
ds,
num_replicas=distr_backend.get_world_size(),
rank=distr_backend.get_rank()
)
# WebLoader for WebDataset and DeepSpeed compatibility
if ENABLE_WEBDATASET:
dl = wds.WebLoader(ds, batch_size=None, shuffle=False, num_workers=4) # optionally add num_workers=2 (n) argument
number_of_batches = DATASET_SIZE // (BATCH_SIZE * distr_backend.get_world_size())
dl = dl.slice(number_of_batches)
dl.length = number_of_batches
else:
# Regular DataLoader for image-text-folder datasets
dl = DataLoader(ds, batch_size=BATCH_SIZE, shuffle=is_shuffle, drop_last=True, sampler=data_sampler)
# initialize DALL-E
dalle = DALLE(vae=vae, **dalle_params)
if not using_deepspeed:
if args.fp16:
dalle = dalle.half()
dalle = dalle.cuda()
if RESUME and not using_deepspeed:
dalle.load_state_dict(weights)
# optimizer
opt = Adam(get_trainable_params(dalle), lr=LEARNING_RATE)
if RESUME and opt_state:
opt.load_state_dict(opt_state)
# scheduler
scheduler = None
if LR_DECAY:
scheduler = ReduceLROnPlateau(
opt,
mode="min",
factor=0.5,
patience=10,
cooldown=10,
min_lr=1e-6,
verbose=True,
)
if RESUME and scheduler_state:
scheduler.load_state_dict(scheduler_state)
# experiment tracker
if is_root:
model_config = dict(
depth=DEPTH,
heads=HEADS,
dim_head=DIM_HEAD
)
run = wandb.init(
project=args.wandb_name,
entity=args.wandb_entity,
resume=False,
config=model_config,
)
# distribute
distr_backend.check_batch_size(BATCH_SIZE)
deepspeed_config = {
'train_batch_size': BATCH_SIZE,
'gradient_accumulation_steps': args.ga_steps,
'gradient_clipping': GRAD_CLIP_NORM,
'fp16': {
'enabled': args.fp16,
},
'amp': {
'enabled': args.amp,
'opt_level': 'O1',
},
"flops_profiler": {
"enabled": args.flops_profiler,
"profile_step": 200,
"module_depth": -1,
"top_modules": 1,
"detailed": True,
"output_file": None # TODO Can't get this to work.
},
}
if deepspeed_config.get('zero_optimization', {}).get('stage', 0) >= 2:
print(f"Checkpoints made with DeepSpeed ZeRO Stages 2 and 3 will be stored in deepspeed checkpoint folder")
print(f"As such, they will require DeepSpeed as a dependency in order to resume from or generate with.")
print("See the deespeed conversion script for details on how to convert your ZeRO stage 2/3 checkpoint to a single file.")
print("If using a single GPU, consider running with apex automatic mixed precision instead for a similar speedup to ZeRO.")
time.sleep(2)
(distr_dalle, distr_opt, distr_dl, distr_scheduler) = distr_backend.distribute(
args=args,
model=dalle,
optimizer=opt,
model_parameters=get_trainable_params(dalle),
training_data=(
(None if ENABLE_WEBDATASET else ds)
if using_deepspeed
else dl
),
# Do not pass the LR scheduler to DeepSpeed so we can manually
# advance it.
lr_scheduler=scheduler if LR_DECAY and not using_deepspeed else None,
config_params=deepspeed_config,
)
# Prefer scheduler in `deepspeed_config`.
if LR_DECAY and distr_scheduler is None:
distr_scheduler = scheduler
avoid_model_calls = using_deepspeed and args.fp16
if RESUME and using_deepspeed:
distr_dalle.load_checkpoint(str(cp_dir))
def save_model(path, epoch=0):
    # Checkpoint the DALL-E model (plus hyperparameters and VAE config) to
    # *path*.  Under DeepSpeed the engine writes a checkpoint directory and
    # an auxiliary .pt file; otherwise a plain torch checkpoint is written.
    save_obj = {
        'hparams': dalle_params,
        'vae_params': vae_params,
        'epoch': epoch,
        'version': __version__,
        'vae_class_name': vae.__class__.__name__
    }
    if using_deepspeed:
        cp_dir = cp_path_to_dir(path, 'ds')
        # prune oldest checkpoint dirs beyond the retention limit (root only)
        if KEEP_N_CHECKPOINTS is not None and is_root:
            checkpoints = sorted(glob(str(cp_dir / "global*")), key=os.path.getmtime, reverse=True)
            for checkpoint in checkpoints[KEEP_N_CHECKPOINTS:]:
                shutil.rmtree(checkpoint)
        # collective call: every worker must reach this, not just root
        distr_dalle.save_checkpoint(cp_dir, client_state=save_obj)
        if not is_root:
            return
        # Save auxiliary values so we can reuse the standard routine
        # for loading.
        save_obj = {
            **save_obj,
            # Save a nonsense value that directs the user to
            # further help.
            'weights': (
                'To get a working standard checkpoint, '
                'look into consolidating DeepSpeed checkpoints.'
            ),
        }
        torch.save(save_obj, str(cp_dir / DEEPSPEED_CP_AUX_FILENAME))
        # ZeRO stage >= 2 partitions weights across workers, so no single-file
        # checkpoint can be written here.
        if deepspeed_config.get('zero_optimization', {}).get('stage', 0) >= 2: # see https://github.com/lucidrains/DALLE-pytorch/wiki/DeepSpeed-Checkpoints
            return
    # NOTE(review): for DeepSpeed with ZeRO stage < 2 control falls through to
    # also write a standard checkpoint below — presumably intentional; verify.
    if not is_root:
        return
    save_obj = {
        **save_obj,
        'weights': dalle.state_dict(),
        'opt_state': opt.state_dict(),
        'scheduler_state': (scheduler.state_dict() if scheduler else None)
    }
    torch.save(save_obj, path)
def save_artifact(model_config, model_path, name = 'trained-dalle'):
    # Upload the checkpoint at *model_path* to the active wandb run as a
    # versioned model artifact tagged with the model configuration.
    model_artifact = wandb.Artifact(name, type='model', metadata=dict(model_config))
    model_artifact.add_file(model_path)
    run.log_artifact(model_artifact)
# training
# Saves a checkpoint before training begins to fail early when mis-configured.
# See https://github.com/lucidrains/DALLE-pytorch/wiki/DeepSpeed-Checkpoints
save_model(DALLE_OUTPUT_FILE_NAME, epoch=resume_epoch)
# Main training loop: forward/backward per batch, periodic logging,
# checkpointing, and (on the root worker) image sampling to wandb.
for epoch in range(resume_epoch, EPOCHS):
    if data_sampler:
        # re-seed the distributed sampler so each epoch shuffles differently
        data_sampler.set_epoch(epoch)
    for i, (text, images) in enumerate((dl if ENABLE_WEBDATASET else distr_dl)):
        # start a throughput timer every 10 steps (read again at i % 10 == 9)
        if i % 10 == 0 and is_root:
            t = time.time()
        if args.fp16:
            images = images.half()
        # NOTE: the lambda's `t` shadows the timer name only inside the lambda
        text, images = map(lambda t: t.cuda(), (text, images))
        loss = distr_dalle(text, images, return_loss=True)
        if using_deepspeed:
            distr_dalle.backward(loss)
            distr_dalle.step()
            # Gradients are automatically zeroed after the step
        else:
            loss.backward()
            clip_grad_norm_(distr_dalle.parameters(), GRAD_CLIP_NORM)
            distr_opt.step()
            distr_opt.zero_grad()
        # Collective loss, averaged
        avg_loss = distr_backend.average_all(loss)
        log = {}
        if i % 10 == 0 and is_root:
            print(epoch, i, f'loss - {avg_loss.item()}')
            log = {
                **log,
                'epoch': epoch,
                'iter': i,
                'loss': avg_loss.item()
            }
        if i % SAVE_EVERY_N_STEPS == 0:
            save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
        if i % 100 == 0 and is_root:
            # decode the first caption of the batch and sample an image for it
            sample_text = text[:1]
            token_list = sample_text.masked_select(sample_text != 0).tolist()
            decoded_text = tokenizer.decode(token_list)
            if not avoid_model_calls:
                # CUDA index errors when we don't guard this
                image = dalle.generate_images(text[:1], filter_thres=0.9) # topk sampling at 0.9
            if not avoid_model_calls:
                log['image'] = wandb.Image(image, caption=decoded_text)
        if i % 10 == 9 and is_root:
            sample_per_sec = BATCH_SIZE * 10 / (time.time() - t)
            log["sample_per_sec"] = sample_per_sec
            print(epoch, i, f'sample_per_sec - {sample_per_sec}')
        # deliberately abort the run once the flops profiler has sampled
        if i == 201 and args.flops_profiler:
            raise StopIteration("Profiler has finished running. Stopping training early.")
        if is_root:
            wandb.log(log)
    if LR_DECAY:
        # steps on the last batch's averaged loss of the epoch
        distr_scheduler.step(avg_loss)
    save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
    if is_root:
        # save trained model to wandb as an artifact every epoch's end
        save_artifact(model_config, DALLE_OUTPUT_FILE_NAME)
save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
if is_root:
    wandb.save(DALLE_OUTPUT_FILE_NAME)
    save_artifact(model_config, DALLE_OUTPUT_FILE_NAME)
    wandb.finish()
| 23,672 | 33.967504 | 199 | py |
DALLE-pytorch | DALLE-pytorch-main/setup.py | from setuptools import setup, find_packages
# pulls __version__ into scope without importing (and thus building) the package
exec(open('dalle_pytorch/version.py').read())
setup(
  name = 'dalle-pytorch',
  packages = find_packages(),
  include_package_data = True,
  version = __version__,
  license='MIT',
  description = 'DALL-E - Pytorch',
  author = 'Phil Wang',
  author_email = 'lucidrains@gmail.com',
  long_description_content_type = 'text/markdown',
  url = 'https://github.com/lucidrains/dalle-pytorch',
  keywords = [
    'artificial intelligence',
    'attention mechanism',
    'transformers',
    'text-to-image'
  ],
  install_requires=[
    'axial_positional_embedding',
    'DALL-E',
    'einops>=0.3.2',
    'ftfy',
    'packaging',
    'pillow',
    'regex',
    'rotary-embedding-torch',
    'taming-transformers-rom1504',
    'tokenizers',
    'torch>=1.6',
    'torchvision',
    'transformers',
    'tqdm',
    'youtokentome',
    'WebDataset'
  ],
  classifiers=[
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Topic :: Scientific/Engineering :: Artificial Intelligence',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3.6',
  ],
)
| 1,149 | 23.468085 | 65 | py |
DALLE-pytorch | DALLE-pytorch-main/generate.py | import argparse
from pathlib import Path
from tqdm import tqdm
# torch
import torch
from einops import repeat
# vision imports
from PIL import Image
from torchvision.utils import make_grid, save_image
# dalle related classes and utils
from dalle_pytorch import __version__
from dalle_pytorch import DiscreteVAE, OpenAIDiscreteVAE, VQGanVAE, DALLE
from dalle_pytorch.tokenizer import tokenizer, HugTokenizer, YttmTokenizer, ChineseTokenizer
# argument parsing
parser = argparse.ArgumentParser()
parser.add_argument('--dalle_path', type = str, required = True,
help='path to your trained DALL-E')
parser.add_argument('--vqgan_model_path', type=str, default = None,
help='path to your trained VQGAN weights. This should be a .ckpt file. (only valid when taming option is enabled)')
parser.add_argument('--vqgan_config_path', type=str, default = None,
help='path to your trained VQGAN config. This should be a .yaml file. (only valid when taming option is enabled)')
parser.add_argument('--text', type = str, required = True,
help='your text prompt')
parser.add_argument('--num_images', type = int, default = 128, required = False,
help='number of images')
parser.add_argument('--batch_size', type = int, default = 4, required = False,
help='batch size')
parser.add_argument('--top_k', type = float, default = 0.9, required = False,
help='top k filter threshold')
parser.add_argument('--outputs_dir', type = str, default = './outputs', required = False,
help='output directory')
parser.add_argument('--bpe_path', type = str,
help='path to your huggingface BPE json file')
parser.add_argument('--hug', dest='hug', action = 'store_true')
parser.add_argument('--chinese', dest='chinese', action = 'store_true')
parser.add_argument('--taming', dest='taming', action='store_true')
parser.add_argument('--gentxt', dest='gentxt', action='store_true')
args = parser.parse_args()
# helper fns
def exists(val):
    """Return whether *val* is anything other than None."""
    return not (val is None)
# tokenizer
if exists(args.bpe_path):
klass = HugTokenizer if args.hug else YttmTokenizer
tokenizer = klass(args.bpe_path)
elif args.chinese:
tokenizer = ChineseTokenizer()
# load DALL-E
dalle_path = Path(args.dalle_path)
assert dalle_path.exists(), 'trained DALL-E must exist'
load_obj = torch.load(str(dalle_path))
dalle_params, vae_params, weights, vae_class_name, version = load_obj.pop('hparams'), load_obj.pop('vae_params'), load_obj.pop('weights'), load_obj.pop('vae_class_name', None), load_obj.pop('version', None)
# friendly print
if exists(version):
print(f'Loading a model trained with DALLE-pytorch version {version}')
else:
print('You are loading a model trained on an older version of DALL-E pytorch - it may not be compatible with the most recent version')
# load VAE
if args.taming:
vae = VQGanVAE(args.vqgan_model_path, args.vqgan_config_path)
elif vae_params is not None:
vae = DiscreteVAE(**vae_params)
else:
vae = OpenAIDiscreteVAE()
assert not (exists(vae_class_name) and vae.__class__.__name__ != vae_class_name), f'you trained DALL-E using {vae_class_name} but are trying to generate with {vae.__class__.__name__} - please make sure you are passing in the correct paths and settings for the VAE to use for generation'
# reconstitute DALL-E
dalle = DALLE(vae = vae, **dalle_params).cuda()
dalle.load_state_dict(weights)
# generate images
image_size = vae.image_size
texts = args.text.split('|')
for j, text in tqdm(enumerate(texts)):
if args.gentxt:
text_tokens, gen_texts = dalle.generate_texts(tokenizer, text=text, filter_thres = args.top_k)
text = gen_texts[0]
else:
text_tokens = tokenizer.tokenize([text], dalle.text_seq_len).cuda()
text_tokens = repeat(text_tokens, '() n -> b n', b = args.num_images)
outputs = []
for text_chunk in tqdm(text_tokens.split(args.batch_size), desc = f'generating images for - {text}'):
output = dalle.generate_images(text_chunk, filter_thres = args.top_k)
outputs.append(output)
outputs = torch.cat(outputs)
# save all images
file_name = text
outputs_dir = Path(args.outputs_dir) / file_name.replace(' ', '_')[:(100)]
outputs_dir.mkdir(parents = True, exist_ok = True)
for i, image in tqdm(enumerate(outputs), desc = 'saving images'):
save_image(image, outputs_dir / f'{i}.png', normalize=True)
with open(outputs_dir / 'caption.txt', 'w') as f:
f.write(file_name)
print(f'created {args.num_images} images at "{str(outputs_dir)}"')
| 4,695 | 31.611111 | 286 | py |
DALLE-pytorch | DALLE-pytorch-main/train_vae.py | import math
from math import sqrt
import argparse
from pathlib import Path
# torch
import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import ExponentialLR
# vision imports
from torchvision import transforms as T
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torchvision.utils import make_grid, save_image
# dalle classes and utils
from dalle_pytorch import distributed_utils
from dalle_pytorch import DiscreteVAE
# argument parsing
parser = argparse.ArgumentParser()
parser.add_argument('--image_folder', type = str, required = True,
help='path to your folder of images for learning the discrete VAE and its codebook')
parser.add_argument('--image_size', type = int, required = False, default = 128,
help='image size')
parser = distributed_utils.wrap_arg_parser(parser)
train_group = parser.add_argument_group('Training settings')
train_group.add_argument('--epochs', type = int, default = 20, help = 'number of epochs')
train_group.add_argument('--batch_size', type = int, default = 8, help = 'batch size')
train_group.add_argument('--learning_rate', type = float, default = 1e-3, help = 'learning rate')
train_group.add_argument('--lr_decay_rate', type = float, default = 0.98, help = 'learning rate decay')
train_group.add_argument('--starting_temp', type = float, default = 1., help = 'starting temperature')
train_group.add_argument('--temp_min', type = float, default = 0.5, help = 'minimum temperature to anneal to')
train_group.add_argument('--anneal_rate', type = float, default = 1e-6, help = 'temperature annealing rate')
train_group.add_argument('--num_images_save', type = int, default = 4, help = 'number of images to save')
model_group = parser.add_argument_group('Model settings')
model_group.add_argument('--num_tokens', type = int, default = 8192, help = 'number of image tokens')
model_group.add_argument('--num_layers', type = int, default = 3, help = 'number of layers (should be 3 or above)')
model_group.add_argument('--num_resnet_blocks', type = int, default = 2, help = 'number of residual net blocks')
model_group.add_argument('--smooth_l1_loss', dest = 'smooth_l1_loss', action = 'store_true')
model_group.add_argument('--emb_dim', type = int, default = 512, help = 'embedding dimension')
model_group.add_argument('--hidden_dim', type = int, default = 256, help = 'hidden dimension')
model_group.add_argument('--kl_loss_weight', type = float, default = 0., help = 'KL loss weight')
model_group.add_argument('--transparent', dest = 'transparent', action = 'store_true')
args = parser.parse_args()
# constants
IMAGE_SIZE = args.image_size
IMAGE_PATH = args.image_folder
EPOCHS = args.epochs
BATCH_SIZE = args.batch_size
LEARNING_RATE = args.learning_rate
LR_DECAY_RATE = args.lr_decay_rate
NUM_TOKENS = args.num_tokens
NUM_LAYERS = args.num_layers
NUM_RESNET_BLOCKS = args.num_resnet_blocks
SMOOTH_L1_LOSS = args.smooth_l1_loss
EMB_DIM = args.emb_dim
HIDDEN_DIM = args.hidden_dim
KL_LOSS_WEIGHT = args.kl_loss_weight
TRANSPARENT = args.transparent
CHANNELS = 4 if TRANSPARENT else 3
IMAGE_MODE = 'RGBA' if TRANSPARENT else 'RGB'
STARTING_TEMP = args.starting_temp
TEMP_MIN = args.temp_min
ANNEAL_RATE = args.anneal_rate
NUM_IMAGES_SAVE = args.num_images_save
# initialize distributed backend
distr_backend = distributed_utils.set_backend_from_args(args)
distr_backend.initialize()
using_deepspeed = \
distributed_utils.using_backend(distributed_utils.DeepSpeedBackend)
# data
ds = ImageFolder(
IMAGE_PATH,
T.Compose([
T.Lambda(lambda img: img.convert(IMAGE_MODE) if img.mode != IMAGE_MODE else img),
T.Resize(IMAGE_SIZE),
T.CenterCrop(IMAGE_SIZE),
T.ToTensor()
])
)
if distributed_utils.using_backend(distributed_utils.HorovodBackend):
data_sampler = torch.utils.data.distributed.DistributedSampler(
ds, num_replicas=distr_backend.get_world_size(),
rank=distr_backend.get_rank())
else:
data_sampler = None
dl = DataLoader(ds, BATCH_SIZE, shuffle = not data_sampler, sampler=data_sampler)
vae_params = dict(
image_size = IMAGE_SIZE,
num_layers = NUM_LAYERS,
num_tokens = NUM_TOKENS,
channels = CHANNELS,
codebook_dim = EMB_DIM,
hidden_dim = HIDDEN_DIM,
num_resnet_blocks = NUM_RESNET_BLOCKS
)
vae = DiscreteVAE(
**vae_params,
smooth_l1_loss = SMOOTH_L1_LOSS,
kl_div_loss_weight = KL_LOSS_WEIGHT
)
if not using_deepspeed:
vae = vae.cuda()
assert len(ds) > 0, 'folder does not contain any images'
if distr_backend.is_root_worker():
print(f'{len(ds)} images found for training')
# optimizer
opt = Adam(vae.parameters(), lr = LEARNING_RATE)
sched = ExponentialLR(optimizer = opt, gamma = LR_DECAY_RATE)
if distr_backend.is_root_worker():
# weights & biases experiment tracking
import wandb
model_config = dict(
num_tokens = NUM_TOKENS,
smooth_l1_loss = SMOOTH_L1_LOSS,
num_resnet_blocks = NUM_RESNET_BLOCKS,
kl_loss_weight = KL_LOSS_WEIGHT
)
run = wandb.init(
project = 'dalle_train_vae',
job_type = 'train_model',
config = model_config
)
# distribute
distr_backend.check_batch_size(BATCH_SIZE)
deepspeed_config = {'train_batch_size': BATCH_SIZE}
(distr_vae, distr_opt, distr_dl, distr_sched) = distr_backend.distribute(
args=args,
model=vae,
optimizer=opt,
model_parameters=vae.parameters(),
training_data=ds if using_deepspeed else dl,
lr_scheduler=sched if not using_deepspeed else None,
config_params=deepspeed_config,
)
using_deepspeed_sched = False
# Prefer scheduler in `deepspeed_config`.
if distr_sched is None:
distr_sched = sched
elif using_deepspeed:
# We are using a DeepSpeed LR scheduler and want to let DeepSpeed
# handle its scheduling.
using_deepspeed_sched = True
def save_model(path):
    # Checkpoint the discrete VAE (hyperparameters + weights) to *path*.
    # Under DeepSpeed the engine first writes its own checkpoint directory,
    # then the root worker additionally writes a plain torch checkpoint.
    save_obj = {
        'hparams': vae_params,
    }
    if using_deepspeed:
        cp_path = Path(path)
        path_sans_extension = cp_path.parent / cp_path.stem
        cp_dir = str(path_sans_extension) + '-ds-cp'
        # collective call: every worker must reach this, not just root
        distr_vae.save_checkpoint(cp_dir, client_state=save_obj)
        # We do not return so we do get a "normal" checkpoint to refer to.
    if not distr_backend.is_root_worker():
        return
    save_obj = {
        **save_obj,
        'weights': vae.state_dict()
    }
    torch.save(save_obj, path)
# starting temperature
global_step = 0
temp = STARTING_TEMP
for epoch in range(EPOCHS):
for i, (images, _) in enumerate(distr_dl):
images = images.cuda()
loss, recons = distr_vae(
images,
return_loss = True,
return_recons = True,
temp = temp
)
if using_deepspeed:
# Gradients are automatically zeroed after the step
distr_vae.backward(loss)
distr_vae.step()
else:
distr_opt.zero_grad()
loss.backward()
distr_opt.step()
logs = {}
if i % 100 == 0:
if distr_backend.is_root_worker():
k = NUM_IMAGES_SAVE
with torch.no_grad():
codes = vae.get_codebook_indices(images[:k])
hard_recons = vae.decode(codes)
images, recons = map(lambda t: t[:k], (images, recons))
images, recons, hard_recons, codes = map(lambda t: t.detach().cpu(), (images, recons, hard_recons, codes))
images, recons, hard_recons = map(lambda t: make_grid(t.float(), nrow = int(sqrt(k)), normalize = True, range = (-1, 1)), (images, recons, hard_recons))
logs = {
**logs,
'sample images': wandb.Image(images, caption = 'original images'),
'reconstructions': wandb.Image(recons, caption = 'reconstructions'),
'hard reconstructions': wandb.Image(hard_recons, caption = 'hard reconstructions'),
'codebook_indices': wandb.Histogram(codes),
'temperature': temp
}
wandb.save('./vae.pt')
save_model(f'./vae.pt')
# temperature anneal
temp = max(temp * math.exp(-ANNEAL_RATE * global_step), TEMP_MIN)
# lr decay
# Do not advance schedulers from `deepspeed_config`.
if not using_deepspeed_sched:
distr_sched.step()
# Collective loss, averaged
avg_loss = distr_backend.average_all(loss)
if distr_backend.is_root_worker():
if i % 10 == 0:
lr = distr_sched.get_last_lr()[0]
print(epoch, i, f'lr - {lr:6f} loss - {avg_loss.item()}')
logs = {
**logs,
'epoch': epoch,
'iter': i,
'loss': avg_loss.item(),
'lr': lr
}
wandb.log(logs)
global_step += 1
if distr_backend.is_root_worker():
# save trained model to wandb as an artifact every epoch's end
model_artifact = wandb.Artifact('trained-vae', type = 'model', metadata = dict(model_config))
model_artifact.add_file('vae.pt')
run.log_artifact(model_artifact)
if distr_backend.is_root_worker():
# save final vae and cleanup
save_model('./vae-final.pt')
wandb.save('./vae-final.pt')
model_artifact = wandb.Artifact('trained-vae', type = 'model', metadata = dict(model_config))
model_artifact.add_file('vae-final.pt')
run.log_artifact(model_artifact)
wandb.finish()
| 9,727 | 29.117647 | 168 | py |
DALLE-pytorch | DALLE-pytorch-main/dalle_pytorch/reversible.py | import torch
import torch.nn as nn
from operator import itemgetter
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# for routing arguments into the functions of the reversible layer
def route_args(router, args, depth):
    """Distribute keyword arguments to the (f, g) halves of *depth* layers.

    *router* maps an argument name to a per-layer sequence of
    ``(route_to_f, route_to_g)`` booleans; arguments absent from the router
    are dropped.  Returns a list of ``(f_kwargs, g_kwargs)`` pairs.
    """
    routed_args = [(dict(), dict()) for _ in range(depth)]
    for key in args.keys():
        if key not in router:
            continue
        val = args[key]
        # note: renamed the loop index so it no longer shadows `depth`
        for layer_idx, ((f_args, g_args), (f_route, g_route)) in enumerate(zip(routed_args, router[key])):
            if f_route:
                f_args = {**f_args, key: val}
            if g_route:
                g_args = {**g_args, key: val}
            routed_args[layer_idx] = (f_args, g_args)
    return routed_args
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
    # Wraps a module so its forward pass can be replayed deterministically:
    # record_rng captures the CPU (and, if CUDA is initialized, GPU) RNG
    # state, and a later forward with set_rng=True restores that state before
    # re-running the net.  Needed so dropout etc. match between the forward
    # pass and the recomputation done in reversible backprop.
    def __init__(self, net):
        super().__init__()
        self.net = net
        self.cpu_state = None
        self.cuda_in_fwd = None
        self.gpu_devices = None
        self.gpu_states = None
    def record_rng(self, *args):
        # snapshot RNG state; *args are the tensors whose devices matter
        self.cpu_state = torch.get_rng_state()
        # NOTE(review): torch.cuda._initialized is a private attribute —
        # relies on torch internals; confirm against the installed version
        if torch.cuda._initialized:
            self.cuda_in_fwd = True
            self.gpu_devices, self.gpu_states = get_device_states(*args)
    def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
        if record_rng:
            self.record_rng(*args)
        if not set_rng:
            return self.net(*args, **kwargs)
        # replay path: fork the RNG, restore the recorded state, then run
        rng_devices = []
        if self.cuda_in_fwd:
            rng_devices = self.gpu_devices
        with torch.random.fork_rng(devices=rng_devices, enabled=True):
            torch.set_rng_state(self.cpu_state)
            if self.cuda_in_fwd:
                set_device_states(self.gpu_devices, self.gpu_states)
            return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
    # One reversible coupling block: with the input split channel-wise into
    # (x1, x2), computes y1 = x1 + f(x2) and y2 = x2 + g(y1).  Activations
    # are not stored for backward; backward_pass reconstructs them from the
    # outputs, trading compute for memory.
    def __init__(self, f, g):
        super().__init__()
        # Deterministic wrappers record/replay RNG so recomputation matches
        self.f = Deterministic(f)
        self.g = Deterministic(g)
    def forward(self, x, f_args = {}, g_args = {}):
        x1, x2 = torch.chunk(x, 2, dim=2)
        y1, y2 = None, None
        # no_grad: the graph is rebuilt during backward_pass instead
        with torch.no_grad():
            y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
            y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
        return torch.cat([y1, y2], dim=2)
    def backward_pass(self, y, dy, f_args = {}, g_args = {}):
        # Reconstruct (x1, x2) from (y1, y2) and propagate gradients (dy1,
        # dy2) -> (dx1, dx2), recomputing f and g with the recorded RNG.
        y1, y2 = torch.chunk(y, 2, dim=2)
        del y
        dy1, dy2 = torch.chunk(dy, 2, dim=2)
        del dy
        with torch.enable_grad():
            y1.requires_grad = True
            gy1 = self.g(y1, set_rng=True, **g_args)
            torch.autograd.backward(gy1, dy2)
        with torch.no_grad():
            # invert the second coupling: x2 = y2 - g(y1)
            x2 = y2 - gy1
            del y2, gy1
            dx1 = dy1 + y1.grad
            del dy1
            y1.grad = None
        with torch.enable_grad():
            x2.requires_grad = True
            fx2 = self.f(x2, set_rng=True, **f_args)
            torch.autograd.backward(fx2, dx1, retain_graph=True)
        with torch.no_grad():
            # invert the first coupling: x1 = y1 - f(x2)
            x1 = y1 - fx2
            del y1, fx2
            dx2 = dy2 + x2.grad
            del dy2
            x2.grad = None
        x = torch.cat([x1, x2.detach()], dim=2)
        dx = torch.cat([dx1, dx2], dim=2)
        return x, dx
class _ReversibleFunction(Function):
    # Custom autograd Function that runs a stack of ReversibleBlocks without
    # autograd tracking (forward is under the Function's implicit no-grad)
    # and replays them in reverse via each block's backward_pass.
    @staticmethod
    def forward(ctx, x, blocks, args):
        ctx.args = args
        for block, kwarg in zip(blocks, args):
            x = block(x, **kwarg)
        # stash only the final activation; intermediates are reconstructed
        ctx.y = x.detach()
        ctx.blocks = blocks
        return x
    @staticmethod
    def backward(ctx, dy):
        y = ctx.y
        args = ctx.args
        # walk the blocks in reverse, inverting each and accumulating grads
        for block, kwargs in zip(ctx.blocks[::-1], args[::-1]):
            y, dy = block.backward_pass(y, dy, **kwargs)
        # gradients only w.r.t. x; blocks and args are non-tensor inputs
        return dy, None, None
class SequentialSequence(nn.Module):
    """Plain (non-reversible) stack of (f, g) layer pairs, each applied with
    a residual connection: ``x = x + f(x); x = x + g(x)``.

    ``args_route`` maps a kwarg name to per-layer ``(to_f, to_g)`` boolean
    routes deciding which half of each layer receives that kwarg.
    """
    def __init__(self, layers, args_route = {}, layer_dropout = 0.):
        super().__init__()
        assert all(len(route) == len(layers) for route in args_route.values()), 'each argument route map must have the same depth as the number of sequential layers'
        self.layers = layers
        self.args_route = args_route
        # stored for interface compatibility; not applied in forward
        self.layer_dropout = layer_dropout
    def forward(self, x, **kwargs):
        depth = len(self.layers)
        # route each kwarg to the f/g halves of every layer
        per_layer = [(dict(), dict()) for _ in range(depth)]
        for key, val in kwargs.items():
            if key not in self.args_route:
                continue
            for idx, ((f_args, g_args), routes) in enumerate(zip(per_layer, self.args_route[key])):
                extra_f, extra_g = ({key: val} if flag else {} for flag in routes)
                per_layer[idx] = ({**f_args, **extra_f}, {**g_args, **extra_g})
        for (f, g), (f_args, g_args) in zip(self.layers, per_layer):
            x = x + f(x, **f_args)
            x = x + g(x, **g_args)
        return x
class ReversibleSequence(nn.Module):
    """Runs (f, g) layer pairs as reversible blocks, trading compute for memory."""
    def __init__(self, blocks, args_route = {}):
        super().__init__()
        self.args_route = args_route
        self.blocks = nn.ModuleList([ReversibleBlock(f=f, g=g) for f, g in blocks])
    def forward(self, x, **kwargs):
        # duplicate the feature dim: the reversible coupling works on two halves
        x = torch.cat([x, x], dim=-1)
        routed = route_args(self.args_route, kwargs, len(self.blocks))
        block_kwargs = [{'f_args': f_args, 'g_args': g_args} for f_args, g_args in routed]
        out = _ReversibleFunction.apply(x, self.blocks, block_kwargs)
        # average the two halves back into a single stream
        return torch.stack(out.chunk(2, dim=-1)).mean(dim=0)
| 5,390 | 33.120253 | 165 | py |
DALLE-pytorch | DALLE-pytorch-main/dalle_pytorch/dalle_pytorch.py | from math import log2, sqrt
import torch
from torch import nn, einsum
import torch.nn.functional as F
import numpy as np
from axial_positional_embedding import AxialPositionalEmbedding
from einops import rearrange
from dalle_pytorch import distributed_utils
from dalle_pytorch.vae import OpenAIDiscreteVAE, VQGanVAE
from dalle_pytorch.transformer import Transformer, DivideMax
# helpers
def exists(val):
    """Tell whether *val* carries a value, i.e. is not ``None``."""
    if val is None:
        return False
    return True
def default(val, d):
    """Return *val* when it is set; otherwise fall back to *d*."""
    if not exists(val):
        return d
    return val
class always():
    """Callable constant: whatever it is called with, it returns the stored value."""
    def __init__(self, val):
        # value handed back on every call
        self.val = val
    def __call__(self, x, *args, **kwargs):
        # all arguments are accepted for API compatibility and ignored
        return self.val
def is_empty(t):
    """True when the tensor holds zero elements."""
    return t.numel() == 0
def masked_mean(t, mask, dim = 1):
    """Mean of ``t`` over the sequence axis, counting only True positions of ``mask``.

    ``t`` is (b, n, d) and ``mask`` is (b, n) boolean.
    NOTE(review): the ``dim`` argument is accepted but the reduction is
    hard-coded to dim 1 — confirm whether it was meant to be honored.
    """
    zeroed = t.masked_fill(~mask.unsqueeze(-1), 0.)
    total = zeroed.sum(dim = 1)
    count = mask.sum(dim = 1).unsqueeze(-1)
    return total / count
def prob_mask_like(shape, prob, device):
    """Boolean tensor of ``shape`` where each entry is True with probability ``prob``."""
    noise = torch.zeros(shape, device = device).float()
    noise.uniform_(0, 1)
    return noise < prob
def set_requires_grad(model, value):
    """Enable/disable gradient tracking for every parameter of ``model``."""
    for p in model.parameters():
        p.requires_grad_(value)
def eval_decorator(fn):
    """Decorator: run ``fn`` with the model in eval mode, then restore train mode.

    The wrapped function's first positional argument must be the model.
    """
    def inner(model, *args, **kwargs):
        previous_mode = model.training
        model.eval()
        result = fn(model, *args, **kwargs)
        # restore whatever mode the model was in before the call
        model.train(previous_mode)
        return result
    return inner
# sampling helpers
def log(t, eps = 1e-20):
    """Numerically-safe log: clamps the input to at least ``eps`` first."""
    return t.clamp(min = eps).log()
def gumbel_noise(t):
    """Sample standard Gumbel noise with the same shape/device/dtype as ``t``."""
    uniform = torch.zeros_like(t).uniform_(0, 1)
    # Gumbel(0, 1) via the double-log transform of a uniform sample
    return -log(-log(uniform))
def gumbel_sample(t, temperature = 1., dim = -1):
    """Gumbel-max sampling: argmax of temperature-scaled logits plus Gumbel noise."""
    perturbed = (t / temperature) + gumbel_noise(t)
    return perturbed.argmax(dim = dim)
def top_k(logits, thres = 0.5):
    """Keep the top (1 - thres) fraction of logits; everything else becomes -inf.

    Expects 2D logits (batch, vocab); at least one logit is always kept.
    """
    vocab = logits.shape[-1]
    keep = max(int((1 - thres) * vocab), 1)
    vals, idx = torch.topk(logits, keep)
    filtered = torch.full_like(logits, float('-inf'))
    filtered.scatter_(1, idx, vals)
    return filtered
class SharedEmbedding(nn.Embedding):
    """Embedding tied to a slice of an output projection's weight matrix.

    Token ids [0, end_index - start_index) look up rows
    [start_index:end_index) of ``linear.weight``, so input embeddings share
    storage with the output head instead of allocating their own matrix.
    """
    def __init__(self, linear, start_index, end_index, **kwargs):
        super().__init__(end_index - start_index, linear.weight.shape[1], **kwargs)
        # drop the freshly-allocated weight; all lookups go through `linear`
        del self.weight
        self.linear = linear
        self.start_index = start_index
        self.end_index = end_index
    def forward(self, input):
        # embed against the tied slice, reusing nn.Embedding's config attributes
        return F.embedding(
            input, self.linear.weight[self.start_index:self.end_index], self.padding_idx, self.max_norm,
            self.norm_type, self.scale_grad_by_freq, self.sparse)
# discrete vae class
class ResBlock(nn.Module):
    """Residual block: 3x3 conv -> ReLU -> 3x3 conv -> ReLU -> 1x1 conv, plus skip."""
    def __init__(self, chan):
        super().__init__()
        body = [
            nn.Conv2d(chan, chan, 3, padding = 1),
            nn.ReLU(),
            nn.Conv2d(chan, chan, 3, padding = 1),
            nn.ReLU(),
            nn.Conv2d(chan, chan, 1),
        ]
        self.net = nn.Sequential(*body)
    def forward(self, x):
        return x + self.net(x)
class DiscreteVAE(nn.Module):
    """Discrete VAE with a Gumbel-softmax codebook, trainable from scratch.

    Encodes images to per-position logits over ``num_tokens`` codes, samples a
    (relaxed) one-hot code per position, embeds it via the codebook, and
    decodes back to pixels. Trained with reconstruction loss plus an optional
    KL term against a uniform prior over codes.
    """
    def __init__(
        self,
        image_size = 256,
        num_tokens = 512,
        codebook_dim = 512,
        num_layers = 3,
        num_resnet_blocks = 0,
        hidden_dim = 64,
        channels = 3,
        smooth_l1_loss = False,
        temperature = 0.9,
        straight_through = False,
        reinmax = False,
        kl_div_loss_weight = 0.,
        normalization = ((*((0.5,) * 3), 0), (*((0.5,) * 3), 1))
    ):
        super().__init__()
        assert log2(image_size).is_integer(), 'image size must be a power of 2'
        assert num_layers >= 1, 'number of layers must be greater than or equal to 1'
        has_resblocks = num_resnet_blocks > 0
        self.channels = channels
        self.image_size = image_size
        self.num_tokens = num_tokens
        self.num_layers = num_layers
        self.temperature = temperature
        self.straight_through = straight_through
        self.reinmax = reinmax
        # codebook: one learned embedding vector per discrete token
        self.codebook = nn.Embedding(num_tokens, codebook_dim)
        hdim = hidden_dim
        # symmetric encoder/decoder channel schedules; each layer halves/doubles
        # the spatial resolution (stride-2 conv / transposed conv)
        enc_chans = [hidden_dim] * num_layers
        dec_chans = list(reversed(enc_chans))
        enc_chans = [channels, *enc_chans]
        dec_init_chan = codebook_dim if not has_resblocks else dec_chans[0]
        dec_chans = [dec_init_chan, *dec_chans]
        enc_chans_io, dec_chans_io = map(lambda t: list(zip(t[:-1], t[1:])), (enc_chans, dec_chans))
        enc_layers = []
        dec_layers = []
        for (enc_in, enc_out), (dec_in, dec_out) in zip(enc_chans_io, dec_chans_io):
            enc_layers.append(nn.Sequential(nn.Conv2d(enc_in, enc_out, 4, stride = 2, padding = 1), nn.ReLU()))
            dec_layers.append(nn.Sequential(nn.ConvTranspose2d(dec_in, dec_out, 4, stride = 2, padding = 1), nn.ReLU()))
        # optional resnet blocks at the encoder tail / decoder head
        for _ in range(num_resnet_blocks):
            dec_layers.insert(0, ResBlock(dec_chans[1]))
            enc_layers.append(ResBlock(enc_chans[-1]))
        if num_resnet_blocks > 0:
            dec_layers.insert(0, nn.Conv2d(codebook_dim, dec_chans[1], 1))
        # heads: encoder emits per-position token logits, decoder emits pixels
        enc_layers.append(nn.Conv2d(enc_chans[-1], num_tokens, 1))
        dec_layers.append(nn.Conv2d(dec_chans[-1], channels, 1))
        self.encoder = nn.Sequential(*enc_layers)
        self.decoder = nn.Sequential(*dec_layers)
        self.loss_fn = F.smooth_l1_loss if smooth_l1_loss else F.mse_loss
        self.kl_div_loss_weight = kl_div_loss_weight
        # take care of normalization within class
        self.normalization = tuple(map(lambda t: t[:channels], normalization))
        self._register_external_parameters()
    def _register_external_parameters(self):
        """Register external parameters for DeepSpeed partitioning."""
        if (
            not distributed_utils.is_distributed
            or not distributed_utils.using_backend(
                distributed_utils.DeepSpeedBackend)
        ):
            return
        deepspeed = distributed_utils.backend.backend_module
        deepspeed.zero.register_external_parameter(self, self.codebook.weight)
    def norm(self, images):
        """Channel-wise (mean, std) normalization of images; no-op when disabled."""
        if not exists(self.normalization):
            return images
        means, stds = map(lambda t: torch.as_tensor(t).to(images), self.normalization)
        means, stds = map(lambda t: rearrange(t, 'c -> () c () ()'), (means, stds))
        # clone so the caller's tensor is not mutated by the in-place ops below
        images = images.clone()
        images.sub_(means).div_(stds)
        return images
    @torch.no_grad()
    @eval_decorator
    def get_codebook_indices(self, images):
        """Return the argmax codebook index per spatial position, flattened to (b, n)."""
        logits = self(images, return_logits = True)
        codebook_indices = logits.argmax(dim = 1).flatten(1)
        return codebook_indices
    def decode(
        self,
        img_seq
    ):
        """Decode a flat sequence of codebook indices (b, n) back to images."""
        image_embeds = self.codebook(img_seq)
        b, n, d = image_embeds.shape
        h = w = int(sqrt(n))
        image_embeds = rearrange(image_embeds, 'b (h w) d -> b d h w', h = h, w = w)
        images = self.decoder(image_embeds)
        return images
    def forward(
        self,
        img,
        return_loss = False,
        return_recons = False,
        return_logits = False,
        temp = None
    ):
        """Encode-sample-decode; returns logits, reconstruction, loss, or (loss, recon).

        img: (b, channels, image_size, image_size). `temp` overrides the
        instance's Gumbel-softmax temperature for this call.
        """
        device, num_tokens, image_size, kl_div_loss_weight = img.device, self.num_tokens, self.image_size, self.kl_div_loss_weight
        assert img.shape[-1] == image_size and img.shape[-2] == image_size, f'input must have the correct image size {image_size}'
        img = self.norm(img)
        logits = self.encoder(img)
        if return_logits:
            return logits # return logits for getting hard image indices for DALL-E training
        temp = default(temp, self.temperature)
        one_hot = F.gumbel_softmax(logits, tau = temp, dim = 1, hard = self.straight_through)
        if self.straight_through and self.reinmax:
            # use reinmax for better second-order accuracy - https://arxiv.org/abs/2304.08612
            # algorithm 2
            one_hot = one_hot.detach()
            π0 = logits.softmax(dim = 1)
            π1 = (one_hot + (logits / temp).softmax(dim = 1)) / 2
            π1 = ((log(π1) - logits).detach() + logits).softmax(dim = 1)
            π2 = 2 * π1 - 0.5 * π0
            one_hot = π2 - π2.detach() + one_hot
        # weighted sum of codebook vectors per spatial position
        sampled = einsum('b n h w, n d -> b d h w', one_hot, self.codebook.weight)
        out = self.decoder(sampled)
        if not return_loss:
            return out
        # reconstruction loss
        recon_loss = self.loss_fn(img, out)
        # kl divergence
        logits = rearrange(logits, 'b n h w -> b (h w) n')
        log_qy = F.log_softmax(logits, dim = -1)
        log_uniform = torch.log(torch.tensor([1. / num_tokens], device = device))
        kl_div = F.kl_div(log_uniform, log_qy, None, None, 'batchmean', log_target = True)
        loss = recon_loss + (kl_div * kl_div_loss_weight)
        if not return_recons:
            return loss
        return loss, out
# main classes
class CLIP(nn.Module):
    """Contrastive text-image model (CLIP-style).

    Encodes text tokens and image patches with separate transformers, projects
    both into a shared latent space, and trains with a symmetric cross-entropy
    over the batch similarity matrix. Used to rerank DALL-E generations.

    NOTE(review): `num_visual_tokens` is accepted but unused in this class.
    """
    def __init__(
        self,
        *,
        dim_text = 512,
        dim_image = 512,
        dim_latent = 512,
        num_text_tokens = 10000,
        text_enc_depth = 6,
        text_seq_len = 256,
        text_heads = 8,
        num_visual_tokens = 512,
        visual_enc_depth = 6,
        visual_heads = 8,
        visual_image_size = 256,
        visual_patch_size = 32,
        channels = 3
    ):
        super().__init__()
        self.text_emb = nn.Embedding(num_text_tokens, dim_text)
        self.text_pos_emb = nn.Embedding(text_seq_len, dim_text)
        self.text_transformer = Transformer(causal = False, seq_len = text_seq_len, dim = dim_text, depth = text_enc_depth, heads = text_heads, rotary_emb = False)
        self.to_text_latent = nn.Linear(dim_text, dim_latent, bias = False)
        assert visual_image_size % visual_patch_size == 0, 'Image dimensions must be divisible by the patch size.'
        num_patches = (visual_image_size // visual_patch_size) ** 2
        patch_dim = channels * visual_patch_size ** 2
        self.visual_patch_size = visual_patch_size
        self.to_visual_embedding = nn.Linear(patch_dim, dim_image)
        self.visual_pos_emb = nn.Embedding(num_patches, dim_image)
        self.visual_transformer = Transformer(causal = False, seq_len = num_patches, dim = dim_image, depth = visual_enc_depth, heads = visual_heads, rotary_emb = False)
        self.to_visual_latent = nn.Linear(dim_image, dim_latent, bias = False)
        # learned logit scale, applied as exp(temperature) in forward
        self.temperature = nn.Parameter(torch.tensor(1.))
    def forward(
        self,
        text,
        image,
        text_mask = None,
        return_loss = False
    ):
        """Return per-pair similarity scores, or the symmetric contrastive loss.

        text: (b, text_seq_len) token ids; image: (b, c, h, w) pixels.
        With return_loss=False the similarity of each aligned (text, image)
        pair is returned; otherwise the batch contrastive loss.
        """
        b, device, p = text.shape[0], text.device, self.visual_patch_size
        text_emb = self.text_emb(text)
        text_emb += self.text_pos_emb(torch.arange(text.shape[1], device = device))
        # split the image into flattened p x p patches
        image_patches = rearrange(image, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
        image_emb = self.to_visual_embedding(image_patches)
        image_emb += self.visual_pos_emb(torch.arange(image_emb.shape[1], device = device))
        enc_text = self.text_transformer(text_emb, mask = text_mask)
        enc_image = self.visual_transformer(image_emb)
        # pool sequence features (mask-aware for text when a mask is given)
        if exists(text_mask):
            text_latents = masked_mean(enc_text, text_mask, dim = 1)
        else:
            text_latents = enc_text.mean(dim = 1)
        image_latents = enc_image.mean(dim = 1)
        text_latents = self.to_text_latent(text_latents)
        image_latents = self.to_visual_latent(image_latents)
        # unit-normalize so the dot products below are cosine similarities
        text_latents, image_latents = map(lambda t: F.normalize(t, p = 2, dim = -1), (text_latents, image_latents))
        temp = self.temperature.exp()
        if not return_loss:
            sim = einsum('n d, n d -> n', text_latents, image_latents) * temp
            return sim
        sim = einsum('i d, j d -> i j', text_latents, image_latents) * temp
        labels = torch.arange(b, device = device)
        # symmetric InfoNCE: match texts to images and images to texts
        loss = (F.cross_entropy(sim, labels) + F.cross_entropy(sim.t(), labels)) / 2
        return loss
# main DALL-E class
class DALLE(nn.Module):
    """Autoregressive text-to-image transformer (DALL-E).

    Concatenates text tokens and frozen-VAE image tokens into one causal
    sequence; training maximizes likelihood of image tokens given text, and
    generation samples image tokens which the VAE decodes back to pixels.
    """
    def __init__(
        self,
        *,
        dim,
        vae,
        num_text_tokens = 10000,
        text_seq_len = 256,
        depth,
        heads = 8,
        dim_head = 64,
        reversible = False,
        attn_dropout = 0.,
        ff_dropout = 0,
        sparse_attn = False,
        attn_types = None,
        loss_img_weight = 7,
        stable = False,
        sandwich_norm = False,
        shift_tokens = True,
        rotary_emb = True,
        shared_attn_ids = None,
        shared_ff_ids = None,
        share_input_output_emb = False,
        optimize_for_inference = False,
    ):
        super().__init__()
        assert isinstance(vae, (DiscreteVAE, OpenAIDiscreteVAE, VQGanVAE)), 'vae must be an instance of DiscreteVAE'
        image_size = vae.image_size
        num_image_tokens = vae.num_tokens
        # each VAE layer halves the resolution, so this is the token grid side
        image_fmap_size = (vae.image_size // (2 ** vae.num_layers))
        image_seq_len = image_fmap_size ** 2
        num_text_tokens = num_text_tokens + text_seq_len # reserve unique padding tokens for each position (text seq len)
        self.text_pos_emb = nn.Embedding(text_seq_len + 1, dim) if not rotary_emb else always(0) # +1 for <bos>
        self.image_pos_emb = AxialPositionalEmbedding(dim, axial_shape = (image_fmap_size, image_fmap_size)) if not rotary_emb else always(0)
        self.num_text_tokens = num_text_tokens # for offsetting logits index and calculating cross entropy loss
        self.num_image_tokens = num_image_tokens
        self.text_seq_len = text_seq_len
        self.image_seq_len = image_seq_len
        seq_len = text_seq_len + image_seq_len
        total_tokens = num_text_tokens + num_image_tokens
        self.total_tokens = total_tokens
        self.total_seq_len = seq_len
        self.vae = vae
        set_requires_grad(self.vae, False) # freeze VAE from being trained
        self.transformer = Transformer(
            dim = dim,
            causal = True,
            seq_len = seq_len,
            depth = depth,
            heads = heads,
            dim_head = dim_head,
            reversible = reversible,
            attn_dropout = attn_dropout,
            ff_dropout = ff_dropout,
            attn_types = attn_types,
            image_fmap_size = image_fmap_size,
            sparse_attn = sparse_attn,
            stable = stable,
            sandwich_norm = sandwich_norm,
            shift_tokens = shift_tokens,
            rotary_emb = rotary_emb,
            shared_attn_ids = shared_attn_ids,
            shared_ff_ids = shared_ff_ids,
            optimize_for_inference = optimize_for_inference,
        )
        self.stable = stable
        if stable:
            self.norm_by_max = DivideMax(dim = -1)
        self.to_logits = nn.Sequential(
            nn.LayerNorm(dim),
            nn.Linear(dim, self.total_tokens),
        )
        if share_input_output_emb:
            # tie input embeddings to slices of the output projection weight
            self.text_emb = SharedEmbedding(self.to_logits[1], 0, num_text_tokens)
            self.image_emb = SharedEmbedding(self.to_logits[1], num_text_tokens, total_tokens)
        else:
            self.text_emb = nn.Embedding(num_text_tokens, dim)
            self.image_emb = nn.Embedding(num_image_tokens, dim)
        # static mask: text positions may only predict text tokens, image
        # positions may only predict image tokens
        seq_range = torch.arange(seq_len)
        logits_range = torch.arange(total_tokens)
        seq_range = rearrange(seq_range, 'n -> () n ()')
        logits_range = rearrange(logits_range, 'd -> () () d')
        logits_mask = (
            ((seq_range >= text_seq_len) & (logits_range < num_text_tokens)) |
            ((seq_range < text_seq_len) & (logits_range >= num_text_tokens))
        )
        self.register_buffer('logits_mask', logits_mask, persistent=False)
        self.loss_img_weight = loss_img_weight
    @torch.no_grad()
    @eval_decorator
    def generate_texts(
        self,
        tokenizer,
        text = None,
        *,
        filter_thres = 0.5,
        temperature = 1.
    ):
        """Autoregressively sample text tokens; returns (token tensor, decoded strings).

        NOTE(review): tensors are created with `.cuda()` unconditionally — this
        path assumes a CUDA device is available; confirm before CPU use.
        """
        text_seq_len = self.text_seq_len
        if text is None or text == "":
            text_tokens = torch.tensor([[0]]).cuda()
        else:
            text_tokens = torch.tensor(tokenizer.tokenizer.encode(text)).cuda().unsqueeze(0)
        for _ in range(text_tokens.shape[1], text_seq_len):
            device = text_tokens.device
            tokens = self.text_emb(text_tokens)
            tokens += self.text_pos_emb(torch.arange(text_tokens.shape[1], device = device))
            seq_len = tokens.shape[1]
            output_transf = self.transformer(tokens)
            if self.stable:
                output_transf = self.norm_by_max(output_transf)
            logits = self.to_logits(output_transf)
            # mask logits to make sure text predicts text (except last token), and image predicts image
            logits_mask = self.logits_mask[:, :seq_len]
            max_neg_value = -torch.finfo(logits.dtype).max
            logits.masked_fill_(logits_mask, max_neg_value)
            logits = logits[:, -1, :]
            filtered_logits = top_k(logits, thres = filter_thres)
            sample = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
            text_tokens = torch.cat((text_tokens, sample[:, None]), dim=-1)
        padding_tokens = set(np.arange(self.text_seq_len) + (self.num_text_tokens - self.text_seq_len))
        texts = [tokenizer.tokenizer.decode(text_token, pad_tokens=padding_tokens) for text_token in text_tokens]
        return text_tokens, texts
    @torch.no_grad()
    @eval_decorator
    def generate_images(
        self,
        text,
        *,
        clip = None,
        filter_thres = 0.5,
        temperature = 1.,
        img = None,
        num_init_img_tokens = None,
        cond_scale = 1.,
        use_cache = False,
    ):
        """Sample image tokens conditioned on `text`, then decode them with the VAE.

        Optionally primes generation with codebook tokens from `img`, applies
        guidance via `cond_scale` (see forward_with_cond_scale), and when
        `clip` is given returns (images, clip scores) for reranking.
        """
        vae, text_seq_len, image_seq_len, num_text_tokens = self.vae, self.text_seq_len, self.image_seq_len, self.num_text_tokens
        total_len = text_seq_len + image_seq_len
        text = text[:, :text_seq_len] # make sure text is within bounds
        out = text
        if exists(img):
            image_size = vae.image_size
            assert img.shape[1] == 3 and img.shape[2] == image_size and img.shape[3] == image_size, f'input image must have the correct image size {image_size}'
            indices = vae.get_codebook_indices(img)
            num_img_tokens = default(num_init_img_tokens, int(0.4375 * image_seq_len)) # OpenAI used 14 * 32 initial tokens to prime
            assert num_img_tokens < image_seq_len, 'number of initial image tokens for priming must be less than the total image token sequence length'
            indices = indices[:, :num_img_tokens]
            out = torch.cat((out, indices), dim = -1)
        prev_cache = None  # NOTE(review): assigned but never read in this method
        cache = {} if use_cache else None
        for cur_len in range(out.shape[1], total_len):
            is_image = cur_len >= text_seq_len
            text, image = out[:, :text_seq_len], out[:, text_seq_len:]
            logits = self.forward_with_cond_scale(text, image, cond_scale = cond_scale, cache = cache)
            logits = logits[:, -1, :]
            filtered_logits = top_k(logits, thres = filter_thres)
            sample = gumbel_sample(filtered_logits, temperature = temperature, dim = -1)
            sample -= (num_text_tokens if is_image else 0) # offset sampled token if it is an image token, since logit space is composed of text and then image tokens
            out = torch.cat((out, sample[:, None]), dim=-1)
        text_seq = out[:, :text_seq_len]
        img_seq = out[:, -image_seq_len:]
        images = vae.decode(img_seq)
        if exists(clip):
            scores = clip(text_seq, images, return_loss = False)
            return images, scores
        return images
    def forward_with_cond_scale(self, *args, cond_scale = 1, cache = None, **kwargs):
        """Guidance: blend conditional and null-conditioned logits by `cond_scale`."""
        if cond_scale == 1:
            return self(*args, **kwargs)
        # snapshot the cache so the null-conditioned pass sees the same state
        prev_cache = cache.copy() if exists(cache) else None
        logits = self(*args, cache = cache, **kwargs)
        # discovery by Katherine Crowson
        # https://twitter.com/RiversHaveWings/status/1478093658716966912
        null_cond_logits = self(*args, null_cond_prob = 1., cache = prev_cache, **kwargs)
        return null_cond_logits + (logits - null_cond_logits) * cond_scale
    def forward(
        self,
        text,
        image = None,
        return_loss = False,
        null_cond_prob = 0.,
        cache = None,
    ):
        """Run the transformer over [text; image] tokens.

        text: (b, text_seq_len) ids. image: raw pixels (b, c, h, w) or
        codebook indices (b, image_seq_len); optional at inference. Returns
        logits, or (with return_loss=True) the weighted text+image loss.
        """
        assert text.shape[-1] == self.text_seq_len, f'the length {text.shape[-1]} of the text tokens you passed in does not have the correct length ({self.text_seq_len})'
        batch, device, total_seq_len = text.shape[0], text.device, self.total_seq_len
        # randomly remove text condition with <null_cond_prob> probability
        if null_cond_prob > 0:
            null_mask = prob_mask_like((batch,), null_cond_prob, device = device)
            # NOTE(review): in-place multiply mutates the caller's `text` tensor
            text *= rearrange(~null_mask, 'b -> b 1')
        # make sure padding in text tokens get unique padding token id
        text_range = torch.arange(self.text_seq_len, device = device) + (self.num_text_tokens - self.text_seq_len)
        text = torch.where(text == 0, text_range, text)
        # add <bos>
        text = F.pad(text, (1, 0), value = 0)
        tokens = self.text_emb(text)
        tokens += self.text_pos_emb(torch.arange(text.shape[1], device = device))
        seq_len = tokens.shape[1]
        if exists(image) and not is_empty(image):
            is_raw_image = len(image.shape) == 4
            if is_raw_image:
                image_size = self.vae.image_size
                channels = self.vae.channels
                assert tuple(image.shape[1:]) == (channels, image_size, image_size), f'invalid image of dimensions {image.shape} passed in during training'
                image = self.vae.get_codebook_indices(image)
            image_len = image.shape[1]
            image_emb = self.image_emb(image)
            image_emb += self.image_pos_emb(image_emb)
            tokens = torch.cat((tokens, image_emb), dim = 1)
            seq_len += image_len
        # when training, if the length exceeds the total text + image length
        # remove the last token, since it needs not to be trained
        if tokens.shape[1] > total_seq_len:
            seq_len -= 1
            tokens = tokens[:, :-1]
        if self.stable:
            alpha = 0.1
            tokens = tokens * alpha + tokens.detach() * (1 - alpha)
        # cached decoding: only the newest token needs to be processed
        if exists(cache) and cache.get('offset'):
            tokens = tokens[:, -1:]
        out = self.transformer(tokens, cache=cache)
        if self.stable:
            out = self.norm_by_max(out)
        logits = self.to_logits(out)
        # mask logits to make sure text predicts text (except last token), and image predicts image
        logits_mask = self.logits_mask[:, :seq_len]
        if exists(cache) and cache.get('offset'):
            logits_mask = logits_mask[:, -1:]
        max_neg_value = -torch.finfo(logits.dtype).max
        logits.masked_fill_(logits_mask, max_neg_value)
        if exists(cache):
            cache['offset'] = cache.get('offset', 0) + logits.shape[1]
        if not return_loss:
            return logits
        assert exists(image), 'when training, image must be supplied'
        # image labels live after the text vocabulary in logit space
        offsetted_image = image + self.num_text_tokens
        labels = torch.cat((text[:, 1:], offsetted_image), dim = 1)
        logits = rearrange(logits, 'b n c -> b c n')
        loss_text = F.cross_entropy(logits[:, :, :self.text_seq_len], labels[:, :self.text_seq_len])
        loss_img = F.cross_entropy(logits[:, :, self.text_seq_len:], labels[:, self.text_seq_len:])
        loss = (loss_text + self.loss_img_weight * loss_img) / (self.loss_img_weight + 1)
        return loss
| 23,608 | 34.13244 | 170 | py |
DALLE-pytorch | DALLE-pytorch-main/dalle_pytorch/vae.py | import io
import sys
import os
import requests
import PIL
import warnings
import hashlib
import urllib
import yaml
from pathlib import Path
from tqdm import tqdm
from math import sqrt, log
from packaging import version
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel, GumbelVQ
import importlib
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
from dalle_pytorch import distributed_utils
# constants
CACHE_PATH = os.path.expanduser("~/.cache/dalle")
OPENAI_VAE_ENCODER_PATH = 'https://cdn.openai.com/dall-e/encoder.pkl'
OPENAI_VAE_DECODER_PATH = 'https://cdn.openai.com/dall-e/decoder.pkl'
VQGAN_VAE_PATH = 'https://heibox.uni-heidelberg.de/f/140747ba53464f49b476/?dl=1'
VQGAN_VAE_CONFIG_PATH = 'https://heibox.uni-heidelberg.de/f/6ecf2af6c658432c8298/?dl=1'
# helpers methods
def exists(val):
    """Tell whether *val* carries a value, i.e. is not ``None``."""
    if val is None:
        return False
    return True
def default(val, d):
    """Return *val* when it is set; otherwise fall back to *d*."""
    if not exists(val):
        return d
    return val
def load_model(path):
    """Load a pickled torch object from ``path``, mapping all storages to CPU."""
    with open(path, 'rb') as checkpoint_file:
        loaded = torch.load(checkpoint_file, map_location = torch.device('cpu'))
    return loaded
def map_pixels(x, eps = 0.1):
    """Squash pixel values from [0, 1] into [eps, 1 - eps] (OpenAI dVAE convention)."""
    return x * (1 - 2 * eps) + eps
def unmap_pixels(x, eps = 0.1):
    """Inverse of map_pixels: stretch [eps, 1 - eps] back to [0, 1], clamped."""
    return ((x - eps) / (1 - 2 * eps)).clamp(0, 1)
def download(url, filename = None, root = CACHE_PATH):
    """Download `url` into `root` and return the local file path.

    Distributed-aware: only the local root worker downloads; other workers
    wait at a barrier until the file exists, then reuse it.

    Parameters:
        url: source URL.
        filename: destination file name; defaults to the URL's basename.
        root: destination directory (created by the root worker if missing).

    Returns:
        Path of the downloaded (or already present) file.

    Raises:
        RuntimeError: if the target path exists but is not a regular file.
    """
    if (
        not distributed_utils.is_distributed
        or distributed_utils.backend.is_local_root_worker()
    ):
        os.makedirs(root, exist_ok = True)
    filename = default(filename, os.path.basename(url))
    download_target = os.path.join(root, filename)
    # fix: write to a per-file temp name (was the constant 'tmp.(unknown)',
    # which made distinct downloads clobber each other's partial files)
    download_target_tmp = os.path.join(root, f'tmp.{filename}')
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if (
        distributed_utils.is_distributed
        and not distributed_utils.backend.is_local_root_worker()
        and not os.path.isfile(download_target)
    ):
        # If the file doesn't exist yet, wait until it's downloaded by the root worker.
        distributed_utils.backend.local_barrier()
    if os.path.isfile(download_target):
        return download_target
    with urllib.request.urlopen(url) as source, open(download_target_tmp, "wb") as output:
        with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    # atomic publish: rename the completed temp file onto the final path
    os.rename(download_target_tmp, download_target)
    if (
        distributed_utils.is_distributed
        and distributed_utils.backend.is_local_root_worker()
    ):
        distributed_utils.backend.local_barrier()
    return download_target
def make_contiguous(module):
    """Rewrite every parameter of ``module`` so its storage is contiguous."""
    with torch.no_grad():
        for p in module.parameters():
            p.set_(p.contiguous())
# package versions
def get_pkg_version(pkg_name):
    """Return the installed version string of distribution ``pkg_name``."""
    import pkg_resources
    return pkg_resources.get_distribution(pkg_name).version
# pretrained Discrete VAE from OpenAI
class OpenAIDiscreteVAE(nn.Module):
    """Wrapper around OpenAI's released pretrained discrete VAE (dVAE).

    Downloads the published encoder/decoder pickles on first use and exposes
    the codebook-index / decode interface expected by DALLE. Training is not
    supported; this model is inference-only.
    """
    def __init__(self):
        super().__init__()
        assert version.parse(get_pkg_version('torch')) < version.parse('1.11.0'), 'torch version must be <= 1.10 in order to use OpenAI discrete vae'
        # fetch the published encoder/decoder checkpoints into the local cache
        self.enc = load_model(download(OPENAI_VAE_ENCODER_PATH))
        self.dec = load_model(download(OPENAI_VAE_DECODER_PATH))
        make_contiguous(self)
        self.channels = 3
        self.num_layers = 3
        self.image_size = 256
        self.num_tokens = 8192
    @torch.no_grad()
    def get_codebook_indices(self, img):
        """Map images (b, 3, 256, 256) in [0, 1] to flat codebook indices (b, n)."""
        img = map_pixels(img)
        z_logits = self.enc.blocks(img)
        z = torch.argmax(z_logits, dim = 1)
        return rearrange(z, 'b h w -> b (h w)')
    def decode(self, img_seq):
        """Decode flat codebook indices (b, n) back to images in [0, 1]."""
        b, n = img_seq.shape
        img_seq = rearrange(img_seq, 'b (h w) -> b h w', h = int(sqrt(n)))
        z = F.one_hot(img_seq, num_classes = self.num_tokens)
        z = rearrange(z, 'b h w c -> b c h w').float()
        x_stats = self.dec(z).float()
        x_rec = unmap_pixels(torch.sigmoid(x_stats[:, :3]))
        return x_rec
    def forward(self, img):
        # fix: `raise NotImplemented` raised a TypeError at runtime, because
        # NotImplemented is not an exception; NotImplementedError is correct
        raise NotImplementedError
# VQGAN from Taming Transformers paper
# https://arxiv.org/abs/2012.09841
def get_obj_from_str(string, reload=False):
    """Resolve a dotted import path like 'package.module.Name' to the object."""
    module_name, attr_name = string.rsplit(".", 1)
    if reload:
        # re-import so a stale module picks up on-disk changes first
        importlib.reload(importlib.import_module(module_name))
    return getattr(importlib.import_module(module_name, package=None), attr_name)
def instantiate_from_config(config):
    """Build the object described by ``config``: {'target': dotted.path, 'params': {...}}."""
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    params = config.get("params", dict())
    return get_obj_from_str(config["target"])(**params)
class VQGanVAE(nn.Module):
    """Wrapper around a pretrained Taming-Transformers VQGAN.

    Loads either the default f=16, 1024-token checkpoint (downloaded on
    demand) or a user-supplied model/config pair, and adapts it to the
    codebook-index / decode interface expected by DALLE. Inference-only.
    """
    def __init__(self, vqgan_model_path=None, vqgan_config_path=None):
        super().__init__()
        if vqgan_model_path is None:
            model_filename = 'vqgan.1024.model.ckpt'
            config_filename = 'vqgan.1024.config.yml'
            download(VQGAN_VAE_CONFIG_PATH, config_filename)
            download(VQGAN_VAE_PATH, model_filename)
            config_path = str(Path(CACHE_PATH) / config_filename)
            model_path = str(Path(CACHE_PATH) / model_filename)
        else:
            model_path = vqgan_model_path
            config_path = vqgan_config_path
        config = OmegaConf.load(config_path)
        model = instantiate_from_config(config["model"])
        state = torch.load(model_path, map_location = 'cpu')['state_dict']
        model.load_state_dict(state, strict = False)
        print(f"Loaded VQGAN from {model_path} and {config_path}")
        self.model = model
        # f as used in https://github.com/CompVis/taming-transformers#overview-of-pretrained-models
        f = config.model.params.ddconfig.resolution / config.model.params.ddconfig.attn_resolutions[0]
        self.num_layers = int(log(f)/log(2))
        self.channels = 3
        self.image_size = 256
        self.num_tokens = config.model.params.n_embed
        # Gumbel VQGANs store the codebook under a different attribute name
        self.is_gumbel = isinstance(self.model, GumbelVQ)
        self._register_external_parameters()
    def _register_external_parameters(self):
        """Register external parameters for DeepSpeed partitioning."""
        if (
            not distributed_utils.is_distributed
            or not distributed_utils.using_backend(
                distributed_utils.DeepSpeedBackend)
        ):
            return
        deepspeed = distributed_utils.backend.backend_module
        deepspeed.zero.register_external_parameter(
            self, self.model.quantize.embed.weight if self.is_gumbel else self.model.quantize.embedding.weight)
    @torch.no_grad()
    def get_codebook_indices(self, img):
        """Map images in [0, 1] to flat VQGAN codebook indices (b, n)."""
        b = img.shape[0]
        img = (2 * img) - 1
        _, _, [_, _, indices] = self.model.encode(img)
        if self.is_gumbel:
            return rearrange(indices, 'b h w -> b (h w)', b=b)
        return rearrange(indices, '(b n) -> b n', b = b)
    def decode(self, img_seq):
        """Decode flat codebook indices (b, n) back to images in [0, 1]."""
        b, n = img_seq.shape
        one_hot_indices = F.one_hot(img_seq, num_classes = self.num_tokens).float()
        z = one_hot_indices @ self.model.quantize.embed.weight if self.is_gumbel \
            else (one_hot_indices @ self.model.quantize.embedding.weight)
        z = rearrange(z, 'b (h w) c -> b c h w', h = int(sqrt(n)))
        img = self.model.decode(z)
        img = (img.clamp(-1., 1.) + 1) * 0.5
        return img
    def forward(self, img):
        # fix: `raise NotImplemented` raised a TypeError at runtime, because
        # NotImplemented is not an exception; NotImplementedError is correct
        raise NotImplementedError
| 7,674 | 31.939914 | 149 | py |
DALLE-pytorch | DALLE-pytorch-main/dalle_pytorch/distributed_utils.py | """
Utility functions for optional distributed execution.
To use,
1. set the `BACKENDS` to the ones you want to make available,
2. in the script, wrap the argument parser with `wrap_arg_parser`,
3. in the script, set and use the backend by calling
`set_backend_from_args`.
You can check whether a backend is in use with the `using_backend`
function.
"""
from dalle_pytorch.distributed_backends import \
DeepSpeedBackend, \
DummyBackend, \
HorovodBackend
_DEFAULT_BACKEND = DummyBackend()
"""Which backend to use by default. Assumed to be _not_ distributed."""
BACKENDS = [
_DEFAULT_BACKEND,
DeepSpeedBackend(),
HorovodBackend(),
]
is_distributed = None
"""Whether we are distributed."""
backend = None
"""Backend in usage."""
def wrap_arg_parser(parser):
    """Add arguments to support optional distributed backend usage.

    Registers the --distributed_backend/--distr_backend flag, then lets each
    backend in BACKENDS attach its own flags. Returns the same parser.
    """
    parser.add_argument(
        '--distributed_backend',
        '--distr_backend',
        type=str,
        default=None,
        help='which distributed backend to use. Do not distribute by default',
    )
    # every available backend may contribute its own command-line options
    for distr_backend in BACKENDS:
        parser = distr_backend.wrap_arg_parser(parser)
    return parser
def set_backend_from_args(args):
    """Select the distributed backend named by ``args`` and store it module-wide.

    Mutates the module globals ``is_distributed`` and ``backend`` and returns
    the chosen backend. Raises ModuleNotFoundError when the chosen backend's
    library is unavailable and ValueError for an unknown backend name.
    """
    global is_distributed, backend
    # Handle this specially for backwards compatibility.
    if args.deepspeed:
        args.distributed_backend = DeepSpeedBackend.BACKEND_NAME
    if not args.distributed_backend:
        # nothing requested: run single-process with the dummy backend
        is_distributed = False
        backend = _DEFAULT_BACKEND
        return backend
    requested = args.distributed_backend.lower()
    for candidate in BACKENDS:
        if candidate.BACKEND_NAME.lower() != requested:
            continue
        backend = candidate
        if not backend.has_backend():
            raise ModuleNotFoundError(
                f'{backend.BACKEND_NAME} backend selected but '
                'module not available'
            )
        print(f'Using {backend.BACKEND_NAME} for distributed execution')
        is_distributed = True
        return backend
    raise ValueError(
        'unknown backend; please check `distributed_utils.BACKENDS`')
def require_set_backend():
    """Raise an `AssertionError` when the backend has not been set.

    Guard for functions that need `set_backend_from_args` to have run first.
    NOTE(review): being an `assert`, this check disappears under `python -O`.
    """
    assert backend is not None, (
        'distributed backend is not set. Please call '
        '`distributed_utils.set_backend_from_args` at the start of your script'
    )
def using_backend(test_backend):
    """Check whether the active backend matches ``test_backend``.

    ``test_backend`` may be the backend's registered name (a string) or its
    class. Requires the backend to have been set first.
    """
    require_set_backend()
    name_given = isinstance(test_backend, str)
    # compare by name for strings, by class for backend types
    return backend.BACKEND_NAME == test_backend if name_given else isinstance(backend, test_backend)
| 2,839 | 28.278351 | 79 | py |
DALLE-pytorch | DALLE-pytorch-main/dalle_pytorch/transformer.py | from collections import deque
from collections.abc import Iterable
from functools import partial
from itertools import islice, cycle
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from dalle_pytorch.reversible import ReversibleSequence, SequentialSequence
from dalle_pytorch.attention import Attention, SparseAttention, SparseConvCausalAttention, SparseAxialCausalAttention
from rotary_embedding_torch import RotaryEmbedding, broadcat
# helpers
def exists(val):
    """Tell whether *val* carries a value, i.e. is not ``None``."""
    if val is None:
        return False
    return True
def default(val, d):
    """Return *val* when it is set; otherwise fall back to *d*."""
    if not exists(val):
        return d
    return val
def cast_tuple(val, depth = 1):
    """Pass iterables through unchanged; replicate a scalar into a ``depth``-tuple."""
    if isinstance(val, Iterable):
        return val
    return (val,) * depth
# classes
class DivideMax(nn.Module):
    """Normalize activations by their (detached) maximum along ``dim``."""
    def __init__(self, dim):
        super().__init__()
        self.dim = dim
    def forward(self, x):
        # detach so no gradient flows through the normalizer itself
        denom = x.amax(dim = self.dim, keepdim = True).detach()
        return x / denom
class NonCached(nn.Module):
    """
    A wrapper for layers that don't support the inference cache themselves.
    Reconstructs the full sequence before the layer and
    cuts the suffix of the outputs after the layer.
    """
    def __init__(self, fn):
        super().__init__()
        self.fn = fn
    def forward(self, x, *, cache = None, cache_key = None, **kwargs):
        suffix_len = x.shape[-2]
        if exists(cache):
            if cache_key in cache:
                # prepend previously-seen tokens so `fn` sees the full sequence
                x = torch.cat([cache[cache_key], x], dim=-2)
            cache[cache_key] = x
        full_out = self.fn(x, **kwargs)
        # hand back only the outputs for the freshly-supplied suffix
        return full_out[:, -suffix_len:]
class CachedAs(nn.Module):
    """
    A wrapper that defines a key for the inference cache.

    Binds a fixed `cache_key` and forwards it (plus the shared cache dict) to
    the wrapped module, so each inner layer owns its own slot of the cache.
    """
    def __init__(self, cache_key, fn):
        super().__init__()
        self.cache_key = cache_key
        self.fn = fn
    def forward(self, x, *, cache=None, **kwargs):
        # inject this wrapper's key alongside the shared cache
        return self.fn(x, cache=cache, cache_key=self.cache_key, **kwargs)
# https://arxiv.org/abs/2103.17239
class LayerScale(nn.Module):
    """Scale a residual branch by a small learned per-channel factor (CaiT).

    https://arxiv.org/abs/2103.17239 — deeper networks get a smaller initial
    scale so residual branches start close to identity.
    """
    def __init__(self, dim, depth, fn):
        super().__init__()
        # depth-dependent initial epsilon, per the paper
        if depth <= 18:
            init_eps = 0.1
        elif depth <= 24:
            init_eps = 1e-5
        else:
            init_eps = 1e-6
        self.scale = nn.Parameter(torch.full((1, 1, dim), init_eps))
        self.fn = fn
    def forward(self, x, **kwargs):
        return self.fn(x, **kwargs) * self.scale
# layer norm
class PreNorm(nn.Module):
    """Apply LayerNorm before ``fn``, and optionally after ('sandwich' norm)."""
    def __init__(self, dim, fn, sandwich = False):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        # Identity keeps the non-sandwich path a no-op
        self.norm_out = nn.LayerNorm(dim) if sandwich else nn.Identity()
        self.fn = fn
    def forward(self, x, **kwargs):
        return self.norm_out(self.fn(self.norm(x), **kwargs))
# feed forward
class GEGLU(nn.Module):
    """Gated GELU: split the last dimension in half and gate one half with
    the GELU of the other."""

    def forward(self, x):
        value, gate = x.chunk(2, dim = -1)
        return value * F.gelu(gate)
class FeedForward(nn.Module):
    """Position-wise feed-forward block with a GEGLU gate.

    Projects `dim -> 2 * dim * mult` (doubled because GEGLU halves the last
    dimension), gates, applies dropout, then projects back down to `dim`.
    """

    def __init__(self, dim, dropout = 0., mult = 4.):
        super().__init__()
        # BUGFIX: the default `mult` is a float, so `dim * mult * 2` was a
        # float too — nn.Linear requires integer feature counts and raises a
        # TypeError on modern PyTorch. Cast the hidden width explicitly
        # (identical value for all integral dim * mult, e.g. the default).
        hidden = int(dim * mult)
        self.net = nn.Sequential(
            nn.Linear(dim, hidden * 2),
            GEGLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden, dim)
        )

    def forward(self, x, cache=None, cache_key=None):
        # `cache` / `cache_key` are accepted (and ignored) so this block is a
        # drop-in peer of the attention wrappers during cached inference
        return self.net(x)
# token shift classes
class PreShiftToken(nn.Module):
    """Token-shift wrapper: before calling `fn`, part of each embedding is
    replaced with features from neighbouring positions — the previous token
    for the text segment, and the tokens above / to the left for image tokens
    laid out on an `image_size` x `image_size` grid.

    For incremental decoding, a deque of the last `image_size` positions'
    (top, left) feature chunks is kept in `cache` so single-token steps can
    be shifted without re-materializing the whole sequence.
    """

    def __init__(self, fn, image_size, seq_len):
        super().__init__()
        self.fn = fn
        self.image_size = image_size
        self.seq_len = seq_len
        self.img_seq_len = image_size ** 2
        # number of text positions (the +1 accounts for the shifted target)
        self.text_len = seq_len - self.img_seq_len + 1

    def forward(self, x, cache=None, cache_key=None, **kwargs):
        seq_len, image_size, text_len = self.seq_len, self.image_size, self.text_len

        # --- cached single-token decoding path -----------------------------
        if exists(cache) and cache_key in cache:
            offset = cache['offset']
            assert offset >= text_len, "cached inference for text is not supported"
            q = cache[cache_key]
            assert isinstance(q, deque) and len(q) == image_size

            # the embedding is split into quarters: (top-shift, left-shift, pass-through...)
            x_top, x_left, *x_pass = x[:, -1].chunk(4, dim=-1)

            q.append((x_top, x_left))
            x_top = q.popleft()[0]          # feature from the row above
            x_left = q[-2][1]               # feature from the previous column
            if (offset - text_len) % image_size == 0:
                # first column of a row has no left neighbour
                x_left = torch.zeros_like(x_left)

            x = torch.cat((x_top, x_left, *x_pass), dim=-1)
            return self.fn(x[:, None], cache=cache, **kwargs)

        # --- full-sequence path --------------------------------------------
        n = x.shape[1]
        padding = seq_len - n + 1

        # if sequence is shorter than the text length, no image tokens to shift
        if n < text_len:
            return self.fn(x, **kwargs)

        # get text and image tokens
        x_text, x_img = x[:, :text_len], x[:, text_len:]

        x_img = F.pad(x_img, (0, 0, 0, padding))
        x_img = rearrange(x_img, 'b (h w) d -> b h w d', h = image_size)

        # shift 1 from the left for text tokens
        x_text_shift, x_text_pass = x_text.chunk(2, dim = -1)
        x_text_shift = F.pad(x_text_shift, (0, 0, 1, -1))
        x_text = torch.cat((x_text_shift, x_text_pass), dim = -1)

        # shift from top, left for image tokens
        x_img_shift_top, x_img_shift_left, *x_img_pass = x_img.chunk(4, dim = -1)
        x_img_shift_left = F.pad(x_img_shift_left, (0, 0, 1, -1))
        x_img_shift_top = F.pad(x_img_shift_top, (0, 0, 0, 0, 1, -1))
        x_img = torch.cat((x_img_shift_top, x_img_shift_left, *x_img_pass), dim = -1)

        # merge text and image sequence back together
        x_img = rearrange(x_img, 'b h w d -> b (h w) d')
        x_img = x_img[:, :-padding]
        x = torch.cat((x_text, x_img), dim = 1)

        if exists(cache):
            # prime the deque with the last `image_size` (top, left) chunks so
            # subsequent single-token steps can continue the shifting pattern
            dummy_top, dummy_left, *_ = x[:, -1].chunk(4, dim=-1)
            dummy_top, dummy_left = torch.zeros_like(dummy_top), torch.zeros_like(dummy_left)

            q = deque()
            x_img = x_img[:, -image_size:]
            for _ in range(image_size - x_img.shape[1]):
                q.append((dummy_top, dummy_left))
            for i in range(x_img.shape[1]):
                q.append(x_img[:, i].chunk(4, dim=-1)[:2])
            cache[cache_key] = q

        return self.fn(x, cache=cache, **kwargs)
# main transformer class
class Transformer(nn.Module):
    """Stack of (attention, feed-forward) layers used by DALL-E.

    Supports several attention flavours per layer (`attn_types` cycles over
    the depth), optional reversible execution, LayerScale, sandwich norm,
    token shifting, rotary positional embeddings, and parameter sharing
    across layers via `shared_attn_ids` / `shared_ff_ids`.
    """

    def __init__(
        self,
        *,
        dim,
        depth,
        seq_len,
        reversible = False,
        causal = True,
        heads = 8,
        dim_head = 64,
        ff_mult = 4,
        attn_dropout = 0.,
        ff_dropout = 0.,
        attn_types = None,
        image_fmap_size = None,
        sparse_attn = False,
        stable = False,
        sandwich_norm = False,
        shift_tokens = False,
        rotary_emb = True,
        shared_attn_ids = None,
        shared_ff_ids = None,
        optimize_for_inference = False,  # use cache-friendly masked attention instead of sparse one
    ):
        super().__init__()
        layers = nn.ModuleList([])
        sparse_layer = cast_tuple(sparse_attn, depth)

        self.seq_len = seq_len
        self.image_fmap_size = image_fmap_size

        # cycle the requested attention types over all `depth` layers
        attn_types = default(attn_types, ('full',))
        attn_types = cast_tuple(attn_types)
        attn_type_layer = islice(cycle(attn_types), depth)

        # by default every layer gets a unique id, i.e. no sharing
        shared_attn_ids = cycle(default(shared_attn_ids, range(depth)))
        shared_ff_ids = cycle(default(shared_ff_ids, range(depth)))
        shared_attn_layers = {}
        shared_ff_layers = {}

        for (ind, sparse_attn, attn_type, attn_id, ff_id) in \
            zip(range(depth), sparse_layer, attn_type_layer, shared_attn_ids, shared_ff_ids):
            if attn_type == 'full':
                attn_class = partial(Attention, stable = stable)
            elif attn_type == 'sparse':
                attn_class = SparseAttention
            elif attn_type == 'axial_row':
                if optimize_for_inference:
                    # dense attention with a precomputed static mask emulating the axial pattern
                    attn_class = partial(Attention, stable = stable, static_mask = self._get_attention_mask(attn_type))
                else:
                    attn_class = partial(SparseAxialCausalAttention, seq_len = seq_len, axis = 0, image_size = image_fmap_size, stable = stable)
            elif attn_type == 'axial_col':
                if optimize_for_inference:
                    attn_class = partial(Attention, stable = stable, static_mask = self._get_attention_mask(attn_type))
                else:
                    attn_class = partial(SparseAxialCausalAttention, seq_len = seq_len, axis = 1, image_size = image_fmap_size, stable = stable)
            elif attn_type == 'conv_like':
                attn_class = partial(SparseConvCausalAttention, seq_len = seq_len, image_size = image_fmap_size, stable = stable)
            else:
                raise ValueError(f'attention type "{attn_type}" is not valid')

            # reuse a previously built attention layer when ids repeat
            attn, reused_attn_type = shared_attn_layers.get(attn_id, (None, None))
            if not exists(attn):
                attn = attn_class(dim, causal = causal, seq_len = seq_len, heads = heads, dim_head = dim_head, dropout = attn_dropout)
                shared_attn_layers[attn_id] = (attn, attn_type)
            elif attn_type != reused_attn_type:
                raise ValueError('attn_types do not match shared_attn_ids '
                                 f'(ind = {ind}, attn_type = "{attn_type}", reused_attn_type = "{reused_attn_type}")')

            ff = shared_ff_layers.get(ff_id)
            if not exists(ff):
                ff = FeedForward(dim, mult = ff_mult, dropout = ff_dropout)
                shared_ff_layers[ff_id] = ff

            if isinstance(attn, Attention):
                attn = CachedAs(f'attn_{ind}', attn)
            else:
                # at the moment, other attention classes don't support cache
                attn = NonCached(attn)

            if shift_tokens:
                # token shifting also keeps its own per-layer cache entry
                attn = CachedAs(f'preshift_attn_{ind}', PreShiftToken(attn, image_size = image_fmap_size, seq_len = seq_len))
                ff = CachedAs(f'preshift_ff_{ind}', PreShiftToken(ff, image_size = image_fmap_size, seq_len = seq_len))

            layers.append(nn.ModuleList([
                LayerScale(dim, ind + 1, PreNorm(dim, attn, sandwich = sandwich_norm)),
                LayerScale(dim, ind + 1, PreNorm(dim, ff, sandwich = sandwich_norm))
            ]))

        execute_type = ReversibleSequence if reversible else SequentialSequence
        # route `mask`/`rotary_pos_emb` only to attention, `cache` to both halves
        route_attn = ((True, False),) * depth
        route_all = ((True, True),) * depth
        attn_route_map = {'mask': route_attn, 'rotary_pos_emb': route_attn,
                          'cache': route_all}

        self.layers = execute_type(layers, args_route = attn_route_map)

        # generate positional embeddings for rotary
        pos_emb = None
        if rotary_emb:
            rot_dim = dim_head // 3
            img_seq_len = (image_fmap_size ** 2)
            text_len = seq_len - img_seq_len + 1

            text_pos_emb = RotaryEmbedding(dim = rot_dim)
            img_axial_pos_emb = RotaryEmbedding(dim = rot_dim, freqs_for = 'pixel')

            text_freqs = text_pos_emb(torch.arange(text_len))
            img_to_text_freqs = text_pos_emb(torch.full((img_seq_len,), 8192))  # image is given a position far away from text
            text_freqs = torch.cat((text_freqs, img_to_text_freqs), dim = 0)

            img_freqs_axial = img_axial_pos_emb(torch.linspace(-1, 1, steps = image_fmap_size))
            # combine row and column axial frequencies into per-pixel frequencies
            img_freqs = broadcat((rearrange(img_freqs_axial, 'i d -> i () d'), rearrange(img_freqs_axial, 'j d -> () j d')), dim = -1)
            img_freqs = rearrange(img_freqs, 'h w d -> (h w) d')

            text_axial_freqs = img_axial_pos_emb(torch.full((text_len,), -10.))  # text is given a position of -10 apart from the image axial positions, which is from range [-1, 1]
            text_axial_freqs = torch.cat((text_axial_freqs, text_axial_freqs), dim = -1)

            img_freqs = torch.cat((text_axial_freqs, img_freqs), dim = 0)

            pos_emb = torch.cat((text_freqs, img_freqs), dim = -1)
            pos_emb = rearrange(pos_emb, 'n d -> () n d')

        # buffer may be None when rotary embeddings are disabled
        self.register_buffer('pos_emb', pos_emb)

    def forward(self, x, **kwargs):
        """Run the layer stack, threading the rotary embeddings through."""
        return self.layers(x, rotary_pos_emb = self.pos_emb, **kwargs)

    def _get_attention_mask(self, attn_type):
        """Build a boolean (seq_len, seq_len) mask emulating axial attention:
        every position may attend to all text; image positions may additionally
        attend within their own row ('axial_row') or column ('axial_col')."""
        img_seq_len = self.image_fmap_size ** 2
        text_len = self.seq_len + 1 - img_seq_len

        static_mask = torch.zeros(self.seq_len, self.seq_len, dtype=torch.bool)
        static_mask[:, :text_len] = True
        if attn_type == 'axial_row':
            for row in range(self.image_fmap_size):
                begin = text_len + row * self.image_fmap_size
                end = text_len + (row + 1) * self.image_fmap_size
                static_mask[begin:end, begin:end] = True
        elif attn_type == 'axial_col':
            for col in range(self.image_fmap_size):
                begin = text_len + col
                static_mask[begin::self.image_fmap_size, begin::self.image_fmap_size] = True
        else:
            raise ValueError(f'attention type "{attn_type}" can\'t be simulated with a static mask')
        return static_mask
| 13,131 | 36.413105 | 180 | py |
DALLE-pytorch | DALLE-pytorch-main/dalle_pytorch/tokenizer.py | # take from https://github.com/openai/CLIP/blob/main/clip/simple_tokenizer.py
# to give users a quick easy start to training DALL-E without doing BPE
import torch
import youtokentome as yttm
from tokenizers import Tokenizer
from tokenizers.processors import ByteLevel
from transformers import BertTokenizer
import html
import os
from functools import lru_cache
from pathlib import Path
import ftfy
import regex as re
# OpenAI simple tokenizer
@lru_cache()
def default_bpe():
    """Absolute path of the bundled BPE vocab file, located next to this module."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, "data/bpe_simple_vocab_16e6.txt")
@lru_cache()
def bytes_to_unicode():
    """Map every byte value (0-255) to a printable unicode character.

    Printable latin-1 characters map to themselves; the remaining bytes are
    assigned code points starting at 256, so the mapping is reversible and
    free of whitespace/control characters. Insertion order (printable bytes
    first, then the escapees in ascending byte order) is preserved, since
    downstream vocab construction relies on `.values()` ordering.
    """
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    mapping = {b: chr(b) for b in printable}
    shift = 0
    for b in range(2 ** 8):
        if b not in mapping:
            mapping[b] = chr(2 ** 8 + shift)
            shift += 1
    return mapping
def get_pairs(word):
    """Return the set of adjacent symbol bigrams in *word* (a sequence of symbols)."""
    prev = word[0]
    pairs = set()
    for sym in word[1:]:
        pairs.add((prev, sym))
        prev = sym
    return pairs
def basic_clean(text):
    """Fix mojibake with ftfy and resolve (possibly doubly) HTML-escaped entities."""
    fixed = ftfy.fix_text(text)
    # unescape twice: some corpora are HTML-escaped two levels deep
    return html.unescape(html.unescape(fixed)).strip()
def whitespace_clean(text):
    """Collapse every whitespace run to a single space and trim the ends."""
    return re.sub(r'\s+', ' ', text).strip()
class SimpleTokenizer(object):
    """Byte-pair-encoding tokenizer adapted from OpenAI CLIP's simple tokenizer.

    Vocabulary layout: 256 byte symbols, the same 256 with a `</w>` end-of-word
    suffix, 48894 merge tokens, then `<|startoftext|>` (id 49406) and
    `<|endoftext|>` (id 49407) — 49408 entries total.
    """

    def __init__(self, bpe_path = default_bpe()):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        merges = Path(bpe_path).read_text(encoding='utf8').split('\n')
        # skip the header line; keep exactly 49152 - 256 - 2 merge rules
        merges = merges[1:49152 - 256 - 2 + 1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v + '</w>' for v in vocab]
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.vocab_size = 49408
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # special tokens bypass the BPE merge loop entirely
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        self.pat = re.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
            re.IGNORECASE)

    def bpe(self, token):
        """Apply the merge rules to a single regex token; returns a space-joined
        string of sub-word symbols (memoized in `self.cache`)."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + '</w>',)
        pairs = get_pairs(word)

        if not pairs:
            return token + '</w>'

        while True:
            # merge the lowest-ranked (i.e. most frequent) bigram first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                # `index` raises ValueError once `first` no longer occurs
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except ValueError:
                    new_word.extend(word[i:])
                    break

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Clean, lower-case and BPE-encode *text* into a list of token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def decode(self, tokens, remove_start_end = True, pad_tokens = set()):
        """Decode token ids back to text, dropping pad ids and (optionally)
        the start/end-of-text markers."""
        if torch.is_tensor(tokens):
            tokens = tokens.tolist()
        if remove_start_end:
            # BUGFIX: 49406 = <|startoftext|>, 49407 = <|endoftext|>. The
            # previous filter used 40407 — a typo which left end-of-text
            # markers in the output and dropped a legitimate BPE token id.
            tokens = [token for token in tokens if token not in (49406, 49407, 0)]
        text = ''.join([self.decoder[token] for token in tokens if token not in pad_tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
        return text

    def tokenize(self, texts, context_length = 256, truncate_text = False):
        """Encode one string or a list of strings into a zero-padded LongTensor
        of shape (batch, context_length); raises unless `truncate_text`."""
        if isinstance(texts, str):
            texts = [texts]

        all_tokens = [self.encode(text) for text in texts]

        result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
        for i, tokens in enumerate(all_tokens):
            if len(tokens) > context_length:
                if truncate_text:
                    tokens = tokens[:context_length]
                else:
                    raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
            result[i, :len(tokens)] = torch.tensor(tokens)

        return result
tokenizer = SimpleTokenizer()
# huggingface tokenizer
class HugTokenizer:
    """Wrapper around a HuggingFace `tokenizers` BPE model stored as JSON."""

    def __init__(self, bpe_path = None):
        bpe_path = Path(bpe_path)
        assert bpe_path.exists(), f'BPE json path {str(bpe_path)} does not exist'
        tokenizer = Tokenizer.from_file(str(bpe_path))
        tokenizer.post_processor = ByteLevel(trim_offsets = True)
        self.tokenizer = tokenizer
        self.vocab_size = tokenizer.get_vocab_size()

    def decode(self, tokens, pad_tokens = set()):
        """Decode ids to text, skipping pad ids (and id 0) and special tokens."""
        if torch.is_tensor(tokens):
            tokens = tokens.tolist()
        ignore_ids = pad_tokens.union({0})
        kept = [token for token in tokens if token not in ignore_ids]
        return self.tokenizer.decode(kept, skip_special_tokens = True)

    def encode(self, text):
        """Encode a single string into a list of token ids."""
        return self.tokenizer.encode(text).ids

    def tokenize(self, texts, context_length = 256, truncate_text = False):
        """Encode string(s) into a zero-padded LongTensor of shape
        (batch, context_length); raises on overlong input unless truncating."""
        if isinstance(texts, str):
            texts = [texts]

        encoded = [self.encode(text) for text in texts]
        result = torch.zeros(len(encoded), context_length, dtype=torch.long)
        for row, ids in enumerate(encoded):
            if len(ids) > context_length:
                if not truncate_text:
                    raise RuntimeError(f"Input {texts[row]} is too long for context length {context_length}")
                ids = ids[:context_length]
            result[row, :len(ids)] = torch.tensor(ids)
        return result
# chinese tokenizer
class ChineseTokenizer:
    """Tokenizer for Chinese text built on the `bert-base-chinese` vocabulary."""

    def __init__(self):
        tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
        self.tokenizer = tokenizer
        self.vocab_size = tokenizer.vocab_size

    def decode(self, tokens, pad_tokens = set()):
        """Decode ids to text, skipping pad ids (and id 0)."""
        if torch.is_tensor(tokens):
            tokens = tokens.tolist()
        ignore_ids = pad_tokens.union({0})
        kept = [token for token in tokens if token not in ignore_ids]
        return self.tokenizer.decode(kept)

    def encode(self, text):
        """Encode a single string (without BERT special tokens) into a tensor of ids."""
        ids = self.tokenizer.encode(text, add_special_tokens = False)
        return torch.tensor(ids)

    def tokenize(self, texts, context_length = 256, truncate_text = False):
        """Encode string(s) into a zero-padded LongTensor of shape
        (batch, context_length); raises on overlong input unless truncating."""
        if isinstance(texts, str):
            texts = [texts]

        encoded = [self.encode(text) for text in texts]
        result = torch.zeros(len(encoded), context_length, dtype=torch.long)
        for row, ids in enumerate(encoded):
            if len(ids) > context_length:
                if not truncate_text:
                    raise RuntimeError(f"Input {texts[row]} is too long for context length {context_length}")
                ids = ids[:context_length]
            result[row, :len(ids)] = torch.tensor(ids)
        return result
# yttm tokenizer
class YttmTokenizer:
    """Tokenizer wrapping a trained YouTokenToMe BPE model."""

    def __init__(self, bpe_path = None):
        bpe_path = Path(bpe_path)
        assert bpe_path.exists(), f'BPE json path {str(bpe_path)} does not exist'
        tokenizer = yttm.BPE(model = str(bpe_path))
        self.tokenizer = tokenizer
        self.vocab_size = tokenizer.vocab_size()

    def decode(self, tokens, pad_tokens = set()):
        """Decode ids to text; pad ids (and id 0) are ignored by yttm itself."""
        if torch.is_tensor(tokens):
            tokens = tokens.tolist()
        return self.tokenizer.decode(tokens, ignore_ids = pad_tokens.union({0}))

    def encode(self, texts):
        """Encode a batch of strings at once; returns a list of id tensors."""
        id_lists = self.tokenizer.encode(texts, output_type = yttm.OutputType.ID)
        return list(map(torch.tensor, id_lists))

    def tokenize(self, texts, context_length = 256, truncate_text = False):
        """Encode string(s) into a zero-padded LongTensor of shape
        (batch, context_length); raises on overlong input unless truncating."""
        if isinstance(texts, str):
            texts = [texts]

        encoded = self.encode(texts)
        result = torch.zeros(len(encoded), context_length, dtype=torch.long)
        for row, ids in enumerate(encoded):
            if len(ids) > context_length:
                if not truncate_text:
                    raise RuntimeError(f"Input {texts[row]} is too long for context length {context_length}")
                ids = ids[:context_length]
            result[row, :len(ids)] = torch.tensor(ids)
        return result
| 9,432 | 34.329588 | 120 | py |
DALLE-pytorch | DALLE-pytorch-main/dalle_pytorch/__init__.py | from dalle_pytorch.dalle_pytorch import DALLE, CLIP, DiscreteVAE
from dalle_pytorch.vae import OpenAIDiscreteVAE, VQGanVAE
from pkg_resources import get_distribution
from dalle_pytorch.version import __version__
| 213 | 34.666667 | 64 | py |
DALLE-pytorch | DALLE-pytorch-main/dalle_pytorch/attention.py | from inspect import isfunction
from math import ceil
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from rotary_embedding_torch import apply_rotary_emb
# helpers
def exists(val):
    """Return whether *val* has been provided (i.e. is not None)."""
    if val is None:
        return False
    return True
def uniq(arr):
    """Order-preserving de-duplication; returns a dict-keys view of uniques."""
    seen = {el: True for el in arr}
    return seen.keys()
def default(val, d):
    """Fall back to *d* when *val* is None; plain functions given as the
    fallback are invoked lazily."""
    if val is not None:
        return val
    return d() if isfunction(d) else d
def max_neg_value(t):
    """Most negative finite value representable in *t*'s dtype (mask fill value)."""
    return -torch.finfo(t.dtype).max
def stable_softmax(t, dim = -1, alpha = 32 ** 2):
    """Numerically stabilized softmax: scale down by `alpha`, subtract the
    (detached) max, then rescale before the softmax."""
    scaled = t / alpha
    scaled = scaled - torch.amax(scaled, dim = dim, keepdim = True).detach()
    return (scaled * alpha).softmax(dim = dim)
def apply_pos_emb(pos_emb, qkv):
    """Apply rotary positional embeddings to each of q, k, v, truncating the
    embedding table to the tensors' sequence length."""
    n = qkv[0].shape[-2]
    pos_emb = pos_emb[..., :n, :]
    return tuple(apply_rotary_emb(pos_emb, t) for t in qkv)
# classes
class Attention(nn.Module):
    """Full (dense) multi-head self-attention with optional causal masking, an
    optional fixed `static_mask`, and incremental decoding via a key/value
    cache stored under `cache[cache_key]`."""

    def __init__(self, dim, seq_len, causal = True, heads = 8, dim_head = 64, dropout = 0., stable = False,
                 static_mask = None):
        super().__init__()
        inner_dim = dim_head * heads
        self.heads = heads
        self.seq_len = seq_len
        self.scale = dim_head ** -0.5  # 1/sqrt(d_k) query scaling
        self.stable = stable           # use the numerically stabilized softmax
        self.causal = causal
        # not a parameter; excluded from the state dict
        self.register_buffer('static_mask', static_mask, persistent=False)

        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        )

    def forward(self, x, mask = None, rotary_pos_emb = None, cache = None, cache_key = None):
        b, n, _, h, device = *x.shape, self.heads, x.device
        softmax = torch.softmax if not self.stable else stable_softmax
        # offset > 0 means incremental decoding: `x` holds only the new suffix
        offset = cache.get('offset', 0) if exists(cache) else 0

        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)

        if exists(rotary_pos_emb):
            # rotate with the embeddings for the positions actually present
            q, k, v = apply_pos_emb(rotary_pos_emb[..., offset:, :], (q, k, v))

        q = q * self.scale

        if offset > 0:
            # prepend cached keys/values from earlier decoding steps
            k_top, v_top = cache[cache_key]
            k = torch.cat([k_top, k], dim=-2)
            v = torch.cat([v_top, v], dim=-2)
        if exists(cache):
            cache[cache_key] = k, v

        dots = torch.einsum('b h i d, b h j d -> b h i j', q, k)
        mask_value = max_neg_value(dots)

        if exists(mask):
            # key padding mask, broadcast over heads and query positions
            mask = rearrange(mask, 'b j -> b () () j')
            dots.masked_fill_(~mask, mask_value)
            del mask

        if self.causal and offset == 0:  # causality is naturally enforced for the cached inference
            i, j = dots.shape[-2:]
            mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
            dots.masked_fill_(mask, mask_value)

        if exists(self.static_mask):
            # window the static mask to the current decoding offset
            dots.masked_fill_(~self.static_mask[offset:offset + n, :offset + n], mask_value)

        attn = softmax(dots, dim=-1)

        out = torch.einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.to_out(out)
        return out
# sparse attention with convolutional pattern, as mentioned in the blog post. customizable kernel size and dilation
class SparseConvCausalAttention(nn.Module):
    """Causal attention with a convolution-like sparsity pattern: every image
    token attends to all text tokens plus a dilated `kernel_size` x
    `kernel_size` neighbourhood of earlier image tokens (the unfold window is
    shifted up/left so it stays causal); text tokens attend causally to text.
    Mentioned in the DALL-E blog post; kernel size and dilation are
    customizable. Does not support the inference cache."""

    def __init__(self, dim, seq_len, image_size = 32, kernel_size = 5, dilation = 1, heads = 8, dim_head = 64, dropout = 0., stable = False, **kwargs):
        super().__init__()
        assert kernel_size % 2 == 1, 'kernel size must be odd'

        inner_dim = dim_head * heads
        self.seq_len = seq_len
        self.heads = heads
        self.scale = dim_head ** -0.5
        self.image_size = image_size
        self.kernel_size = kernel_size
        self.dilation = dilation

        self.stable = stable

        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        )

    def forward(self, x, mask = None, rotary_pos_emb = None):
        b, n, _, h, img_size, kernel_size, dilation, seq_len, device = *x.shape, self.heads, self.image_size, self.kernel_size, self.dilation, self.seq_len, x.device
        softmax = torch.softmax if not self.stable else stable_softmax

        img_seq_len = img_size ** 2
        text_len = seq_len + 1 - img_seq_len

        # padding: extend the sequence to the full (text + image) length
        padding = seq_len - n + 1
        mask = default(mask, lambda: torch.ones(b, text_len, device = device).bool())

        x = F.pad(x, (0, 0, 0, padding), value = 0)
        mask = mask[:, :text_len]

        # derive query / keys / values
        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), qkv)

        if exists(rotary_pos_emb):
            q, k, v = apply_pos_emb(rotary_pos_emb, (q, k, v))

        q *= self.scale

        ((q_text, q_img), (k_text, k_img), (v_text, v_img)) = map(lambda t: (t[:, :-img_seq_len], t[:, -img_seq_len:]), (q, k, v))

        # text attention (causal over text positions only)
        dots_text = einsum('b i d, b j d -> b i j', q_text, k_text)
        mask_value = max_neg_value(dots_text)

        i, j = dots_text.shape[-2:]
        text_causal_mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
        dots_text.masked_fill_(text_causal_mask, mask_value)

        attn_text = softmax(dots_text, dim = -1)
        out_text = einsum('b i j, b j d -> b i d', attn_text, v_text)

        # image attention: unfold keys/values into per-pixel causal windows
        effective_kernel_size = (kernel_size - 1) * dilation + 1
        same_padding = effective_kernel_size // 2
        causal_padding = (same_padding * 2, 0, same_padding * 2, 0)

        k_img, v_img = map(lambda t: rearrange(t, 'b (h w) c -> b c h w', h = img_size), (k_img, v_img))
        k_img, v_img = map(lambda t: F.pad(t, causal_padding), (k_img, v_img))
        k_img, v_img = map(lambda t: F.unfold(t, kernel_size, dilation = dilation), (k_img, v_img))
        k_img, v_img = map(lambda t: rearrange(t, 'b (d j) i -> b i j d', j = kernel_size ** 2), (k_img, v_img))

        # let image attend to all of text
        dots_image = einsum('b i d, b i j d -> b i j', q_img, k_img)
        dots_image_to_text = einsum('b i d, b j d -> b i j', q_img, k_text)

        # use padding of 0 on tensor of 1s and unfold for padding mask
        i, j = dots_image.shape[-2:]
        ones = torch.ones((img_seq_len,), device = device)
        ones = rearrange(ones, '(h w) -> () () h w', h = img_size)
        ones = F.pad(ones, causal_padding, value = 0.)
        ones = F.unfold(ones, kernel_size, dilation = dilation)
        ones = rearrange(ones, 'b j i -> b i j')

        # mask image attention
        padding_mask = ones == 0.

        # concat text mask with image causal mask
        padding_mask = repeat(padding_mask, '() i j -> b i j', b = b * h)
        mask = repeat(mask, 'b j -> (b h) i j', i = i, h = h)
        mask = torch.cat((~mask, padding_mask), dim = -1)

        # image can attend to all of text
        dots = torch.cat((dots_image_to_text, dots_image), dim = -1)
        dots.masked_fill_(mask, mask_value)

        attn = softmax(dots, dim = -1)

        # aggregate
        attn_image_to_text, attn_image = attn[..., :text_len], attn[..., text_len:]

        out_image_to_image = einsum('b i j, b i j d -> b i d', attn_image, v_img)
        out_image_to_text = einsum('b i j, b j d -> b i d', attn_image_to_text, v_text)

        out_image = out_image_to_image + out_image_to_text

        # combine attended values for both text and image
        out = torch.cat((out_text, out_image), dim = 1)

        out = rearrange(out, '(b h) n d -> b n (h d)', h = h)
        out = self.to_out(out)
        # drop the padding positions added at the top
        return out[:, :n]
# sparse axial causal attention
class SparseAxialCausalAttention(nn.Module):
    """Causal attention restricted along one image axis: each image token
    attends to all text tokens and, causally, to tokens in its own row
    (axis = 0) or column (axis = 1); text attends causally to text.
    Does not support the inference cache."""

    def __init__(self, dim, seq_len, image_size = 32, axis = 0, heads = 8, dim_head = 64, dropout = 0., stable = False, **kwargs):
        super().__init__()
        assert axis in {0, 1}, 'axis must be either 0 (along height) or 1 (along width)'
        self.axis = axis

        inner_dim = dim_head * heads
        self.seq_len = seq_len
        self.heads = heads
        self.scale = dim_head ** -0.5
        self.image_size = image_size

        self.stable = stable

        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        )

    def forward(self, x, mask = None, rotary_pos_emb = None):
        b, n, _, h, img_size, axis, seq_len, device = *x.shape, self.heads, self.image_size, self.axis, self.seq_len, x.device
        softmax = torch.softmax if not self.stable else stable_softmax

        img_seq_len = img_size ** 2
        text_len = seq_len + 1 - img_seq_len

        # padding: extend the sequence to the full (text + image) length
        padding = seq_len - n + 1
        mask = default(mask, lambda: torch.ones(b, text_len, device = device).bool())

        x = F.pad(x, (0, 0, 0, padding), value = 0)
        mask = mask[:, :text_len]

        # derive queries / keys / values
        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), qkv)

        if exists(rotary_pos_emb):
            q, k, v = apply_pos_emb(rotary_pos_emb, (q, k, v))

        q *= self.scale

        ((q_text, q_img), (k_text, k_img), (v_text, v_img)) = map(lambda t: (t[:, :-img_seq_len], t[:, -img_seq_len:]), (q, k, v))

        # text attention (causal over text positions only)
        dots_text = einsum('b i d, b j d -> b i j', q_text, k_text)
        mask_value = max_neg_value(dots_text)

        i, j = dots_text.shape[-2:]
        text_causal_mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
        dots_text.masked_fill_(text_causal_mask, mask_value)

        attn_text = softmax(dots_text, dim = -1)
        out_text = einsum('b i j, b j d -> b i d', attn_text, v_text)

        # image attention, grouped by row (axis 0) or column (axis 1)
        split_axis_einops = 'b (h w) c -> b h w c' if axis == 0 else 'b (h w) c -> b w h c'
        merge_axis_einops = 'b x n d -> b (x n) d' if axis == 0 else 'b x n d -> b (n x) d'

        # split out axis
        q_img, k_img, v_img = map(lambda t: rearrange(t, split_axis_einops, h = img_size), (q_img, k_img, v_img))

        # similarity
        dots_image_to_image = einsum('b x i d, b x j d -> b x i j', q_img, k_img)
        dots_image_to_text = einsum('b x i d, b j d -> b x i j', q_img, k_text)

        dots = torch.cat((dots_image_to_text, dots_image_to_image), dim = -1)

        # mask so image has full attention to text, but causal along axis
        bh, x, i, j = dots.shape
        causal_mask = torch.ones(i, img_size, device = device).triu_(img_size - i + 1).bool()
        causal_mask = repeat(causal_mask, 'i j -> b x i j', b = bh, x = x)

        mask = repeat(mask, 'b j -> (b h) x i j', h = h, x = x, i = i)
        mask = torch.cat((~mask, causal_mask), dim = -1)

        dots.masked_fill_(mask, mask_value)

        # attention.
        attn = softmax(dots, dim = -1)

        # aggregate
        attn_image_to_text, attn_image_to_image = attn[..., :text_len], attn[..., text_len:]

        out_image_to_image = einsum('b x i j, b x j d -> b x i d', attn_image_to_image, v_img)
        out_image_to_text = einsum('b x i j, b j d -> b x i d', attn_image_to_text, v_text)

        out_image = out_image_to_image + out_image_to_text

        # merge back axis
        out_image = rearrange(out_image, merge_axis_einops, x = img_size)

        # combine attended values for both text and image
        out = torch.cat((out_text, out_image), dim = 1)

        out = rearrange(out, '(b h) n d -> b n (h d)', h = h)
        out = self.to_out(out)
        # drop the padding positions added at the top
        return out[:, :n]
# microsoft sparse attention CUDA kernel
class SparseAttention(Attention):
    """Block-sparse attention backed by Microsoft DeepSpeed's CUDA kernels.

    Blocks covering the text prefix are made global; the remaining pattern is
    DeepSpeed's `VariableSparsityConfig` with `num_random_blocks` random
    blocks. Inherits projections and hyper-parameters from `Attention`.
    Requires the `deepspeed` package (imported lazily in `__init__`).
    """

    def __init__(
        self,
        *args,
        block_size = 16,
        text_seq_len = 256,
        num_random_blocks = None,
        **kwargs
    ):
        super().__init__(*args, **kwargs)
        from deepspeed.ops.sparse_attention import SparseSelfAttention, VariableSparsityConfig
        self.block_size = block_size

        # default: one random block per 4 blocks of sequence
        num_random_blocks = default(num_random_blocks, self.seq_len // block_size // 4)
        # every block overlapping the text prefix attends globally
        global_block_indices = list(range(ceil(text_seq_len / block_size)))

        self.attn_fn = SparseSelfAttention(
            sparsity_config = VariableSparsityConfig(
                num_heads = self.heads,
                block = self.block_size,
                num_random_blocks = num_random_blocks,
                global_block_indices = global_block_indices,
                attention = 'unidirectional' if self.causal else 'bidirectional'
            ),
            max_seq_length = self.seq_len,
            attn_mask_mode = 'add'
        )

    def forward(self, x, mask = None, rotary_pos_emb = None):
        b, n, _, h, device = *x.shape, self.heads, x.device
        remainder = n % self.block_size
        mask = default(mask, lambda: torch.ones(b, n, device = device).bool())

        if remainder > 0:
            # the kernel requires sequence length to be a block multiple
            padding = self.block_size - remainder
            x = F.pad(x, (0, 0, 0, padding), value = 0)
            mask = F.pad(mask, (0, padding), value = False)

        qkv = self.to_qkv(x).chunk(3, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)

        if exists(rotary_pos_emb):
            q, k, v = apply_pos_emb(rotary_pos_emb, (q, k, v))

        key_pad_mask = None
        if exists(mask):
            key_pad_mask = ~mask

        attn_mask = None
        if self.causal:
            # additive causal mask (attn_mask_mode = 'add'); halved so the
            # summed fill values cannot overflow the dtype
            i, j = q.shape[-2], k.shape[-2]
            mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
            attn_mask = torch.zeros(i, j, device = device).to(q)
            mask_value = max_neg_value(q) / 2
            attn_mask.masked_fill_(mask, mask_value)

        out = self.attn_fn(q, k, v, attn_mask = attn_mask, key_padding_mask = key_pad_mask)
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.to_out(out)
        # drop the block-alignment padding
        return out[:, :n]
| 14,131 | 34.418546 | 165 | py |
DALLE-pytorch | DALLE-pytorch-main/dalle_pytorch/loader.py | from pathlib import Path
from random import randint, choice
import PIL
from torch.utils.data import Dataset
from torchvision import transforms as T
class TextImageDataset(Dataset):
    """Paired text/image dataset: images and caption files are matched by the
    shared stem of their paths (e.g. `dog.jpg` <-> `dog.txt`). Corrupt or
    caption-less samples are skipped by substituting a neighbouring sample."""

    def __init__(self,
                 folder,
                 text_len=256,
                 image_size=128,
                 truncate_captions=False,
                 resize_ratio=0.75,
                 transparent=False,
                 tokenizer=None,
                 shuffle=False
                 ):
        """
        @param folder: Folder containing images and text files matched by their paths' respective "stem"
        @param truncate_captions: Rather than throw an exception, captions which are too long will be truncated.
        """
        super().__init__()
        self.shuffle = shuffle
        path = Path(folder)

        text_files = [*path.glob('**/*.txt')]
        image_files = [
            *path.glob('**/*.png'), *path.glob('**/*.jpg'),
            *path.glob('**/*.jpeg'), *path.glob('**/*.bmp')
        ]

        text_files = {text_file.stem: text_file for text_file in text_files}
        image_files = {image_file.stem: image_file for image_file in image_files}

        # keep only stems that have both an image and a caption file
        # NOTE(review): set intersection yields a nondeterministic order; sort
        # `keys` if a reproducible index -> sample mapping is ever required
        keys = (image_files.keys() & text_files.keys())

        self.keys = list(keys)
        self.text_files = {k: v for k, v in text_files.items() if k in keys}
        self.image_files = {k: v for k, v in image_files.items() if k in keys}
        self.text_len = text_len
        self.truncate_captions = truncate_captions
        self.resize_ratio = resize_ratio
        self.tokenizer = tokenizer
        image_mode = 'RGBA' if transparent else 'RGB'
        self.image_transform = T.Compose([
            T.Lambda(lambda img: img.convert(image_mode)
                     if img.mode != image_mode else img),
            T.RandomResizedCrop(image_size,
                                scale=(self.resize_ratio, 1.),
                                ratio=(1., 1.)),
            T.ToTensor()
        ])

    def __len__(self):
        return len(self.keys)

    def random_sample(self):
        """Return a uniformly random sample (used to skip past bad files)."""
        return self.__getitem__(randint(0, self.__len__() - 1))

    def sequential_sample(self, ind):
        """Return the next sample, wrapping around at the end of the dataset."""
        if ind >= self.__len__() - 1:
            return self.__getitem__(0)
        return self.__getitem__(ind + 1)

    def skip_sample(self, ind):
        """Substitute another sample for an unreadable one."""
        if self.shuffle:
            return self.random_sample()
        return self.sequential_sample(ind=ind)

    def __getitem__(self, ind):
        key = self.keys[ind]

        text_file = self.text_files[key]
        image_file = self.image_files[key]

        # BUGFIX: read captions as UTF-8 explicitly — the previous bare
        # read_text() used the platform locale encoding and broke on
        # non-ASCII captions (e.g. under Windows)
        descriptions = text_file.read_text(encoding='utf-8').split('\n')
        descriptions = list(filter(lambda t: len(t) > 0, descriptions))
        try:
            # one caption file may hold several candidate captions, one per line
            description = choice(descriptions)
        except IndexError as zero_captions_in_file_ex:
            print(f"An exception occurred trying to load file {text_file}.")
            print(f"Skipping index {ind}")
            return self.skip_sample(ind)

        tokenized_text = self.tokenizer.tokenize(
            description,
            self.text_len,
            truncate_text=self.truncate_captions
        ).squeeze(0)
        try:
            image_tensor = self.image_transform(PIL.Image.open(image_file))
        except (PIL.UnidentifiedImageError, OSError) as corrupt_image_exceptions:
            print(f"An exception occurred trying to load file {image_file}.")
            print(f"Skipping index {ind}")
            return self.skip_sample(ind)

        # Success
        return tokenized_text, image_tensor
| 3,558 | 33.221154 | 112 | py |
DALLE-pytorch | DALLE-pytorch-main/dalle_pytorch/distributed_backends/deepspeed_backend.py | import json
import os
import torch
from .distributed_backend import DistributedBackend
class DeepSpeedBackend(DistributedBackend):
"""Distributed backend using the DeepSpeed engine."""
BACKEND_MODULE_NAME = 'deepspeed'
BACKEND_NAME = 'DeepSpeed'
def wrap_arg_parser(self, parser):
if not self.has_backend():
parser.add_argument(
'--deepspeed',
type=lambda _: False,
help=(
'whether to use DeepSpeed '
"(ignored since it's not available)"
),
)
else:
parser = self.backend_module.add_config_arguments(parser)
parser.add_argument(
'--local_rank',
type=int,
default=-1,
help='local rank passed from distributed launcher',
)
return parser
    def _initialize(self):
        """Initialize the distributed process group and pin this process to its GPU."""
        self.backend_module.init_distributed()
        if torch.cuda.is_available():
            torch.cuda.set_device(self._get_local_rank())
    @staticmethod
    def _require_torch_distributed_init():
        """Raise an error when `torch.distributed` has not been
        initialized yet.
        """
        # NOTE(review): `assert` is skipped under `python -O`; raise a
        # RuntimeError explicitly if this guard must always run
        assert torch.distributed.is_initialized(), \
            ('`torch.distributed` is not initialized; please call '
             '`DeepSpeedBackend.initialize` at the start of your script')
def _get_world_size(self):
self._require_torch_distributed_init()
return torch.distributed.get_world_size()
def _get_rank(self):
self._require_torch_distributed_init()
return torch.distributed.get_rank()
def _get_local_rank(self):
self._require_torch_distributed_init()
return int(os.environ['LOCAL_RANK'])
def _local_barrier(self):
self._require_torch_distributed_init()
torch.distributed.barrier()
def _check_args(self, args, optimizer, lr_scheduler, kwargs):
"""Return an appropriate optimizer and learning rate scheduler
after checking the values passed to `distribute`.
"""
self._check_argvs(args, optimizer, lr_scheduler, kwargs)
(optimizer, lr_scheduler) = self._check_config(
args, optimizer, lr_scheduler, kwargs)
return (optimizer, lr_scheduler)
def _check_argvs(self, args, optimizer, lr_scheduler, kwargs):
"""Apply several sanity checks to the given command
line arguments.
"""
has_json_config = (hasattr(args, 'deepspeed_config')
and args.deepspeed_config is not None)
has_dict_config = 'config_params' in kwargs
if (
# No config given
(not has_json_config and not has_dict_config)
# JSON config file does not exist
or (not has_dict_config
and not os.path.isfile(args.deepspeed_config))
):
# Let DeepSpeed handle these argument errors.
return
if not args.deepspeed:
print(
'WARNING: DeepSpeed backend was selected; setting '
'`args.deepspeed = True`'
)
args.deepspeed = True
if has_json_config and has_dict_config:
print(
'WARNING: DeepSpeed config was given as both JSON file and '
'Python dictionary. Python dictionary takes precedence.'
)
def _check_config(self, args, optimizer, lr_scheduler, kwargs):
"""Return an appropriate optimizer and learning rate scheduler
for the DeepSpeed configuration.
"""
if 'config_params' in kwargs:
config = kwargs['config_params']
else:
with open(args.deepspeed_config, 'r') as json_config_file:
config = json.load(json_config_file)
if 'optimizer' in config and optimizer is not None:
print(
'WARNING: Optimizer encountered in both DeepSpeed config and '
'keyword arguments. Optimizer in DeepSpeed config '
'takes precedence.'
)
optimizer = None
if 'scheduler' in config and lr_scheduler is not None:
print(
'WARNING: Learning rate scheduler encountered in both '
'DeepSpeed config and keyword arguments. Learning rate '
'scheduler in DeepSpeed config takes precedence.'
)
# For the LR scheduler, the JSON config already has
# precedence. We do this for forward compatibility.
lr_scheduler = None
return (optimizer, lr_scheduler)
def _distribute(
self,
args=None,
model=None,
optimizer=None,
model_parameters=None,
training_data=None,
lr_scheduler=None,
**kwargs,
):
"""Return a distributed model engine, optimizer, dataloader, and
learning rate scheduler. These are obtained by wrapping the
given values with the backend.
For the other or other possible arguments,
see `deepspeed.initialize`.
"""
(optimizer, lr_scheduler) = self._check_args(
args, optimizer, lr_scheduler, kwargs)
return self.backend_module.initialize(
args=args,
model=model,
optimizer=optimizer,
model_parameters=model_parameters,
training_data=training_data,
lr_scheduler=lr_scheduler,
**kwargs,
)
def _average_all(self, tensor):
self._require_torch_distributed_init()
# We copy because modification happens in-place
averaged = tensor.detach().clone()
# We use `all_reduce` because it is better supported than `reduce`
torch.distributed.all_reduce(averaged, torch.distributed.ReduceOp.SUM)
return averaged / self.get_world_size()
| 5,987 | 33.813953 | 78 | py |
DALLE-pytorch | DALLE-pytorch-main/dalle_pytorch/distributed_backends/horovod_backend.py | import torch
from .distributed_backend import DistributedBackend
class HorovodBackend(DistributedBackend):
    """Distributed backend using Horovod."""
    BACKEND_MODULE_NAME = 'horovod.torch'
    BACKEND_NAME = 'Horovod'
    def wrap_arg_parser(self, parser):
        """Horovod needs no extra CLI options; return *parser* unchanged."""
        return parser
    def check_batch_size(self, batch_size):
        # Horovod uses the local batch size to determine the effective
        # batch size.
        pass
    def _initialize(self):
        """Initialize Horovod and pin this process to its local GPU."""
        self.backend_module.init()
        if torch.cuda.is_available():
            torch.cuda.set_device(self._get_local_rank())
    def _get_world_size(self):
        """Return the total number of Horovod processes."""
        return self.backend_module.size()
    def _get_rank(self):
        """Return this process's global rank."""
        return self.backend_module.rank()
    def _get_local_rank(self):
        """Return this process's rank on its node."""
        return self.backend_module.local_rank()
    def _local_barrier(self):
        # Actually a global barrier but works for our purposes.
        self.backend_module.join()
    def _distribute(
            self,
            _args=None,
            model=None,
            optimizer=None,
            _model_parameters=None,
            training_data=None,
            lr_scheduler=None,
            **_kwargs,
    ):
        """Wrap the optimizer and broadcast initial state from the root rank."""
        optimizer = self.backend_module.DistributedOptimizer(optimizer)
        # Broadcast so every worker starts from identical model/optimizer state.
        self.backend_module.broadcast_parameters(
            model.state_dict(), root_rank=self.ROOT_RANK)
        self.backend_module.broadcast_optimizer_state(
            optimizer, root_rank=self.ROOT_RANK)
        return (model, optimizer, training_data, lr_scheduler)
    def _average_all(self, tensor):
        # Reduce op is average by default
        averaged = self.backend_module.allreduce(tensor)
        return averaged
| 1,703 | 27.881356 | 71 | py |
covid-vax-stance | covid-vax-stance-main/classifier/classifier_predict.py | # Libraries
import torch
from torchtext.data import Field, TabularDataset, Iterator
import torch.nn as nn
import torch.nn.functional as F
from transformers import AutoTokenizer, BertForSequenceClassification
import os
import csv
import time
import argparse
import warnings
torch.manual_seed(42)
warnings.filterwarnings('ignore')
#%% PARSE ARGUMENTS
# Default to the first CUDA device when available, else fall back to CPU.
CUDA_DEV = 'cuda:0' if torch.cuda.is_available() else 'cpu'
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("in_path", type=str, help="path to a CSV file or directory where the CSV files are stored")
parser.add_argument("--text_col", type=str, default="text", help="Header of the text column in the CSV files")
parser.add_argument("--out_dir", type=str, default=".", help="path to the directory where the predicted output files will be stored (default: same directory as the code file)")
parser.add_argument("--model_dir", type=str, default="./covid_vax_model/", help="path to the directory where the trained model is stored")
parser.add_argument("--device", type=str, default=CUDA_DEV, help="cuda device")
parser.add_argument("--max_seq_len", type=int, default=100, help="Maximum Sequence Length")
parser.add_argument("--batch_size", type=int, default=16, help="Batch Size")
args = parser.parse_args()
device = torch.device(args.device)
print("RUNNING ON DEVICE:", device)
# Module-level configuration used by predict() and the main loop below.
INPATH = args.in_path
OUTDIR = args.out_dir
MODEL_PATH = args.model_dir
TEXT_COL_HEAD = args.text_col
MAX_SEQ_LEN = args.max_seq_len
BATCH_SIZE = args.batch_size
#%% INITIALIZE TOKENIZER ***************************************************
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
PAD_INDEX = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
UNK_INDEX = tokenizer.convert_tokens_to_ids(tokenizer.unk_token)
# label_field = Field(sequential=False, use_vocab=False, batch_first=True, dtype=torch.float)
# torchtext Field that tokenizes with the BERT tokenizer and pads/truncates
# every example to MAX_SEQ_LEN token ids.
text_field = Field(use_vocab=False, tokenize=tokenizer.encode, lower=False, include_lengths=False, batch_first=True,
                   fix_length=MAX_SEQ_LEN, pad_token=PAD_INDEX, unk_token=UNK_INDEX)
#%% MODULES *****************************************************************
class BERT(nn.Module):
    """Thin wrapper around a 3-class ``BertForSequenceClassification`` model.

    The weights are loaded from the module-level ``MODEL_PATH`` directory.
    """
    def __init__(self):
        super(BERT, self).__init__()
        self.encoder = BertForSequenceClassification.from_pretrained(MODEL_PATH, num_labels=3)
    def forward(self, text, label=None):
        """Return ``(loss, logits)``; *loss* is ``None`` when no labels are given.

        **Params**
        - `text` - batch of token-id sequences fed to the encoder
        - `label` (optional) - gold labels; when supplied the encoder also
          returns a loss
        """
        output = self.encoder(text, labels=label)[:2]
        # Idiom fix: `label is not None` replaces the non-idiomatic
        # `not label is None` (PEP 8 / E714); behavior is unchanged.
        if label is not None:
            loss, text_fea = output
        else:
            text_fea = output[0]
            loss = None
        return loss, text_fea
# Index -> stance label mapping; must match the training label encoding.
classmap = ['Neutral', 'AntiVax', 'ProVax']
def predict(data_path, model):
    """Predict a stance label and class probabilities for every row of a CSV.

    Returns ``(labels, raw)`` where *labels* is a list of strings from
    ``classmap`` and *raw* is a list of 3-element softmax probability lists,
    both in the file's row order.
    """
    model.eval()
    # READ CSV FILE AND CREATE AN ITERATOR
    with open(data_path) as fp:
        rdr = csv.reader(fp)
        # Locate the text column by header name; columns before it are skipped.
        txtcol = next(rdr).index(TEXT_COL_HEAD)
    fields = [(None, None)] * (txtcol) + [('text', text_field)]
    data = TabularDataset(data_path, format='CSV', fields = fields , skip_header = True)
    # shuffle/sort disabled so predictions stay aligned with the input rows.
    test_iter = Iterator(data, batch_size=BATCH_SIZE, device=device, train=False, shuffle=False, sort=False)
    # PREDICT CLASS PROBABILITIES AND COMPUTE STANCE
    y_pred = []
    raw = []
    with torch.no_grad():
        for text, _ in test_iter:
            text = text.type(torch.LongTensor)
            text = text.to(device)
            output = model(text)
            _, output = output
            raw.extend(F.softmax(output, 1).tolist())
            y_pred.extend(torch.argmax(output, 1).tolist())
    return [classmap[x] for x in y_pred], raw
#%% MAIN *******************************************************************
model = BERT().to(device)
print("Model Loaded from:", MODEL_PATH)
print("Saving predicted files to:", OUTDIR)
# Accept either a single CSV file or a directory of CSV files.
if os.path.isfile(INPATH):
    INDIR, fn = os.path.split(INPATH)
    files = [fn]
else:
    INDIR = INPATH
    files = sorted([f for f in next(os.walk(INDIR))[2]])
if not os.path.exists(OUTDIR):
    os.makedirs(OUTDIR)
# Predict each file and write a copy with prediction columns appended.
for fn in files:
    t0 = time.time()
    print("\nCalculating for file:", fn, flush = True)
    path = os.path.join(INDIR, fn)
    preds, raw = predict(path, model)
    with open(path) as fp:
        rdr = csv.reader(fp)
        head = next(rdr)
        rdr = list(rdr)
    with open(os.path.join(OUTDIR, fn), 'w') as fo:
        wrt = csv.writer(fo)
        wrt.writerow(head + ['pred', 'p(Neutral)', 'p(AntiVax)', 'p(ProVax)'])
        # Probabilities are rounded to 4 decimals in the output file.
        for row, pr, rs in zip(rdr, preds, raw):
            wrt.writerow(row + [pr]+ [round(x, 4) for x in rs])
    t1 = time.time()
    print("Completed in %0.1f mins"%((t1 - t0) / 60))
| 5,168 | 33.46 | 176 | py |
libgpuarray | libgpuarray-master/pygpu/tests/test_gpu_ndarray.py | from __future__ import print_function
import unittest
import copy
from six.moves import range
from six import PY3
import pickle
import numpy
from nose.tools import assert_raises
import pygpu
from pygpu.gpuarray import GpuArray, GpuKernel
from .support import (guard_devsup, check_meta, check_flags, check_all,
check_content, gen_gpuarray, context as ctx, dtypes_all,
dtypes_no_complex, skip_single_f)
def product(*args, **kwds):
    """Cartesian product of the input iterables (pure-Python itertools.product).

    product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
    product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
    """
    # Materialize each iterable as a tuple first: on Python 3, `map` returns
    # an iterator, so the old `map(tuple, args) * repeat` raised
    # "TypeError: unsupported operand type(s) for *: 'map' and 'int'".
    pools = [tuple(arg) for arg in args] * kwds.get('repeat', 1)
    result = [[]]
    for pool in pools:
        result = [x + [y] for x in result for y in pool]
    for prod in result:
        yield tuple(prod)
def permutations(elements):
    """Yield every permutation of *elements* (any sliceable sequence).

    The first element is inserted at each position of every permutation of
    the remaining elements, so the output order is deterministic.
    """
    if len(elements) <= 1:
        yield elements
        return
    head = elements[:1]
    for sub in permutations(elements[1:]):
        for pos in range(len(elements)):
            yield sub[:pos] + head + sub[pos:]
def test_hash():
    """GpuArray instances must be unhashable (hash() raises TypeError)."""
    g = pygpu.empty((2, 3), context=ctx)
    raised = False
    try:
        hash(g)
    except TypeError:
        raised = True
    assert raised
def test_bool():
    """bool() of a GpuArray must agree with bool() of the NumPy equivalent."""
    cases = [numpy.empty((0, 33)), [[1]], [[0]], [], [0], [1], 0, 1]
    for data in cases:
        gpu_truth = bool(pygpu.asarray(data, context=ctx))
        cpu_truth = bool(numpy.asarray(data))
        assert gpu_truth == cpu_truth
def test_transfer():
    """Yield GPU<->host transfer cases over shapes, dtypes and offsets."""
    for shp in [(), (5,), (6, 7), (4, 8, 9), (1, 8, 9)]:
        for dtype in dtypes_all:
            for offseted in [True, False]:
                yield transfer, shp, dtype, offseted
def transfer(shp, dtype, offseted):
    """Round-trip an array to the GPU and back; metadata must be preserved."""
    a, b = gen_gpuarray(shp, dtype, offseted, ctx=ctx)
    # Test that passing dtype doesn't break.
    c = numpy.asarray(b, dtype=dtype)
    c = numpy.asarray(b)
    assert numpy.allclose(c, a)
    assert a.shape == b.shape == c.shape
    assert a.strides == b.strides == c.strides
    assert a.dtype == b.dtype == c.dtype == dtype
    assert c.flags.c_contiguous
def test_cast():
    """Yield astype() cases over all non-complex dtype pairs."""
    for shp in [(), (5,), (6, 7), (4, 8, 9), (1, 8, 9)]:
        for dtype1 in dtypes_no_complex:
            for dtype2 in dtypes_no_complex:
                yield cast, shp, dtype1, dtype2
@guard_devsup
def cast(shp, dtype1, dtype2):
    """astype() on GPU must match NumPy's astype() in dtype and shape."""
    a, b = gen_gpuarray(shp, dtype1, False, ctx=ctx)
    ac = a.astype(dtype2)
    bc = b.astype(dtype2)
    assert ac.dtype == bc.dtype
    assert ac.shape == bc.shape
    assert numpy.allclose(a, numpy.asarray(b))
def test_transfer_not_contiguous():
    """Yield transfer cases for non-contiguous (reversed) arrays."""
    for shp in [(5,), (6, 7), (4, 8, 9), (1, 8, 9)]:
        for dtype in dtypes_all:
            yield transfer_not_contiguous, shp, dtype
@guard_devsup
def transfer_not_contiguous(shp, dtype):
    """Transfer of a reversed (negative-stride) array yields a C-contiguous copy."""
    a = numpy.random.rand(*shp) * 10
    b = pygpu.array(a, context=ctx)
    # Reverse both views so they are no longer contiguous.
    a = a[::-1]
    b = b[::-1]
    c = numpy.asarray(b)
    assert numpy.allclose(c, a)
    assert a.shape == b.shape == c.shape
    # the result array (c) is C contiguous
    assert a.strides == b.strides == (-c.strides[0],) + c.strides[1:]
    assert a.dtype == b.dtype == c.dtype
    assert c.flags.c_contiguous
def test_transfer_fortran():
    """Yield transfer cases for Fortran-ordered arrays."""
    for shp in [(5,), (6, 7), (4, 8, 9), (1, 8, 9)]:
        for dtype in dtypes_all:
            yield transfer_fortran, shp, dtype
@guard_devsup
def transfer_fortran(shp, dtype):
    """asfortranarray on GPU must mirror numpy.asfortranarray exactly."""
    a = numpy.random.rand(*shp) * 10
    b = pygpu.array(a, context=ctx)
    a_ = numpy.asfortranarray(a)
    if len(shp) > 1:
        # For ndim > 1 the F-order conversion must actually change strides.
        assert a_.strides != a.strides
    a = a_
    b = pygpu.asfortranarray(b)
    c = numpy.asarray(b)
    assert a.shape == b.shape == c.shape
    assert a.dtype == b.dtype == c.dtype
    assert a.flags.f_contiguous
    assert c.flags.f_contiguous
    assert a.strides == b.strides == c.strides
    assert numpy.allclose(c, a)
def test_ascontiguousarray():
    """Yield ascontiguousarray cases over shapes, dtypes, offsets and orders."""
    for shp in [(), (5,), (6, 7), (4, 8, 9), (1, 8, 9)]:
        for dtype in dtypes_all:
            for offseted_o in [True, False]:
                # Bug fix: this was `[True, True]`, which duplicated the True
                # case and never exercised offseted_i=False (the sibling
                # test_asfortranarray uses [True, False], and the worker's
                # `sliced == 1 and not offseted_i` branch was unreachable).
                for offseted_i in [True, False]:
                    for sliced in [1, 2, -1, -2]:
                        for order in ['f', 'c']:
                            yield (ascontiguousarray, shp, dtype, offseted_o,
                                   offseted_i, sliced, order)
@guard_devsup
def ascontiguousarray(shp, dtype, offseted_o, offseted_i, sliced, order):
    """pygpu.ascontiguousarray must match numpy.ascontiguousarray, including
    whether the input is returned unchanged or copied."""
    cpu, gpu = gen_gpuarray(shp, dtype, offseted_o, offseted_i, sliced, order,
                            ctx=ctx)
    a = numpy.ascontiguousarray(cpu)
    b = pygpu.ascontiguousarray(gpu)
    # numpy upcast with a view to 1d scalar.
    if (sliced != 1 or shp == () or (offseted_i and len(shp) > 1)):
        # Input was not already C-contiguous: a new object must be returned.
        assert b is not gpu
        if sliced == 1 and not offseted_i:
            assert (a.data is cpu.data) == (b.bytes is gpu.bytes)
    else:
        assert b is gpu
    assert a.shape == b.shape
    assert a.dtype == b.dtype
    assert a.flags.c_contiguous
    assert b.flags['C_CONTIGUOUS']
    assert a.strides == b.strides
    assert numpy.allclose(cpu, a)
    assert numpy.allclose(cpu, b)
def test_asfortranarray():
    """Yield asfortranarray cases over shapes, dtypes, offsets and orders."""
    for shp in [(), (5,), (6, 7), (4, 8, 9), (1, 8, 9)]:
        for dtype in dtypes_all:
            for offseted_outer in [True, False]:
                for offseted_inner in [True, False]:
                    for sliced in [1, 2, -1, -2]:
                        for order in ['f', 'c']:
                            yield (asfortranarray, shp, dtype, offseted_outer,
                                   offseted_inner, sliced, order)
@guard_devsup
def asfortranarray(shp, dtype, offseted_outer, offseted_inner, sliced, order):
    """pygpu.asfortranarray must match numpy.asfortranarray, including copy
    vs. no-copy behavior."""
    cpu, gpu = gen_gpuarray(shp, dtype, offseted_outer, offseted_inner, sliced,
                            order, ctx=ctx)
    a = numpy.asfortranarray(cpu)
    b = pygpu.asfortranarray(gpu)
    # numpy upcast with a view to 1d scalar.
    if gpu.flags['F_CONTIGUOUS']:
        # Already F-contiguous: on CUDA the data pointer must be unchanged.
        assert ctx.kind != b'cuda' or b.gpudata == gpu.gpudata
    elif (sliced != 1 or shp == () or (offseted_outer and len(shp) > 1) or
          (order != 'f' and len(shp) > 1)):
        assert b is not gpu
    else:
        assert b is gpu
    assert a.shape == b.shape
    assert a.dtype == b.dtype
    assert a.flags.f_contiguous
    assert b.flags['F_CONTIGUOUS']
    if not any([s == 1 for s in cpu.shape]):
        # Older version then Numpy 1.10 do not set c/f contiguous more
        # frequently as we do. This cause extra copy.
        assert a.strides == b.strides
    assert numpy.allclose(cpu, a)
    assert numpy.allclose(cpu, b)
def test_zeros():
    """Yield zeros() cases, including empty and degenerate shapes."""
    for shp in [(), (0,), (5,),
                (0, 0), (1, 0), (0, 1), (6, 7),
                (0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1),
                (4, 8, 9), (1, 8, 9)]:
        for order in ["C", "F"]:
            for dtype in dtypes_all:
                yield zeros, shp, order, dtype
@guard_devsup
def zeros(shp, order, dtype):
    """pygpu.zeros must match numpy.zeros in metadata and content."""
    x = pygpu.zeros(shp, dtype, order, context=ctx)
    y = numpy.zeros(shp, dtype, order)
    check_all(x, y)
def test_zeros_no_dtype():
    """zeros() with only a shape must use the same defaults as NumPy."""
    # no dtype and order param
    x = pygpu.zeros((), context=ctx)
    y = numpy.zeros(())
    check_meta(x, y)
def test_zero_noparam():
    """pygpu.zeros called without arguments must raise TypeError."""
    assert_raises(TypeError, pygpu.zeros)
def test_empty():
    """Yield empty() cases, including empty and degenerate shapes."""
    for shp in [(), (0,), (5,),
                (0, 0), (1, 0), (0, 1), (6, 7),
                (0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1),
                (4, 8, 9), (1, 8, 9)]:
        for order in ["C", "F"]:
            for dtype in dtypes_all:
                yield empty, shp, order, dtype
def empty(shp, order, dtype):
    """pygpu.empty must match numpy.empty metadata (content is undefined)."""
    x = pygpu.empty(shp, dtype, order, context=ctx)
    y = numpy.empty(shp, dtype, order)
    check_meta(x, y)
def test_empty_no_dtype():
    """empty() with only a shape must use the same defaults as NumPy."""
    x = pygpu.empty((), context=ctx)  # no dtype and order param
    y = numpy.empty(())
    check_meta(x, y)
def test_empty_no_params():
    """pygpu.empty called without arguments must raise TypeError."""
    assert_raises(TypeError, pygpu.empty)
def test_mapping_getitem_ellipsis():
    """Yield a[...] indexing cases over shapes, dtypes and offsets."""
    for shp in [(), (5,), (6, 7), (4, 8, 9), (1, 8, 9)]:
        for dtype in dtypes_all:
            for offseted in [True, False]:
                yield mapping_getitem_ellipsis, shp, dtype, offseted
def mapping_getitem_ellipsis(shp, dtype, offseted):
    """a[...] must be a zero-copy view with unchanged shape and strides."""
    a, a_gpu = gen_gpuarray(shp, dtype, offseted, ctx=ctx)
    b = a_gpu[...]
    if ctx.kind == b'cuda':
        # On CUDA the view must share the underlying data pointer.
        assert b.gpudata == a_gpu.gpudata
    assert b.strides == a.strides
    assert b.shape == a.shape
    b_cpu = numpy.asarray(b)
    assert numpy.allclose(a, b_cpu)
def test_getitem_none():
    """Yield None (newaxis) indexing cases over shapes."""
    for shp in [(), (5,), (6, 7), (4, 8, 9), (1, 8, 9)]:
        yield getitem_none, shp
def getitem_none(shp):
    """Indexing with None (newaxis) must match NumPy, alone and mixed with
    random integer/slice indices."""
    a, a_gpu = gen_gpuarray(shp, ctx=ctx)
    assert numpy.allclose(a_gpu[..., None], a[..., None])
    for _ in range(5):
        # Choose something to slice with, always works
        indcs = tuple(numpy.random.choice([0, slice(None), slice(1, None)],
                                          size=len(shp)))
        indcs = indcs[:1] + (None,) + indcs[1:]
        assert numpy.allclose(a_gpu[indcs], a[indcs])
    if shp:
        assert numpy.allclose(a_gpu[1:, None], a[1:, None])
def test_mapping_setitem():
    """Yield __setitem__ cases (ellipsis, full-slice, first-axis) per shape/dtype."""
    for shp in [(9,), (8, 9), (4, 8, 9), (1, 8, 9)]:
        for dtype in dtypes_all:
            for offseted in [True, False]:
                yield mapping_setitem_ellipsis, shp, dtype, offseted
                yield mapping_setitem_ellipsis2, shp, dtype, offseted
                yield mapping_setitem_firstaxis, shp, dtype, offseted
@guard_devsup
def mapping_setitem_ellipsis(shp, dtype, offseted):
    """a[...] = scalar must broadcast-assign like NumPy."""
    a, a_gpu = gen_gpuarray(shp, dtype, offseted, ctx=ctx)
    a[...] = 2
    a_gpu[...] = 2
    assert numpy.allclose(a, numpy.asarray(a_gpu))
@guard_devsup
def mapping_setitem_ellipsis2(shp, dtype, offseted):
    """a[:] = b must broadcast a lower-dimensional array like NumPy."""
    a, a_gpu = gen_gpuarray(shp, dtype, offseted, ctx=ctx)
    b, b_gpu = gen_gpuarray(shp[1:], dtype, False, ctx=ctx)
    a[:] = b
    a_gpu[:] = b_gpu
    assert numpy.allclose(a, numpy.asarray(a_gpu))
@guard_devsup
def mapping_setitem_firstaxis(shp, dtype, offseted):
    """a[0] = b must assign into the first row like NumPy."""
    a, a_gpu = gen_gpuarray(shp, dtype, offseted, ctx=ctx)
    b, b_gpu = gen_gpuarray(shp[1:], dtype, False, ctx=ctx)
    a[0] = b
    a_gpu[0] = b_gpu
    assert numpy.allclose(a, numpy.asarray(a_gpu))
class WriteReadTest(unittest.TestCase):
    """Tests for GpuArray.write / GpuArray.read host<->device copies,
    including shape/dtype/order mismatch errors."""
    def setUp(self):
        # A (3, 4, 5) float32 pair; mutate one host value so write/read
        # actually has an observable effect.
        self.cpu, self.gpu = gen_gpuarray((3, 4, 5), ctx=ctx)
        self.cpu[0, 0, 0] = 80
    def test_write(self):
        self.gpu.write(self.cpu)
        res = numpy.asarray(self.gpu)
        assert numpy.allclose(self.cpu, res)
        # Writing from a read-only host array must still work.
        self.cpu[0, 0, 0] = 160
        self.cpu.setflags(write=False)
        self.gpu.write(self.cpu)
        res = numpy.asarray(self.gpu)
        assert numpy.allclose(self.cpu, res)
        # Shape and dtype mismatches must be rejected.
        self.cpu = numpy.ndarray((2, 4, 5), dtype="float32", order='C')
        self.assertRaises(ValueError, self.gpu.write, self.cpu)
        self.cpu = numpy.ndarray((3, 4, 5), dtype="float64", order='C')
        self.assertRaises(ValueError, self.gpu.write, self.cpu)
        # Fortran-ordered and strided (sliced) sources must be accepted.
        cpu2 = numpy.random.random((3, 4, 5))
        cpu2 = numpy.asarray(cpu2, dtype='float32', order='F')
        self.gpu.write(cpu2)
        res = numpy.asarray(self.gpu)
        assert numpy.allclose(cpu2, res)
        cpu2 = numpy.random.random((3, 4, 2, 5))
        cpu2 = numpy.asarray(cpu2, dtype='float32', order='C')
        self.gpu.write(cpu2[:, :, 0, :])
        res = numpy.asarray(self.gpu)
        assert numpy.allclose(cpu2[:, :, 0, :], res)
        cpu2 = numpy.random.random((3, 4, 2, 5))
        cpu2 = numpy.asarray(cpu2, dtype='float32', order='F')
        self.gpu.write(cpu2[:, :, 0, :])
        res = numpy.asarray(self.gpu)
        assert numpy.allclose(cpu2[:, :, 0, :], res)
    def test_read(self):
        self.gpu.read(self.cpu)
        res = numpy.asarray(self.gpu)
        assert numpy.allclose(self.cpu, res)
        # read() requires a writeable, shape/dtype/order-matching destination.
        self.cpu = numpy.ndarray((3, 4, 5), dtype="float32", order='C')
        self.cpu.setflags(write=False)
        self.assertRaises(ValueError, self.gpu.read, self.cpu)
        self.cpu = numpy.ndarray((2, 4, 5), dtype="float32", order='C')
        self.assertRaises(ValueError, self.gpu.read, self.cpu)
        self.cpu = numpy.ndarray((3, 4, 5), dtype="float64", order='C')
        self.assertRaises(ValueError, self.gpu.read, self.cpu)
        self.cpu = numpy.ndarray((3, 4, 5), dtype="float32", order='F')
        self.assertRaises(ValueError, self.gpu.read, self.cpu)
        self.cpu = numpy.ndarray((3, 4, 2, 5), dtype="float32", order='C')
        self.assertRaises(ValueError, self.gpu.read, self.cpu[:, :, 0, :])
def test_copy_view():
    """Yield copy()/view()/copy.copy()/deepcopy() cases over orders."""
    for shp in [(5,), (6, 7), (4, 8, 9), (1, 8, 9)]:
        for dtype in dtypes_all:
            for offseted in [False, True]:
                # order1 is the order of the original data
                for order1 in ['c', 'f']:
                    # order2 is the order wanted after copy
                    for order2 in ['c', 'f']:
                        yield copy_view, shp, dtype, offseted, order1, order2
def check_memory_region(a, a_op, b, b_op):
    """Check that GPU memory sharing between *b* and *b_op* mirrors the
    NumPy memory sharing between *a* and *a_op*."""
    cpu_shared = numpy.may_share_memory(a, a_op)
    gpu_shared = pygpu.gpuarray.may_share_memory(b, b_op)
    assert cpu_shared == gpu_shared
@guard_devsup
def copy_view(shp, dtype, offseted, order1, order2):
    """copy(), view(), copy.copy() and copy.deepcopy() of a GpuArray must
    match the NumPy equivalents in content, flags and memory sharing."""
    # TODO test copy unbroadcast!
    a, b = gen_gpuarray(shp, dtype, offseted, order=order1, ctx=ctx)
    assert numpy.allclose(a, numpy.asarray(b))
    check_flags(b, a)
    c = b.copy(order2)
    assert numpy.allclose(a, numpy.asarray(c))
    check_flags(c, a.copy(order2))
    check_memory_region(a, a.copy(order2), b, c)
    d = copy.copy(b)
    assert numpy.allclose(a, numpy.asarray(d))
    check_flags(d, copy.copy(a))
    check_memory_region(a, copy.copy(a), b, d)
    e = b.view()
    assert numpy.allclose(a, numpy.asarray(e))
    check_flags(e, a.view())
    check_memory_region(a, a.view(), b, e)
    f = copy.deepcopy(b)
    assert numpy.allclose(a, numpy.asarray(f))
    check_flags(f, copy.deepcopy(a))
    check_memory_region(a, copy.deepcopy(a), b, f)
    g = copy.copy(b.view())
    assert numpy.allclose(a, numpy.asarray(g))
    check_memory_region(a, copy.copy(a.view()), b, g)
    check_flags(g, copy.copy(a.view()))
def test_shape():
    """Yield shape-assignment and reshape cases (incl. -1 wildcard dims)."""
    for shps in [((), (1,)), ((5,), (1, 5)), ((5,), (5, 1)), ((2, 3), (6,)),
                 ((6,), (2, 3)), ((1,), ()),
                 ((4,), (-1,)), ((4, 3), (-1,)),
                 ((4, 3), (-1, 3)), ((4, 3), (4, -1)), ((4, 3), (3, -1)),
                 ((4, 3), (12, -1)), ((4, 3), (-1, 12)),
                 ((5, 4, 3, 2), (2, -1, 12)), ((4, 2), (2, 2, -1)),
                 # ((4, 3), (13, -1)),
                 ]:
        for offseted in [True, False]:
            for order1 in ['c', 'f']:
                # Direct shape assignment cannot take a -1 wildcard.
                if -1 not in shps[1]:
                    yield shape_, shps, offseted, order1
                for order2 in ['a', 'c', 'f']:
                    yield reshape, shps, offseted, order1, order2
def shape_(shps, offseted, order):
    """In-place shape assignment on GPU must match NumPy's behavior."""
    ac, ag = gen_gpuarray(shps[0], 'float32', offseted, order=order, ctx=ctx)
    try:
        ac.shape = shps[1]
    except AttributeError:
        # If numpy says it can't be done, we don't try to test it
        return
    ag.shape = shps[1]
    assert ac.strides == ag.strides, (ac.strides, ag.strides)
    # np.allclose don't test shapes
    assert ac.shape == ag.shape, (ac.shape, ag.shape)
    assert numpy.allclose(ac, numpy.asarray(ag))
def reshape(shps, offseted, order1, order2):
    """reshape() on GPU must match NumPy in shape, strides and content."""
    ac, ag = gen_gpuarray(shps[0], 'float32', offseted, order=order1, ctx=ctx)
    outc = ac.reshape(shps[1], order=order2)
    outg = ag.reshape(shps[1], order=order2)
    assert outc.shape == outg.shape
    assert outc.strides == outg.strides
    assert numpy.allclose(outc, numpy.asarray(outg))
def test_strides():
    """Yield stride-assignment cases (valid, zero and negative strides)."""
    yield strides_, (4, 4), 'c', 1, (4, 4)
    yield strides_, (4, 4), 'c', 1, (4, 16)
    yield strides_, (4, 4), 'c', 1, (16, 4)
    yield strides_, (4, 4), 'c', 1, (16, 8)
    yield strides_, (4, 4), 'c', 1, (16, 0)
    yield strides_, (4, 4), 'c', -1, (-20, 4)
    yield strides_, (4, 4), 'c', -1, (-12, 4)
def set_strides(a, newstr):
    """Assign *newstr* to ``a.strides`` (wrapper so assert_raises can call it)."""
    setattr(a, 'strides', newstr)
def strides_(shp, order, sliced, newstr):
    """Assigning strides on GPU must succeed or fail exactly as NumPy does."""
    ac, ag = gen_gpuarray(shp, 'float32', sliced=sliced, order=order, ctx=ctx)
    try:
        ac.strides = newstr
    except ValueError:
        # NumPy rejected the strides: the GPU array must reject them too.
        assert_raises(ValueError, set_strides, ag, newstr)
        return
    ag.strides = newstr
    check_flags(ag, ac)
    assert numpy.allclose(ac, numpy.asarray(ag))
def test_transpose():
    """Yield transpose cases, with and without explicit axis permutations."""
    for shp in [(2, 3), (4, 8, 9), (1, 2, 3, 4)]:
        for offseted in [True, False]:
            for order in ['c', 'f']:
                for sliced in [1, 2, -2, -1]:
                    yield transpose, shp, offseted, sliced, order
                    for perm in permutations(list(range(len(shp)))):
                        yield (transpose_perm, shp, perm, offseted, sliced,
                               order)
def transpose(shp, offseted, sliced, order):
    """transpose() (method and numpy.transpose) must match NumPy exactly."""
    ac, ag = gen_gpuarray(shp, 'float32', offseted, sliced=sliced,
                          order=order, ctx=ctx)
    rc = ac.transpose()
    rg = ag.transpose()
    check_all(rg, rc)
    # also check that we are exactly equal since this only a copy op
    assert numpy.all(rc == numpy.asarray(rg))
    # Test NumPy interface
    rg = numpy.transpose(ag)
    check_all(rg, rc)
    # also check that we are exactly equal since this only a copy op
    assert numpy.all(rc == numpy.asarray(rg))
def transpose_perm(shp, perm, offseted, sliced, order):
    """transpose(perm) with an explicit axis permutation must match NumPy."""
    ac, ag = gen_gpuarray(shp, 'float32', offseted, sliced=sliced,
                          order=order, ctx=ctx)
    rc = ac.transpose(perm)
    rg = ag.transpose(perm)
    check_all(rg, rc)
    # also check that we are exactly equal since this only a copy op
    assert numpy.all(rc == numpy.asarray(rg))
    # Test NumPy interface
    rg = numpy.transpose(ag, perm)
    check_all(rg, rc)
    # also check that we are exactly equal since this only a copy op
    assert numpy.all(rc == numpy.asarray(rg))
def test_transpose_args():
    """transpose must also accept the permutation as separate arguments."""
    ac, ag = gen_gpuarray((4, 3, 2), 'float32', ctx=ctx)
    rc = ac.transpose(0, 2, 1)
    rg = ag.transpose(0, 2, 1)
    check_all(rg, rc)
    # also check that we are exactly equal since this only a copy op
    assert numpy.all(rc == numpy.asarray(rg))
def test_len():
    """Yield len() cases over non-scalar shapes."""
    for shp in [(5,), (6, 7), (4, 8, 9), (1, 8, 9)]:
        for dtype in dtypes_all:
            for offseted in [True, False]:
                yield len_, shp, dtype, offseted
def len_(shp, dtype, offseted):
    """len() of a GpuArray must be the size of its first dimension."""
    a, a_gpu = gen_gpuarray(shp, dtype, offseted, ctx=ctx)
    assert len(a_gpu) == shp[0]
def test_mapping_getitem_w_int():
    """Yield integer/slice/tuple indexing cases per dtype and offset."""
    for dtype in dtypes_all:
        for offseted in [True, False]:
            yield mapping_getitem_w_int, dtype, offseted
@guard_devsup
def mapping_getitem_w_int(dtype, offseted):
    """Exhaustive __getitem__ checks against NumPy for vectors, scalars and
    4-D tensors: ints, numpy.int64, slices, negative steps, out-of-bounds
    (must raise IndexError, checked via _cmpf) and ellipsis."""
    # test vector
    dim = (2,)
    a, _a = gen_gpuarray(dim, dtype, offseted, ctx=ctx)
    _cmp(_a[...], a[...])
    _cmp(_a[...], a[...])
    _cmp(_a[...], a[...])
    _cmp(_a[...], a[...])
    _cmp(_a[...], a[...])
    _cmp(_a[-1], a[-1])
    _cmp(_a[1], a[1])
    _cmp(_a[0], a[0])
    _cmp(_a[::1], a[::1])
    _cmpNs(_a[::-1], a[::-1])
    _cmp(_a[...], a[...])
    _cmpf(_a, 2)
    # test scalar
    dim = ()
    a, _a = gen_gpuarray(dim, dtype, offseted, ctx=ctx)
    _cmp(_a[...], a[...])
    _cmpf(_a, 0)
    _cmpf(_a, slice(1))
    # test 4d-tensor
    dim = (5, 4, 3, 2)
    a, _a = gen_gpuarray(dim, dtype, offseted, ctx=ctx)
    _cmpf(_a, slice(-1), slice(-1), 10, -10)
    _cmpf(_a, slice(-1), slice(-1), -10, slice(-1))
    _cmpf(_a, 0, slice(0, -1, -20), -10)
    _cmpf(_a, 10)
    _cmpf(_a, (10, 0, 0, 0))
    _cmpf(_a, -10)
    # test with integer
    _cmp(_a[1], a[1])
    _cmp(_a[-1], a[-1])
    _cmp(_a[numpy.int64(1)], a[numpy.int64(1)])
    _cmp(_a[numpy.int64(-1)], a[numpy.int64(-1)])
    # test with slice
    _cmp(_a[1:], a[1:])
    _cmp(_a[1:2], a[1:2])
    _cmp(_a[-1:1], a[-1:1])
    _cmp(_a[6:7:], a[6:7:])
    # test with tuple (mix slice, integer, numpy.int64)
    _cmpNs(_a[0, 0, ::numpy.int64(-1), ::-1], a[0, 0, ::-1, ::-1])
    _cmpNs(_a[:, :, ::numpy.int64(-1), ::-1], a[:, :, ::-1, ::-1])
    _cmpNs(_a[:, :, numpy.int64(1), -1], a[:, :, 1, -1])
    _cmpNs(_a[:, :, ::-1, ::-1], a[:, :, ::-1, ::-1])
    _cmpNs(_a[:, :, ::-10, ::-10], a[:, :, ::-10, ::-10])
    _cmpNs(_a[:, :, 1, -1], a[:, :, 1, -1])
    _cmpNs(_a[:, :, -1, :], a[:, :, -1, :])
    _cmpNs(_a[:, ::-2, -1, :], a[:, ::-2, -1, :])
    _cmpNs(_a[:, ::-20, -1, :], a[:, ::-20, -1, :])
    _cmpNs(_a[:, ::-2, -1], a[:, ::-2, -1])
    _cmpNs(_a[0, ::-2, -1], a[0, ::-2, -1])
    _cmp(_a[-1, -1, -1, -2], a[-1, -1, -1, -2])
    # test ellipse
    _cmp(_a[...], a[...])
def _cmp(x, y):
    """Assert that GpuArray *x* equals ndarray *y*: metadata, flags (with
    documented exceptions) and, after transfer, exact element values."""
    assert isinstance(x, GpuArray)
    assert x.shape == y.shape
    assert x.dtype == y.dtype
    assert x.strides == y.strides
    assert x.flags["C_CONTIGUOUS"] == y.flags["C_CONTIGUOUS"], (x.flags,
                                                                y.flags)
    if y.size == 0:
        # F_CONTIGUOUS flags change definition with different numpy version
        # TODO: ideally, we should be F_CONTIGUOUS in that case.
        pass
    elif not (skip_single_f and y.shape == ()):
        assert x.flags["F_CONTIGUOUS"] == y.flags["F_CONTIGUOUS"], (x.flags,
                                                                    y.flags)
    else:
        assert x.flags["F_CONTIGUOUS"]
    # GpuArrays always own their data so don't check that flag.
    if x.flags["WRITEABLE"] != y.flags["WRITEABLE"]:
        assert x.ndim == 0
    assert x.flags["ALIGNED"] == y.flags["ALIGNED"], (x.flags, y.flags)
    assert x.flags["UPDATEIFCOPY"] == y.flags["UPDATEIFCOPY"], (x.flags,
                                                                y.flags)
    # Transfer to host and compare metadata and values exactly.
    x = numpy.asarray(x)
    assert x.shape == y.shape
    assert x.dtype == y.dtype
    assert x.strides == y.strides
    if not numpy.all(x == y):
        print(x)
        print(y)
    assert numpy.all(x == y), (x, y)
def _cmpNs(x, y):
    """Like _cmp, but don't compare strides after the transfer: a copy may
    have been made on the GPU before transferring.
    """
    assert x.shape == y.shape
    assert x.dtype == y.dtype
    assert x.strides == y.strides
    assert x.flags["C_CONTIGUOUS"] == y.flags["C_CONTIGUOUS"]
    assert x.flags["F_CONTIGUOUS"] == y.flags["F_CONTIGUOUS"]
    assert x.flags["WRITEABLE"] == y.flags["WRITEABLE"]
    assert x.flags["ALIGNED"] == y.flags["ALIGNED"]
    # we don't check owndata since it is always true for GpuArrays
    assert x.flags["UPDATEIFCOPY"] == y.flags["UPDATEIFCOPY"]
    x_ = numpy.asarray(x)
    assert x_.shape == y.shape
    assert x_.dtype == y.dtype
    assert numpy.all(x_ == y), (x_, y)
def _cmpf(x, *y):
try:
x.__getitem__(y)
except IndexError:
pass
else:
raise Exception("Did not generate out or bound error")
def _cmpfV(x, *y):
try:
if len(y) == 1:
x.__getitem__(*y)
else:
x.__getitem__(y)
except ValueError:
pass
else:
raise Exception("Did not generate value error")
def test_take1():
    """Yield take1 (fancy first-axis indexing) cases."""
    yield do_take1, (4, 3), [2, 0], False
    yield do_take1, (4, 3), [2, 0], True
    yield do_take1, (12, 4, 3), [1, 1, 1, 1, 1, 2, 2, 3, 3, 0, 0, 9], False
def do_take1(shp, idx, offseted):
    """GpuArray.take1 must match numpy.take along axis 0."""
    c, g = gen_gpuarray(shp, dtype='float32', ctx=ctx, order='c')
    ci = numpy.asarray(idx)
    gi = pygpu.asarray(ci, context=ctx)
    rc = c.take(ci, axis=0)
    rg = g.take1(gi)
    check_content(rg, rc)
def test_flags():
    """Yield flag checks, both dict-style (flags['C']) and attribute-style."""
    for fl in ['C', 'F', 'W', 'B', 'O', 'A', 'U', 'CA', 'FA', 'FNC', 'FORC',
               'CARRAY', 'FARRAY', 'FORTRAN', 'BEHAVED', 'OWNDATA', 'ALIGNED',
               'WRITEABLE', 'CONTIGUOUS', 'UPDATEIFCOPY', 'C_CONTIGUOUS',
               'F_CONTIGUOUS']:
        yield flag_dict, fl
    for p in ['c_contiguous', 'f_contiguous', 'contiguous', 'fortran',
              'updateifcopy', 'owndata', 'aligned', 'writeable', 'behaved',
              'carray', 'forc', 'fnc', 'farray']:
        yield flag_prop, p
def flag_dict(fl):
    """Dict-style flag lookup must agree between GpuArray and ndarray."""
    c2, g2 = gen_gpuarray((2, 3), dtype='float32', ctx=ctx, order='c')
    c3, g3 = gen_gpuarray((2, 3), dtype='float32', ctx=ctx, order='f')
    assert c2.flags[fl] == g2.flags[fl]
    assert c3.flags[fl] == g3.flags[fl]
def flag_prop(p):
    """Attribute-style flag access must agree between GpuArray and ndarray."""
    c2, g2 = gen_gpuarray((2, 3), dtype='float32', ctx=ctx, order='c')
    c3, g3 = gen_gpuarray((2, 3), dtype='float32', ctx=ctx, order='f')
    assert getattr(c2.flags, p) == getattr(g2.flags, p)
    assert getattr(c3.flags, p) == getattr(g3.flags, p)
class TestPickle(unittest.TestCase):
    """GpuArray, GpuContext and GpuKernel objects must refuse pickling
    (RuntimeError) under every pickle protocol."""
    def test_GpuArray(self):
        with self.assertRaises(RuntimeError):
            pickle.dumps(pygpu.zeros((32,), context=ctx))
        with self.assertRaises(RuntimeError):
            pickle.dumps(pygpu.zeros((32,), context=ctx), protocol=0)
        with self.assertRaises(RuntimeError):
            pickle.dumps(pygpu.zeros((32,), context=ctx), protocol=1)
        with self.assertRaises(RuntimeError):
            pickle.dumps(pygpu.zeros((32,), context=ctx), protocol=2)
        if PY3:
            # Protocol 3 only exists on Python 3.
            with self.assertRaises(RuntimeError):
                pickle.dumps(pygpu.zeros((32,), context=ctx), protocol=3)
        with self.assertRaises(RuntimeError):
            pickle.dumps(pygpu.zeros((32,), context=ctx), protocol=-1)
    def test_GpuContext(self):
        with self.assertRaises(RuntimeError):
            pickle.dumps(ctx)
        with self.assertRaises(RuntimeError):
            pickle.dumps(ctx, protocol=0)
        with self.assertRaises(RuntimeError):
            pickle.dumps(ctx, protocol=1)
        with self.assertRaises(RuntimeError):
            pickle.dumps(ctx, protocol=2)
        if PY3:
            with self.assertRaises(RuntimeError):
                pickle.dumps(ctx, protocol=3)
        with self.assertRaises(RuntimeError):
            pickle.dumps(ctx, protocol=-1)
    def test_GpuKernel(self):
        # Minimal valid kernel source; only pickling behavior is under test.
        k = GpuKernel("#include \"cluda.h\"\nKERNEL void "
                      "k(GLOBAL_MEM ga_float *in)"
                      "{in[0] = 0;}", "k", [], context=ctx)
        with self.assertRaises(RuntimeError):
            pickle.dumps(k)
        with self.assertRaises(RuntimeError):
            pickle.dumps(k, protocol=0)
        with self.assertRaises(RuntimeError):
            pickle.dumps(k, protocol=1)
        with self.assertRaises(RuntimeError):
            pickle.dumps(k, protocol=2)
        if PY3:
            with self.assertRaises(RuntimeError):
                pickle.dumps(k, protocol=3)
        with self.assertRaises(RuntimeError):
            pickle.dumps(k, protocol=-1)
| 26,790 | 31.162065 | 79 | py |
GENESIM | GENESIM-master/constructors/ensemble.py | """
Contains wrappers around well-known ensemble techniques: Random Forest and XGBoost.
Written by Gilles Vandewiele in commission of IDLab - INTEC from University Ghent.
"""
import time
from bayes_opt import BayesianOptimization
from sklearn.cross_validation import cross_val_score
from sklearn.ensemble import AdaBoostClassifier
from xgboost import XGBClassifier
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import decisiontree
class EnsembleConstructor(object):
    """Abstract interface for ensemble induction algorithms.

    Concrete subclasses set ``self.clf`` to a fitted estimator inside
    ``construct_classifier`` and report their name via ``get_name``.
    """

    def __init__(self):
        """In the init method, all hyper-parameters should be set."""
        # The fitted underlying estimator; populated by subclasses.
        self.clf = None

    def get_name(self):
        """Get the name of the induction algorithm implemented."""
        raise NotImplementedError("This method needs to be implemented")

    def construct_classifier(self, train, features, label_col):
        """Construct an ensemble classifier.

        **Params**
        ----------
        - `train` (pandas DataFrame) - a `Dataframe` containing all the training data
        - `features` (pandas Series or list) - the names of the feature columns
        - `label_col` (string) - the name of the class label column

        **Returns**
        -----------
        an ensemble classifier
        """
        raise NotImplementedError("This method needs to be implemented")

    def evaluate_multiple(self, feature_vectors):
        """Predict class labels for every row of ``feature_vectors``."""
        return self.clf.predict(feature_vectors)
class XGBClassification(EnsembleConstructor):
    """Ensemble constructor that fits an XGBoost model whose hyper-parameters
    are tuned with Bayesian optimisation over 5-fold CV accuracy."""

    def get_name(self):
        return 'XGBoost'

    def __init__(self):
        super(XGBClassification, self).__init__()
        # Number of boosting rounds chosen by the optimiser (set in construct_classifier).
        self.nr_clf = 0
        # Wall-clock seconds spent fitting the final model.
        self.time = 0

    def construct_classifier(self, train, features, label_col):
        data = train[features]
        target = train[label_col]

        def xgbcv(nr_classifiers, learning_rate, max_depth, min_child_weight, subsample, colsample_bytree, gamma,
                  reg_lambda):
            # Objective for the Bayesian optimiser: mean 5-fold CV accuracy.
            # Integer-valued hyper-parameters arrive as floats and are truncated.
            nr_classifiers = int(nr_classifiers)
            max_depth = int(max_depth)
            min_child_weight = int(min_child_weight)
            return cross_val_score(XGBClassifier(learning_rate=learning_rate, n_estimators=nr_classifiers,
                                                 gamma=gamma, subsample=subsample, colsample_bytree=colsample_bytree,
                                                 nthread=1, scale_pos_weight=1, reg_lambda=reg_lambda,
                                                 min_child_weight=min_child_weight, max_depth=max_depth),
                                   data, target, 'accuracy', cv=5).mean()

        # Search space: (lower, upper) bound per hyper-parameter.
        params = {
            'nr_classifiers': (50, 1000),
            'learning_rate': (0.01, 0.3),
            'max_depth': (5, 10),
            'min_child_weight': (2, 10),
            'subsample': (0.7, 0.8),
            'colsample_bytree': (0.5, 0.99),
            'gamma': (0.01, 1.),
            'reg_lambda': (0, 1)
        }

        xgbBO = BayesianOptimization(xgbcv, params, verbose=0)
        xgbBO.maximize(init_points=10, n_iter=20, n_restarts_optimizer=50)
        # xgbBO.maximize(init_points=1, n_iter=1, n_restarts_optimizer=100)

        # Read back the best point found and rebuild the classifier with it.
        best_params = xgbBO.res['max']['max_params']
        best_nr_classifiers = int(best_params['nr_classifiers'])
        self.nr_clf = best_nr_classifiers
        best_max_depth = int(best_params['max_depth'])
        best_min_child_weight = int(best_params['min_child_weight'])
        best_colsample_bytree = best_params['colsample_bytree']
        best_subsample = best_params['subsample']
        best_reg_lambda = best_params['reg_lambda']
        best_learning_rate = best_params['learning_rate']
        best_gamma = best_params['gamma']
        print(best_nr_classifiers)
        self.clf = XGBClassifier(learning_rate=best_learning_rate, n_estimators=best_nr_classifiers,
                                 gamma=best_gamma, subsample=best_subsample, colsample_bytree=best_colsample_bytree,
                                 nthread=1, scale_pos_weight=1, reg_lambda=best_reg_lambda,
                                 min_child_weight=best_min_child_weight, max_depth=best_max_depth)
        # Time only the final fit on the full training set.
        start = time.time()
        self.clf.fit(data, target)
        self.time = time.time() - start

        return self

    def evaluate_multiple(self, feature_vectors):
        # Delegate prediction to the fitted XGBoost model.
        return self.clf.predict(feature_vectors)
class RFClassification(EnsembleConstructor):
    """Ensemble constructor that fits a random forest whose hyper-parameters
    are tuned with Bayesian optimisation over 5-fold CV accuracy."""

    def get_name(self):
        return 'RF'

    def __init__(self):
        super(RFClassification, self).__init__()
        # Number of trees chosen by the optimiser (set in construct_classifier).
        self.nr_clf = 0
        # Wall-clock seconds spent fitting the final forest.
        self.time = 0

    def construct_classifier(self, train, features, label_col):
        data = train[features]
        target = train[label_col]

        def rfcv(nr_classifiers, max_depth, min_samples_leaf, bootstrap, criterion, max_features):
            # Objective for the optimiser. Categorical hyper-parameters are
            # encoded as floats in [0, 1] and decoded by rounding.
            nr_classifiers = int(nr_classifiers)
            max_depth = int(max_depth)
            min_samples_leaf = int(min_samples_leaf)
            if np.round(bootstrap):
                bootstrap = True
            else:
                bootstrap = False
            if np.round(criterion):
                criterion = 'gini'
            else:
                criterion = 'entropy'
            if np.round(max_features):
                max_features = None
            else:
                max_features = 1.0
            return cross_val_score(RandomForestClassifier(n_estimators=nr_classifiers, max_depth=max_depth,
                                                          min_samples_leaf=min_samples_leaf, bootstrap=bootstrap,
                                                          criterion=criterion, max_features=max_features),
                                   data, target, 'accuracy', cv=5).mean()

        # Search space; categorical choices are expressed as [0, 1] ranges.
        params = {
            'nr_classifiers': (10, 1000),
            'max_depth': (5, 10),
            'min_samples_leaf': (2, 10),
            'bootstrap': (0, 1),
            'criterion': (0, 1),
            'max_features': (0, 1)
        }

        rfBO = BayesianOptimization(rfcv, params, verbose=0)
        rfBO.maximize(init_points=10, n_iter=20, n_restarts_optimizer=50)
        # rfBO.maximize(init_points=1, n_iter=1, n_restarts_optimizer=50)

        # Decode the best point found and refit on the full training set.
        best_params = rfBO.res['max']['max_params']
        best_nr_classifiers = int(best_params['nr_classifiers'])
        self.nr_clf = best_nr_classifiers
        best_max_depth = int(best_params['max_depth'])
        best_min_samples_leaf = int(best_params['min_samples_leaf'])
        best_bootstrap = best_params['bootstrap']
        best_criterion = best_params['criterion']
        best_max_features = best_params['max_features']
        if np.round(best_bootstrap):
            best_bootstrap = True
        else:
            best_bootstrap = False
        if np.round(best_criterion):
            best_criterion = 'gini'
        else:
            best_criterion = 'entropy'
        if np.round(best_max_features):
            best_max_features = None
        else:
            best_max_features = 1.0
        self.clf = RandomForestClassifier(n_estimators=best_nr_classifiers, max_depth=best_max_depth,
                                          min_samples_leaf=best_min_samples_leaf, bootstrap=best_bootstrap,
                                          criterion=best_criterion, max_features=best_max_features)
        # Time only the final fit on the full training set.
        start = time.time()
        self.clf.fit(data, target)
        self.time = time.time() - start

        return self

    def evaluate_multiple(self, feature_vectors):
        # Delegate prediction to the fitted random forest.
        return self.clf.predict(feature_vectors)
def bootstrap(data, class_label, tree_constructors, bootstrap_features=False, nr_classifiers=3, boosting=True):
    """
    Bootstrapping ensemble technique

    **Params**
    ----------
    - `data` (DataFrame): containing all the data to be bootstrapped
    - `class_label` (string): the column in the dataframe that contains the target variables
    - `tree_constructors` (list): the induction algorithms (`constructors.treeconstructor.TreeConstructor`) used
    - `bootstrap_features` (boolean): if `True`, then apply bootstrapping to the features as well
    - `nr_classifiers` (int): for each `tree_constructor`, how many times must we bootstrap
    - `boosting` (boolean): if `True`, then do create models with AdaBoost too

    **Returns**
    -----------
    a vector of fitted classifiers, converted to DecisionTree (`decisiontree.DecisionTree`)
    """
    def _convert_to_tree(classifier, features):
        # Convert a fitted sklearn decision tree into the project's
        # decisiontree.DecisionTree representation.
        n_nodes = classifier.tree_.node_count
        children_left = classifier.tree_.children_left
        children_right = classifier.tree_.children_right
        feature = classifier.tree_.feature
        threshold = classifier.tree_.threshold
        classes = classifier.classes_

        # The tree structure can be traversed to compute various properties such
        # as the depth of each node and whether or not it is a leaf.
        node_depth = np.zeros(shape=n_nodes)
        decision_trees = [None] * n_nodes
        for i in range(n_nodes):
            decision_trees[i] = decisiontree.DecisionTree()
        is_leaves = np.zeros(shape=n_nodes, dtype=bool)
        stack = [(0, -1)]  # seed is the root node id and its parent depth
        while len(stack) > 0:
            node_id, parent_depth = stack.pop()
            node_depth[node_id] = parent_depth + 1

            # A node whose children differ is an internal (test) node; in
            # sklearn's arrays leaves have children_left == children_right.
            if children_left[node_id] != children_right[node_id]:
                stack.append((children_left[node_id], parent_depth + 1))
                stack.append((children_right[node_id], parent_depth + 1))
            else:
                is_leaves[node_id] = True
        for i in range(n_nodes):
            # Link children; leaves get the majority class, internal nodes
            # get the split feature name and threshold.
            if children_left[i] > 0:
                decision_trees[i].left = decision_trees[children_left[i]]
            if children_right[i] > 0:
                decision_trees[i].right = decision_trees[children_right[i]]
            if is_leaves[i]:
                decision_trees[i].label = classes[np.argmax(classifier.tree_.value[i][0])]
                decision_trees[i].value = None
            else:
                decision_trees[i].label = features[feature[i]]
                decision_trees[i].value = threshold[i]
        return decision_trees[0]

    # One row-index sample (drawn with replacement) per bootstrap round.
    idx = np.random.randint(0, len(data), (nr_classifiers, len(data)))
    decision_trees = []

    if boosting:
        # AdaBoost stumps are converted to DecisionTrees and added as well.
        ada = AdaBoostClassifier(base_estimator=None, n_estimators=nr_classifiers, learning_rate=0.25, random_state=1337)
        X_train = data.drop(class_label, axis=1).reset_index(drop=True)
        y_train = data[class_label].reset_index(drop=True)
        ada.fit(X_train, y_train)
        for estimator in ada.estimators_:
            dt = _convert_to_tree(estimator, X_train.columns)
            dt.data = data
            dt.populate_samples(X_train, y_train)
            decision_trees.append(dt)

    for indices in idx:
        if bootstrap_features:
            # Also subsample the columns (set() removes duplicate draws).
            features = list(set(np.random.randint(0, len(data.columns), (1, len(data.columns))).tolist()[0]))
            X_bootstrap = data.iloc[indices, features].reset_index(drop=True)
            if class_label in X_bootstrap.columns:
                X_bootstrap = X_bootstrap.drop(class_label, axis=1)
            y_bootstrap = data.iloc[indices][class_label].reset_index(drop=True)
        else:
            X_bootstrap = data.iloc[indices, :].drop(class_label, axis=1).reset_index(drop=True)
            y_bootstrap = data.iloc[indices][class_label].reset_index(drop=True)
        X = data.drop(class_label, axis=1).reset_index(drop=True)
        y = data[class_label].reset_index(drop=True)

        train_bootstrap = X_bootstrap.copy()
        train_bootstrap[y_bootstrap.name] = y_bootstrap

        for tree_constructor in tree_constructors:
            tree = tree_constructor.construct_classifier(train_bootstrap, X_bootstrap.columns, y_bootstrap.name)
            # print 'Number of nodes in stub:', tree_constructor.get_name(), count_nodes(tree)
            # print tree_constructor.get_name(), tree.count_nodes()
            # Each tree keeps its bootstrap sample and per-node sample counts
            # (populated against the full training set) for later use.
            tree.data = data.iloc[indices, :].reset_index(drop=True)
            tree.populate_samples(X, y)
            decision_trees.append(tree)
    return decision_trees
| 12,560 | 38.5 | 121 | py |
WiFi-CSI-Sensing-Benchmark | WiFi-CSI-Sensing-Benchmark-main/NTU_Fi_model.py | import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
class NTU_Fi_MLP(nn.Module):
    """Three-layer perceptron over flattened NTU-Fi CSI samples.

    Input: (batch, 3, 114, 500) tensors, flattened to 3*114*500 features;
    output: raw logits, one per class.
    """

    def __init__(self, num_classes):
        super(NTU_Fi_MLP, self).__init__()
        # Feature extractor: 57000 -> 1024 -> 128 with ReLU activations.
        self.fc = nn.Sequential(
            nn.Linear(3 * 114 * 500, 1024),
            nn.ReLU(),
            nn.Linear(1024, 128),
            nn.ReLU(),
        )
        # Linear head producing one logit per class.
        self.classifier = nn.Linear(128, num_classes)

    def forward(self, x):
        flat = x.view(-1, 3 * 114 * 500)
        return self.classifier(self.fc(flat))
class NTU_Fi_LeNet(nn.Module):
    """LeNet-style CNN for NTU-Fi CSI classification.

    (batch, 3, 114, 500) -> conv encoder -> (batch, 96, 4, 6)
    -> two-layer classifier -> (batch, num_classes) logits.
    """

    def __init__(self, num_classes):
        super(NTU_Fi_LeNet, self).__init__()
        # Three strided convolutions shrink the 114x500 frame to 4x6.
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 32, (15, 23), stride=9),
            nn.ReLU(True),
            nn.Conv2d(32, 64, 3, stride=(1, 3)),
            nn.ReLU(True),
            nn.Conv2d(64, 96, (7, 3), stride=(1, 3)),
            nn.ReLU(True),
        )
        self.fc = nn.Sequential(
            nn.Linear(96 * 4 * 6, 128),
            nn.ReLU(),
            nn.Linear(128, num_classes),
        )

    def forward(self, x):
        feats = self.encoder(x)
        return self.fc(feats.view(-1, 96 * 4 * 6))
class Bottleneck(nn.Module):
    """ResNet bottleneck residual block (1x1 -> 3x3 -> 1x1 convolutions)."""

    expansion = 4  # channel multiplier applied by the final 1x1 conv

    def __init__(self, in_channels, out_channels, i_downsample=None, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
        self.batch_norm1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.batch_norm2 = nn.BatchNorm2d(out_channels)
        self.conv3 = nn.Conv2d(out_channels, out_channels * self.expansion, kernel_size=1, stride=1, padding=0)
        self.batch_norm3 = nn.BatchNorm2d(out_channels * self.expansion)
        # Optional projection that matches the skip branch to the main branch.
        self.i_downsample = i_downsample
        self.stride = stride
        self.relu = nn.ReLU()

    def forward(self, x):
        shortcut = x.clone()
        out = self.relu(self.batch_norm1(self.conv1(x)))
        out = self.relu(self.batch_norm2(self.conv2(out)))
        out = self.batch_norm3(self.conv3(out))
        if self.i_downsample is not None:
            shortcut = self.i_downsample(shortcut)
        out += shortcut
        return self.relu(out)
class Block(nn.Module):
    """Basic ResNet residual block (two 3x3 convolutions)."""

    expansion = 1  # basic blocks do not expand the channel count

    def __init__(self, in_channels, out_channels, i_downsample=None, stride=1):
        super(Block, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, stride=1, bias=False)
        self.batch_norm1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, stride=stride, bias=False)
        self.batch_norm2 = nn.BatchNorm2d(out_channels)
        # Optional projection for the skip branch on shape changes.
        self.i_downsample = i_downsample
        self.stride = stride
        self.relu = nn.ReLU()

    def forward(self, x):
        shortcut = x.clone()
        out = self.relu(self.batch_norm1(self.conv1(x)))
        out = self.batch_norm2(self.conv2(out))
        if self.i_downsample is not None:
            shortcut = self.i_downsample(shortcut)
        out += shortcut
        return self.relu(out)
class NTU_Fi_ResNet(nn.Module):
    """ResNet backbone adapted to NTU-Fi CSI input of shape (3, 114, 500)."""

    def __init__(self, ResBlock, layer_list, num_classes):
        super(NTU_Fi_ResNet, self).__init__()
        # Two convolutions squeeze the 114x500 CSI frame to a 32x32 "image".
        self.reshape = nn.Sequential(
            nn.Conv2d(3, 3, (15, 23), stride=(3, 9)),
            nn.ReLU(),
            nn.Conv2d(3, 3, kernel_size=(3, 23), stride=1),
            nn.ReLU(),
        )
        self.in_channels = 64
        # Standard ResNet stem.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.batch_norm1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages of increasing width.
        self.layer1 = self._make_layer(ResBlock, layer_list[0], planes=64)
        self.layer2 = self._make_layer(ResBlock, layer_list[1], planes=128, stride=2)
        self.layer3 = self._make_layer(ResBlock, layer_list[2], planes=256, stride=2)
        self.layer4 = self._make_layer(ResBlock, layer_list[3], planes=512, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * ResBlock.expansion, num_classes)

    def forward(self, x):
        out = self.reshape(x)
        out = self.max_pool(self.relu(self.batch_norm1(self.conv1(out))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = self.avgpool(out)
        return self.fc(out.reshape(out.shape[0], -1))

    def _make_layer(self, ResBlock, blocks, planes, stride=1):
        # Project the skip connection whenever shape or width changes.
        downsample = None
        if stride != 1 or self.in_channels != planes * ResBlock.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.in_channels, planes * ResBlock.expansion, kernel_size=1, stride=stride),
                nn.BatchNorm2d(planes * ResBlock.expansion),
            )
        layers = [ResBlock(self.in_channels, planes, i_downsample=downsample, stride=stride)]
        self.in_channels = planes * ResBlock.expansion
        layers.extend(ResBlock(self.in_channels, planes) for _ in range(blocks - 1))
        return nn.Sequential(*layers)
def NTU_Fi_ResNet18(num_classes):
    """ResNet-18 variant (basic blocks, depths 2-2-2-2) for NTU-Fi CSI."""
    return NTU_Fi_ResNet(Block, [2,2,2,2], num_classes = num_classes)


def NTU_Fi_ResNet50(num_classes):
    """ResNet-50 variant (bottleneck blocks, depths 3-4-6-3) for NTU-Fi CSI."""
    return NTU_Fi_ResNet(Bottleneck, [3,4,6,3], num_classes = num_classes)


def NTU_Fi_ResNet101(num_classes):
    """ResNet-101 variant (bottleneck blocks, depths 3-4-23-3) for NTU-Fi CSI."""
    return NTU_Fi_ResNet(Bottleneck, [3,4,23,3], num_classes = num_classes)
class NTU_Fi_RNN(nn.Module):
    """Vanilla RNN classifier: CSI as a 500-step sequence of 342-dim frames."""

    def __init__(self, num_classes):
        super(NTU_Fi_RNN, self).__init__()
        self.rnn = nn.RNN(342, 64, num_layers=1)
        self.fc = nn.Linear(64, num_classes)

    def forward(self, x):
        # (batch, 3, 114, 500) -> (time=500, batch, features=342)
        seq = x.view(-1, 342, 500).permute(2, 0, 1)
        _, hidden = self.rnn(seq)
        # Classify from the final hidden state.
        return self.fc(hidden[-1])
class NTU_Fi_GRU(nn.Module):
    """GRU classifier over the (500, batch, 342) CSI sequence."""

    def __init__(self, num_classes):
        super(NTU_Fi_GRU, self).__init__()
        self.gru = nn.GRU(342, 64, num_layers=1)
        self.fc = nn.Linear(64, num_classes)

    def forward(self, x):
        # Reorder to (time, batch, features) as required by nn.GRU.
        seq = x.view(-1, 342, 500).permute(2, 0, 1)
        _, hidden = self.gru(seq)
        return self.fc(hidden[-1])
class NTU_Fi_LSTM(nn.Module):
    """LSTM classifier over the (500, batch, 342) CSI sequence."""

    def __init__(self, num_classes):
        super(NTU_Fi_LSTM, self).__init__()
        self.lstm = nn.LSTM(342, 64, num_layers=1)
        self.fc = nn.Linear(64, num_classes)

    def forward(self, x):
        seq = x.view(-1, 342, 500).permute(2, 0, 1)
        # Only the final hidden state feeds the classifier; the cell state
        # is discarded.
        _, (hidden, _) = self.lstm(seq)
        return self.fc(hidden[-1])
class NTU_Fi_BiLSTM(nn.Module):
    """Bidirectional LSTM classifier over the CSI sequence.

    Only the last layer's final hidden state (the backward direction) is
    fed to the linear head, matching the unidirectional variants.
    """

    def __init__(self, num_classes):
        super(NTU_Fi_BiLSTM, self).__init__()
        self.lstm = nn.LSTM(342, 64, num_layers=1, bidirectional=True)
        self.fc = nn.Linear(64, num_classes)

    def forward(self, x):
        seq = x.view(-1, 342, 500).permute(2, 0, 1)
        _, (hidden, _) = self.lstm(seq)
        return self.fc(hidden[-1])
class NTU_Fi_CNN_GRU(nn.Module):
    """Per-time-step 1-D CNN feature extractor followed by a GRU over time.

    Output is a softmax distribution over ``num_classes``.
    """

    def __init__(self, num_classes):
        super(NTU_Fi_CNN_GRU, self).__init__()
        # Conv stack applied independently to each of the 500 time steps
        # (each step is a 342-dim CSI vector).
        self.encoder = nn.Sequential(
            nn.Conv1d(1, 16, 12, 6),
            nn.ReLU(),
            nn.MaxPool1d(2),
            nn.Conv1d(16, 32, 7, 3),
            nn.ReLU(),
        )
        # Averages the 32 channels down to one 8-dim descriptor per step.
        self.mean = nn.AvgPool1d(32)
        self.gru = nn.GRU(8, 128, num_layers=1)
        self.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(128, num_classes),
            nn.Softmax(dim=1),
        )

    def forward(self, x):
        batch_size = len(x)
        # (batch, 3, 114, 500) -> (batch*500, 1, 342): one row per time step.
        steps = x.view(batch_size, 3 * 114, 500).permute(0, 2, 1)
        steps = steps.reshape(batch_size * 500, 1, 3 * 114)
        encoded = self.encoder(steps)                   # (batch*500, 32, 8)
        encoded = self.mean(encoded.permute(0, 2, 1))   # (batch*500, 8, 1)
        # Back to a (time, batch, feature) sequence for the GRU.
        seq = encoded.reshape(batch_size, 500, 8).permute(1, 0, 2)
        _, hidden = self.gru(seq)
        return self.classifier(hidden[-1])
class PatchEmbedding(nn.Module):
    """Splits a (342, 500) CSI frame into 9x25 patches, embeds each patch,
    prepends a learnable CLS token and adds positional embeddings."""

    def __init__(self, in_channels = 1, patch_size_w = 9, patch_size_h = 25, emb_size = 9*25, img_size = 342*500):
        self.patch_size_w = patch_size_w
        self.patch_size_h = patch_size_h
        super().__init__()
        # A non-overlapping convolution is equivalent to a linear patch
        # projection; Rearrange flattens the patch grid into a token axis.
        self.projection = nn.Sequential(
            nn.Conv2d(in_channels, emb_size, kernel_size = (patch_size_w, patch_size_h), stride = (patch_size_w, patch_size_h)),
            Rearrange('b e (h) (w) -> b (h w) e'),
        )
        self.cls_token = nn.Parameter(torch.randn(1,1,emb_size))
        # One position vector per patch, plus one for the CLS token.
        self.position = nn.Parameter(torch.randn(int(img_size/emb_size) + 1, emb_size))

    def forward(self, x):
        x = x.view(-1, 1, 342, 500)
        batch = x.shape[0]
        tokens = self.projection(x)
        cls = repeat(self.cls_token, '() n e -> b n e', b=batch)
        tokens = torch.cat([cls, tokens], dim=1)
        return tokens + self.position
class MultiHeadAttention(nn.Module):
    """Multi-head self-attention (default: 5 heads over a 225-dim embedding).

    Fixes relative to the original implementation:
    * the attention logits are scaled by 1/sqrt(d) *before* the softmax;
      the original divided the softmax output, so attention rows no longer
      summed to one and the scaling never tempered the softmax;
    * masking uses ``Tensor.masked_fill`` — ``Tensor.mask_fill`` does not
      exist, so passing a mask previously raised AttributeError.
    """

    def __init__(self, emb_size = 225, num_heads = 5, dropout = 0.0):
        super().__init__()
        self.emb_size = emb_size
        self.num_heads = num_heads
        # Single projection producing queries, keys and values at once.
        self.qkv = nn.Linear(emb_size, emb_size*3)
        self.att_drop = nn.Dropout(dropout)
        self.projection = nn.Linear(emb_size, emb_size)

    def forward(self, x, mask = None):
        # (b, n, 3*emb) -> 3 tensors of shape (b, heads, n, head_dim)
        qkv = rearrange(self.qkv(x), "b n (h d qkv) -> (qkv) b h n d", h=self.num_heads, qkv=3)
        queries, keys, values = qkv[0], qkv[1], qkv[2]
        energy = torch.einsum('bhqd, bhkd -> bhqk', queries, keys)
        if mask is not None:
            fill_value = torch.finfo(torch.float32).min
            # Positions where mask is False are excluded from attention.
            energy = energy.masked_fill(~mask, fill_value)
        scaling = self.emb_size ** (1/2)
        # Scale logits before softmax so each row is a proper distribution.
        att = F.softmax(energy / scaling, dim=-1)
        att = self.att_drop(att)
        # Weighted sum over the value vectors, then merge the heads.
        out = torch.einsum('bhal, bhlv -> bhav ', att, values)
        out = rearrange(out, "b h n d -> b n (h d)")
        return self.projection(out)
class ResidualAdd(nn.Module):
    """Wraps a module and adds its input to its output (skip connection)."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, **kwargs):
        return self.fn(x, **kwargs) + x
class FeedForwardBlock(nn.Sequential):
    """Transformer MLP: expand by ``expansion``, GELU, dropout, project back."""

    def __init__(self, emb_size, expansion = 4, drop_p = 0.):
        hidden = expansion * emb_size
        super().__init__(
            nn.Linear(emb_size, hidden),
            nn.GELU(),
            nn.Dropout(drop_p),
            nn.Linear(hidden, emb_size),
        )
class TransformerEncoderBlock(nn.Sequential):
    """Pre-norm transformer encoder block.

    Two residual sub-layers: LayerNorm -> multi-head attention -> dropout,
    then LayerNorm -> feed-forward -> dropout. Extra keyword arguments are
    forwarded to MultiHeadAttention.
    """

    def __init__(self,
                 emb_size = 225,
                 drop_p = 0.5,
                 forward_expansion = 4,
                 forward_drop_p = 0.,
                 ** kwargs):
        super().__init__(
            # Attention sub-layer with residual skip.
            ResidualAdd(nn.Sequential(
                nn.LayerNorm(emb_size),
                MultiHeadAttention(emb_size, **kwargs),
                nn.Dropout(drop_p)
            )),
            # Feed-forward sub-layer with residual skip.
            ResidualAdd(nn.Sequential(
                nn.LayerNorm(emb_size),
                FeedForwardBlock(
                    emb_size, expansion=forward_expansion, drop_p=forward_drop_p),
                nn.Dropout(drop_p)
            )
            ))
class TransformerEncoder(nn.Sequential):
    """Stack of ``depth`` TransformerEncoderBlock modules applied in order."""

    def __init__(self, depth = 1, **kwargs):
        super().__init__(*[TransformerEncoderBlock(**kwargs) for _ in range(depth)])
class ClassificationHead(nn.Sequential):
    """Mean-pools the token dimension, layer-normalises, maps to logits."""

    def __init__(self, emb_size, num_classes):
        super().__init__(
            # Average over all tokens (including CLS): b n e -> b e.
            Reduce('b n e -> b e', reduction='mean'),
            nn.LayerNorm(emb_size),
            nn.Linear(emb_size, num_classes))
class NTU_Fi_ViT(nn.Sequential):
    """Vision Transformer for NTU-Fi CSI: patch embedding -> encoder stack
    -> mean-pool classification head.

    ``num_classes`` is keyword-only; remaining keyword arguments are passed
    through to the attention layers.
    """

    def __init__(self,
                 in_channels = 1,
                 patch_size_w = 9,
                 patch_size_h = 25,
                 emb_size = 225,
                 img_size = 342*500,
                 depth = 1,
                 *,
                 num_classes,
                 **kwargs):
        super().__init__(
            PatchEmbedding(in_channels, patch_size_w, patch_size_h, emb_size, img_size),
            TransformerEncoder(depth, emb_size=emb_size, **kwargs),
            ClassificationHead(emb_size, num_classes)
        )
| 13,031 | 32.674419 | 128 | py |
WiFi-CSI-Sensing-Benchmark | WiFi-CSI-Sensing-Benchmark-main/dataset.py | import numpy as np
import glob
import scipy.io as sio
import torch
from torch.utils.data import Dataset, DataLoader
def UT_HAR_dataset(root_dir):
    """Load every UT-HAR data/label file under ``root_dir`` into tensors.

    Globs ``<root_dir>/UT_HAR/data/*.csv`` and ``<root_dir>/UT_HAR/label/*.csv``.
    Despite the .csv extension, files are read with ``np.load`` — presumably
    they hold serialised numpy arrays; confirm against the dataset download
    script. Data arrays are reshaped to (N, 1, 250, 90) and min-max normalised
    to [0, 1]; label arrays are returned unchanged. Keys of the returned dict
    are the file stems.
    """
    data_list = glob.glob(root_dir+'/UT_HAR/data/*.csv')
    label_list = glob.glob(root_dir+'/UT_HAR/label/*.csv')
    WiFi_data = {}
    for data_dir in data_list:
        # NOTE(review): splitting on '/' assumes POSIX paths; breaks on Windows.
        data_name = data_dir.split('/')[-1].split('.')[0]
        with open(data_dir, 'rb') as f:
            data = np.load(f)
            data = data.reshape(len(data),1,250,90)
            # Global min-max normalisation over the whole split.
            data_norm = (data - np.min(data)) / (np.max(data) - np.min(data))
        WiFi_data[data_name] = torch.Tensor(data_norm)
    for label_dir in label_list:
        label_name = label_dir.split('/')[-1].split('.')[0]
        with open(label_dir, 'rb') as f:
            label = np.load(f)
        WiFi_data[label_name] = torch.Tensor(label)
    return WiFi_data
# dataset: /class_name/xx.mat
class CSI_Dataset(Dataset):
    """CSI dataset.

    Expects a ``root_dir/<class_name>/<sample>.mat`` layout; each sample's
    label is the integer assigned to its parent directory name.
    """

    def __init__(self, root_dir, modal='CSIamp', transform=None, few_shot=False, k=5, single_trace=True):
        """
        Args:
            root_dir (string): Directory with all the images.
            modal (CSIamp/CSIphase): CSI data modal
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        # NOTE(review): few_shot, k and single_trace are accepted but unused here.
        self.root_dir = root_dir
        self.modal=modal
        self.transform = transform
        self.data_list = glob.glob(root_dir+'/*/*.mat')
        self.folder = glob.glob(root_dir+'/*/')
        # Map each class folder name to an integer label.
        # NOTE(review): the mapping depends on glob ordering, which is not
        # guaranteed stable across platforms — verify labels match between runs.
        self.category = {self.folder[i].split('/')[-2]:i for i in range(len(self.folder))}

    def __len__(self):
        return len(self.data_list)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        sample_dir = self.data_list[idx]
        # Label comes from the sample's parent directory name.
        y = self.category[sample_dir.split('/')[-2]]
        x = sio.loadmat(sample_dir)[self.modal]
        # Normalise with fixed dataset-wide mean/std constants.
        x = (x - 42.3199)/4.9802
        # Temporal down-sampling: 2000 -> 500 (keep every 4th step).
        x = x[:,::4]
        x = x.reshape(3, 114, 500)
        if self.transform:
            x = self.transform(x)
        x = torch.FloatTensor(x)
        return x,y
class Widar_Dataset(Dataset):
    """Widar BVP dataset: ``root_dir/<class_name>/<sample>.csv`` layout.

    Each CSV holds 22*400 values that are reshaped to (22, 20, 20); the
    label is the integer assigned to the sample's parent directory name.
    """

    def __init__(self,root_dir):
        self.root_dir = root_dir
        self.data_list = glob.glob(root_dir+'/*/*.csv')
        self.folder = glob.glob(root_dir+'/*/')
        # NOTE(review): label mapping depends on glob ordering, which is not
        # guaranteed stable across platforms.
        self.category = {self.folder[i].split('/')[-2]:i for i in range(len(self.folder))}

    def __len__(self):
        return len(self.data_list)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        sample_dir = self.data_list[idx]
        y = self.category[sample_dir.split('/')[-2]]
        x = np.genfromtxt(sample_dir, delimiter=',')
        # Normalise with fixed dataset-wide mean/std constants.
        x = (x - 0.0025)/0.0119
        # reshape: 22,400 -> 22,20,20
        x = x.reshape(22,20,20)
        # interpolate from 20x20 to 32x32
        # x = self.reshape(x)
        x = torch.FloatTensor(x)
        return x,y
| 3,086 | 29.564356 | 105 | py |
WiFi-CSI-Sensing-Benchmark | WiFi-CSI-Sensing-Benchmark-main/UT_HAR_model.py | import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
class UT_HAR_MLP(nn.Module):
    """Three-layer perceptron over flattened UT-HAR CSI samples.

    Input: (batch, 1, 250, 90) tensors flattened to 22500 features;
    output: logits over the 7 UT-HAR activity classes.
    """

    def __init__(self):
        super(UT_HAR_MLP, self).__init__()
        # 22500 -> 1024 -> 128 -> 7 with ReLU activations in between.
        self.fc = nn.Sequential(
            nn.Linear(250 * 90, 1024),
            nn.ReLU(),
            nn.Linear(1024, 128),
            nn.ReLU(),
            nn.Linear(128, 7),
        )

    def forward(self, x):
        return self.fc(x.view(-1, 250 * 90))
class UT_HAR_LeNet(nn.Module):
    """LeNet-style CNN for UT-HAR: (batch, 1, 250, 90) -> 7-class logits."""

    def __init__(self):
        super(UT_HAR_LeNet, self).__init__()
        # Conv/pool stack shrinks the 250x90 frame down to 96x4x4.
        self.encoder = nn.Sequential(
            nn.Conv2d(1, 32, 7, stride=(3, 1)),
            nn.ReLU(True),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, (5, 4), stride=(2, 2), padding=(1, 0)),
            nn.ReLU(True),
            nn.MaxPool2d(2),
            nn.Conv2d(64, 96, (3, 3), stride=1),
            nn.ReLU(True),
            nn.MaxPool2d(2),
        )
        self.fc = nn.Sequential(
            nn.Linear(96 * 4 * 4, 128),
            nn.ReLU(),
            nn.Linear(128, 7),
        )

    def forward(self, x):
        feats = self.encoder(x)
        return self.fc(feats.view(-1, 96 * 4 * 4))
class Bottleneck(nn.Module):
    """Bottleneck residual block: 1x1 reduce, 3x3, 1x1 expand (x4)."""

    expansion = 4  # output channels = out_channels * expansion

    def __init__(self, in_channels, out_channels, i_downsample=None, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
        self.batch_norm1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.batch_norm2 = nn.BatchNorm2d(out_channels)
        self.conv3 = nn.Conv2d(out_channels, out_channels * self.expansion, kernel_size=1, stride=1, padding=0)
        self.batch_norm3 = nn.BatchNorm2d(out_channels * self.expansion)
        # Optional projection aligning the skip branch with the main branch.
        self.i_downsample = i_downsample
        self.stride = stride
        self.relu = nn.ReLU()

    def forward(self, x):
        residual = x.clone()
        y = self.relu(self.batch_norm1(self.conv1(x)))
        y = self.relu(self.batch_norm2(self.conv2(y)))
        y = self.batch_norm3(self.conv3(y))
        if self.i_downsample is not None:
            residual = self.i_downsample(residual)
        y += residual
        return self.relu(y)
class Block(nn.Module):
    """Basic two-conv residual block (no channel expansion)."""

    expansion = 1

    def __init__(self, in_channels, out_channels, i_downsample=None, stride=1):
        super(Block, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, stride=1, bias=False)
        self.batch_norm1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, stride=stride, bias=False)
        self.batch_norm2 = nn.BatchNorm2d(out_channels)
        # Optional projection for the skip branch.
        self.i_downsample = i_downsample
        self.stride = stride
        self.relu = nn.ReLU()

    def forward(self, x):
        residual = x.clone()
        y = self.relu(self.batch_norm1(self.conv1(x)))
        y = self.batch_norm2(self.conv2(y))
        if self.i_downsample is not None:
            residual = self.i_downsample(residual)
        y += residual
        return self.relu(y)
class UT_HAR_ResNet(nn.Module):
    """ResNet backbone adapted to UT-HAR CSI input of shape (1, 250, 90)."""

    def __init__(self, ResBlock, layer_list, num_classes=7):
        super(UT_HAR_ResNet, self).__init__()
        # Conv/pool/conv sequence turns the 250x90 frame into a 3x32x32 "image".
        self.reshape = nn.Sequential(
            nn.Conv2d(1, 3, 7, stride=(3, 1)),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(3, 3, kernel_size=(10, 11), stride=1),
            nn.ReLU(),
        )
        self.in_channels = 64
        # Standard ResNet stem.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.batch_norm1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Four residual stages of increasing width.
        self.layer1 = self._make_layer(ResBlock, layer_list[0], planes=64)
        self.layer2 = self._make_layer(ResBlock, layer_list[1], planes=128, stride=2)
        self.layer3 = self._make_layer(ResBlock, layer_list[2], planes=256, stride=2)
        self.layer4 = self._make_layer(ResBlock, layer_list[3], planes=512, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * ResBlock.expansion, num_classes)

    def forward(self, x):
        y = self.reshape(x)
        y = self.max_pool(self.relu(self.batch_norm1(self.conv1(y))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            y = stage(y)
        y = self.avgpool(y)
        return self.fc(y.reshape(y.shape[0], -1))

    def _make_layer(self, ResBlock, blocks, planes, stride=1):
        # Project the skip connection whenever shape or width changes.
        projection = None
        if stride != 1 or self.in_channels != planes * ResBlock.expansion:
            projection = nn.Sequential(
                nn.Conv2d(self.in_channels, planes * ResBlock.expansion, kernel_size=1, stride=stride),
                nn.BatchNorm2d(planes * ResBlock.expansion),
            )
        layers = [ResBlock(self.in_channels, planes, i_downsample=projection, stride=stride)]
        self.in_channels = planes * ResBlock.expansion
        layers.extend(ResBlock(self.in_channels, planes) for _ in range(blocks - 1))
        return nn.Sequential(*layers)
def UT_HAR_ResNet18():
    """ResNet-18 variant (basic blocks, depths 2-2-2-2) for UT-HAR."""
    return UT_HAR_ResNet(Block, [2,2,2,2])


def UT_HAR_ResNet50():
    """ResNet-50 variant (bottleneck blocks, depths 3-4-6-3) for UT-HAR."""
    return UT_HAR_ResNet(Bottleneck, [3,4,6,3])


def UT_HAR_ResNet101():
    """ResNet-101 variant (bottleneck blocks, depths 3-4-23-3) for UT-HAR."""
    return UT_HAR_ResNet(Bottleneck, [3,4,23,3])
class UT_HAR_RNN(nn.Module):
    """Vanilla RNN over UT-HAR CSI as a 250-step sequence of 90-dim frames."""

    def __init__(self, hidden_dim=64):
        super(UT_HAR_RNN, self).__init__()
        self.rnn = nn.RNN(90, hidden_dim, num_layers=1)
        self.fc = nn.Linear(hidden_dim, 7)

    def forward(self, x):
        # (batch, 1, 250, 90) -> (time=250, batch, features=90)
        seq = x.view(-1, 250, 90).permute(1, 0, 2)
        _, hidden = self.rnn(seq)
        return self.fc(hidden[-1])
class UT_HAR_GRU(nn.Module):
    """GRU classifier over the (250, batch, 90) UT-HAR sequence."""

    def __init__(self, hidden_dim=64):
        super(UT_HAR_GRU, self).__init__()
        self.gru = nn.GRU(90, hidden_dim, num_layers=1)
        self.fc = nn.Linear(hidden_dim, 7)

    def forward(self, x):
        seq = x.view(-1, 250, 90).permute(1, 0, 2)
        _, hidden = self.gru(seq)
        # Classify from the final hidden state.
        return self.fc(hidden[-1])
class UT_HAR_LSTM(nn.Module):
    """LSTM classifier over the (250, batch, 90) UT-HAR sequence."""

    def __init__(self, hidden_dim=64):
        super(UT_HAR_LSTM, self).__init__()
        self.lstm = nn.LSTM(90, hidden_dim, num_layers=1)
        self.fc = nn.Linear(hidden_dim, 7)

    def forward(self, x):
        seq = x.view(-1, 250, 90).permute(1, 0, 2)
        # Cell state is discarded; only the final hidden state is used.
        _, (hidden, _) = self.lstm(seq)
        return self.fc(hidden[-1])
class UT_HAR_BiLSTM(nn.Module):
    """Bidirectional LSTM over the UT-HAR sequence.

    Only the last direction's final hidden state (hidden_dim wide) feeds
    the linear head, mirroring the unidirectional variants.
    """

    def __init__(self, hidden_dim=64):
        super(UT_HAR_BiLSTM, self).__init__()
        self.lstm = nn.LSTM(90, hidden_dim, num_layers=1, bidirectional=True)
        self.fc = nn.Linear(hidden_dim, 7)

    def forward(self, x):
        seq = x.view(-1, 250, 90).permute(1, 0, 2)
        _, (hidden, _) = self.lstm(seq)
        return self.fc(hidden[-1])
class UT_HAR_CNN_GRU(nn.Module):
    """1-D CNN over each 250-step frame followed by a GRU and softmax head."""

    def __init__(self):
        super(UT_HAR_CNN_GRU, self).__init__()
        # Treats the 250 time steps as channels and convolves along the 90
        # subcarriers: 90 -> 27 -> 12 -> 8 features per step.
        self.encoder = nn.Sequential(
            nn.Conv1d(250, 250, 12, 3),
            nn.ReLU(True),
            nn.Conv1d(250, 250, 5, 2),
            nn.ReLU(True),
            nn.Conv1d(250, 250, 5, 1)
        )
        self.gru = nn.GRU(8, 128, num_layers=1)
        self.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(128, 7),
            nn.Softmax(dim=1)
        )

    def forward(self, x):
        # (batch, 1, 250, 90) -> (batch, 250, 90)
        frames = x.view(-1, 250, 90)
        encoded = self.encoder(frames)          # (batch, 250, 8)
        # GRU expects (time, batch, features).
        _, hidden = self.gru(encoded.permute(1, 0, 2))
        return self.classifier(hidden[-1])
class PatchEmbedding(nn.Module):
    """Embeds a (250, 90) UT-HAR frame as a sequence of 50x18 patch tokens,
    with a prepended CLS token and learned positional embeddings.

    Unlike the NTU-Fi variant, the input must already be shaped
    (batch, 1, 250, 90).
    """

    def __init__(self, in_channels = 1, patch_size_w = 50, patch_size_h = 18, emb_size = 50*18, img_size = 250*90):
        self.patch_size_w = patch_size_w
        self.patch_size_h = patch_size_h
        super().__init__()
        # Non-overlapping convolution == linear patch projection.
        self.projection = nn.Sequential(
            nn.Conv2d(in_channels, emb_size, kernel_size = (patch_size_w, patch_size_h), stride = (patch_size_w, patch_size_h)),
            Rearrange('b e (h) (w) -> b (h w) e'),
        )
        self.cls_token = nn.Parameter(torch.randn(1,1,emb_size))
        # One position vector per patch plus one for the CLS token.
        self.position = nn.Parameter(torch.randn(int(img_size/emb_size) + 1, emb_size))

    def forward(self, x):
        batch = x.shape[0]
        patches = self.projection(x)
        cls = repeat(self.cls_token, '() n e -> b n e', b=batch)
        tokens = torch.cat([cls, patches], dim=1)
        return tokens + self.position
class MultiHeadAttention(nn.Module):
    """Multi-head self-attention (default: 5 heads over a 900-dim embedding).

    Fixes relative to the original implementation:
    * the attention logits are scaled by 1/sqrt(d) *before* the softmax;
      the original divided the softmax output, so attention rows no longer
      summed to one and the scaling never tempered the softmax;
    * masking uses ``Tensor.masked_fill`` — ``Tensor.mask_fill`` does not
      exist, so passing a mask previously raised AttributeError.
    """

    def __init__(self, emb_size = 900, num_heads = 5, dropout = 0.0):
        super().__init__()
        self.emb_size = emb_size
        self.num_heads = num_heads
        # Single projection producing queries, keys and values at once.
        self.qkv = nn.Linear(emb_size, emb_size*3)
        self.att_drop = nn.Dropout(dropout)
        self.projection = nn.Linear(emb_size, emb_size)

    def forward(self, x, mask = None):
        # (b, n, 3*emb) -> 3 tensors of shape (b, heads, n, head_dim)
        qkv = rearrange(self.qkv(x), "b n (h d qkv) -> (qkv) b h n d", h=self.num_heads, qkv=3)
        queries, keys, values = qkv[0], qkv[1], qkv[2]
        energy = torch.einsum('bhqd, bhkd -> bhqk', queries, keys)
        if mask is not None:
            fill_value = torch.finfo(torch.float32).min
            # Positions where mask is False are excluded from attention.
            energy = energy.masked_fill(~mask, fill_value)
        scaling = self.emb_size ** (1/2)
        # Scale logits before softmax so each row is a proper distribution.
        att = F.softmax(energy / scaling, dim=-1)
        att = self.att_drop(att)
        # Weighted sum over the value vectors, then merge the heads.
        out = torch.einsum('bhal, bhlv -> bhav ', att, values)
        out = rearrange(out, "b h n d -> b n (h d)")
        return self.projection(out)
class ResidualAdd(nn.Module):
    """Applies ``fn`` and adds the original input (residual connection)."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, **kwargs):
        return self.fn(x, **kwargs) + x
class FeedForwardBlock(nn.Sequential):
    """Transformer MLP sub-layer: expand, GELU, dropout, project back."""

    def __init__(self, emb_size, expansion = 4, drop_p = 0.):
        wide = expansion * emb_size
        super().__init__(
            nn.Linear(emb_size, wide),
            nn.GELU(),
            nn.Dropout(drop_p),
            nn.Linear(wide, emb_size),
        )
class TransformerEncoderBlock(nn.Sequential):
    """Pre-norm transformer encoder block.

    Two residual sub-layers: LayerNorm -> multi-head attention -> dropout,
    then LayerNorm -> feed-forward -> dropout. Extra keyword arguments are
    forwarded to MultiHeadAttention.
    """

    def __init__(self,
                 emb_size = 900,
                 drop_p = 0.,
                 forward_expansion = 4,
                 forward_drop_p = 0.,
                 ** kwargs):
        super().__init__(
            # Attention sub-layer with residual skip.
            ResidualAdd(nn.Sequential(
                nn.LayerNorm(emb_size),
                MultiHeadAttention(emb_size, **kwargs),
                nn.Dropout(drop_p)
            )),
            # Feed-forward sub-layer with residual skip.
            ResidualAdd(nn.Sequential(
                nn.LayerNorm(emb_size),
                FeedForwardBlock(
                    emb_size, expansion=forward_expansion, drop_p=forward_drop_p),
                nn.Dropout(drop_p)
            )
            ))
class TransformerEncoder(nn.Sequential):
    """Stack of ``depth`` TransformerEncoderBlock modules applied in order."""

    def __init__(self, depth = 1, **kwargs):
        super().__init__(*[TransformerEncoderBlock(**kwargs) for _ in range(depth)])
class ClassificationHead(nn.Sequential):
    """Mean-pools the token dimension, layer-normalises, maps to logits."""

    def __init__(self, emb_size = 900, n_classes = 7):
        super().__init__(
            # Average over all tokens (including CLS): b n e -> b e.
            Reduce('b n e -> b e', reduction='mean'),
            nn.LayerNorm(emb_size),
            nn.Linear(emb_size, n_classes))
class UT_HAR_ViT(nn.Sequential):
    """Vision Transformer for UT-HAR CSI: patch embedding -> encoder stack
    -> mean-pool classification head. Extra keyword arguments are passed
    through to the attention layers.
    """

    def __init__(self,
                 in_channels = 1,
                 patch_size_w = 50,
                 patch_size_h = 18,
                 emb_size = 900,
                 img_size = 250*90,
                 depth = 1,
                 n_classes = 7,
                 **kwargs):
        super().__init__(
            PatchEmbedding(in_channels, patch_size_w, patch_size_h, emb_size, img_size),
            TransformerEncoder(depth, emb_size=emb_size, **kwargs),
            ClassificationHead(emb_size, n_classes)
        )
| 12,505 | 32.52815 | 128 | py |
WiFi-CSI-Sensing-Benchmark | WiFi-CSI-Sensing-Benchmark-main/run.py | import numpy as np
import torch
import torch.nn as nn
import argparse
from util import load_data_n_model
def train(model, tensor_loader, num_epochs, learning_rate, criterion, device):
    """Train `model` in place on `tensor_loader` with Adam.

    Prints the dataset-averaged loss and batch-averaged accuracy per epoch.
    Returns None.
    """
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr = learning_rate)
    for epoch in range(num_epochs):
        model.train()
        epoch_loss = 0
        epoch_accuracy = 0
        for inputs, labels in tensor_loader:
            inputs = inputs.to(device)
            # BUG FIX: the original moved labels/outputs to `device` and then
            # replaced them with CPU copies via .type(torch.LongTensor) /
            # .type(torch.FloatTensor), so the loss and backward pass always
            # ran on CPU even when CUDA was selected. Cast first, move once.
            labels = labels.type(torch.LongTensor).to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # Weight loss by batch size so the epoch loss is a dataset mean.
            epoch_loss += loss.item() * inputs.size(0)
            predict_y = torch.argmax(outputs, dim=1)
            epoch_accuracy += (predict_y == labels).sum().item() / labels.size(0)
        epoch_loss = epoch_loss/len(tensor_loader.dataset)
        epoch_accuracy = epoch_accuracy/len(tensor_loader)
        print('Epoch:{}, Accuracy:{:.4f},Loss:{:.9f}'.format(epoch+1, float(epoch_accuracy),float(epoch_loss)))
    return
def test(model, tensor_loader, criterion, device):
model.eval()
test_acc = 0
test_loss = 0
for data in tensor_loader:
inputs, labels = data
inputs = inputs.to(device)
labels.to(device)
labels = labels.type(torch.LongTensor)
outputs = model(inputs)
outputs = outputs.type(torch.FloatTensor)
outputs.to(device)
loss = criterion(outputs,labels)
predict_y = torch.argmax(outputs,dim=1).to(device)
accuracy = (predict_y == labels.to(device)).sum().item() / labels.size(0)
test_acc += accuracy
test_loss += loss.item() * inputs.size(0)
test_acc = test_acc/len(tensor_loader)
test_loss = test_loss/len(tensor_loader.dataset)
print("validation accuracy:{:.4f}, loss:{:.5f}".format(float(test_acc),float(test_loss)))
return
def main():
    """CLI entry point: choose a dataset and model, then train and evaluate."""
    root = './Data/'
    parser = argparse.ArgumentParser('WiFi Imaging Benchmark')
    parser.add_argument('--dataset', choices = ['UT_HAR_data','NTU-Fi-HumanID','NTU-Fi_HAR','Widar'])
    parser.add_argument('--model', choices = ['MLP','LeNet','ResNet18','ResNet50','ResNet101','RNN','GRU','LSTM','BiLSTM', 'CNN+GRU','ViT'])
    args = parser.parse_args()

    train_loader, test_loader, model, train_epoch = load_data_n_model(args.dataset, args.model, root)
    criterion = nn.CrossEntropyLoss()
    # Prefer a GPU when one is available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)

    train(model=model, tensor_loader=train_loader, num_epochs=train_epoch,
          learning_rate=1e-3, criterion=criterion, device=device)
    test(model=model, tensor_loader=test_loader, criterion=criterion, device=device)
    return


if __name__ == "__main__":
    main()
| 3,185 | 33.258065 | 140 | py |
WiFi-CSI-Sensing-Benchmark | WiFi-CSI-Sensing-Benchmark-main/self_supervised_model.py | import torch
import torch.nn as nn
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
import torch.nn.functional as F
class MLP_Parrallel(nn.Module):
    """Two MLP encoder branches sharing one linear classification head."""

    def __init__(self):
        super(MLP_Parrallel, self).__init__()
        self.encoder_1 = MLP_encoder()
        self.encoder_2 = MLP_encoder()
        self.classifier = nn.Linear(128,14)

    def forward(self, x1, x2, flag='unsupervised'):
        if flag == 'supervised':
            # Supervised: classify the raw backbone features of each branch.
            feat_a = self.encoder_1(x1, flag=flag)
            feat_b = self.encoder_2(x2, flag=flag)
            return self.classifier(feat_a), self.classifier(feat_b)
        # Unsupervised: return the projected embeddings for contrastive use.
        return self.encoder_1(x1), self.encoder_2(x2)
class MLP_encoder(nn.Module):
    """Flattens a (3, 114, 500) CSI sample and embeds it with a 2-layer MLP.

    'supervised' returns the 128-d backbone feature; otherwise the feature is
    projected to `hidden_states` dims and batch-normalised.
    """

    def __init__(self, hidden_states = 256):
        super(MLP_encoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(3*114*500,1024),
            nn.ReLU(),
            nn.Linear(1024,128),
            nn.ReLU(),
        )
        self.mapping = nn.Linear(128, hidden_states)
        self.bn = nn.BatchNorm1d(hidden_states)

    def forward(self, x, flag='unsupervised'):
        feat = self.encoder(x.view(-1, 3*114*500))
        if flag == 'supervised':
            return feat
        return self.bn(self.mapping(feat))
class CNN_Parrallel(nn.Module):
    """Two CNN encoder branches sharing one MLP classification head."""

    def __init__(self):
        super(CNN_Parrallel, self).__init__()
        self.encoder_1 = CNN_encoder()
        self.encoder_2 = CNN_encoder()
        self.classifier = nn.Sequential(
            nn.Linear(96*4*6,128),
            nn.ReLU(),
            nn.Linear(128,14)
        )

    def forward(self, x1, x2, flag='unsupervised'):
        if flag == 'supervised':
            feat_a = self.encoder_1(x1, flag=flag)
            feat_b = self.encoder_2(x2, flag=flag)
            return self.classifier(feat_a), self.classifier(feat_b)
        return self.encoder_1(x1), self.encoder_2(x2)
class CNN_encoder(nn.Module):
    """3-layer conv backbone for (3, 114, 500) CSI maps -> 96*4*6 features.

    'supervised' returns the flattened conv feature; otherwise it is projected
    to `hidden_states` dims and batch-normalised.
    """

    def __init__(self, hidden_states = 256):
        super(CNN_encoder, self).__init__()
        # input size: (3, 114, 500) -> (96, 4, 6)
        self.encoder = nn.Sequential(
            nn.Conv2d(3,32,(15,23),stride=9),
            nn.ReLU(True),
            nn.Conv2d(32,64,3,stride=(1,3)),
            nn.ReLU(True),
            nn.Conv2d(64,96,(7,3),stride=(1,3)),
            nn.ReLU(True),
        )
        self.mapping = nn.Linear(96*4*6, hidden_states)
        self.bn = nn.BatchNorm1d(hidden_states)
        self.conv_channel = 96
        self.conv_feat_num = 24

    def forward(self, x, flag='unsupervised'):
        flat = self.encoder(x).view(-1, self.conv_channel * self.conv_feat_num)
        if flag == 'supervised':
            return flat
        return self.bn(self.mapping(flat))
class Bottleneck(nn.Module):
    """ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4) + identity add."""
    expansion = 4

    def __init__(self, in_channels, out_channels, i_downsample=None, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
        self.batch_norm1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.batch_norm2 = nn.BatchNorm2d(out_channels)
        self.conv3 = nn.Conv2d(out_channels, out_channels*self.expansion, kernel_size=1, stride=1, padding=0)
        self.batch_norm3 = nn.BatchNorm2d(out_channels*self.expansion)
        self.i_downsample = i_downsample
        self.stride = stride
        self.relu = nn.ReLU()

    def forward(self, x):
        shortcut = x.clone()
        out = self.relu(self.batch_norm1(self.conv1(x)))
        out = self.relu(self.batch_norm2(self.conv2(out)))
        out = self.batch_norm3(self.conv3(out))
        # Project the shortcut when the shapes differ.
        if self.i_downsample is not None:
            shortcut = self.i_downsample(shortcut)
        out += shortcut
        return self.relu(out)
class Block(nn.Module):
    """Basic ResNet block: two 3x3 convs with an identity shortcut."""
    expansion = 1

    def __init__(self, in_channels, out_channels, i_downsample=None, stride=1):
        super(Block, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, stride=1, bias=False)
        self.batch_norm1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, stride=stride, bias=False)
        self.batch_norm2 = nn.BatchNorm2d(out_channels)
        self.i_downsample = i_downsample
        self.stride = stride
        self.relu = nn.ReLU()

    def forward(self, x):
        shortcut = x.clone()
        out = self.relu(self.batch_norm1(self.conv1(x)))
        out = self.batch_norm2(self.conv2(out))
        if self.i_downsample is not None:
            shortcut = self.i_downsample(shortcut)
        out = out + shortcut
        return self.relu(out)
class ResNet_Parrallel(nn.Module):
    """Two ResNet encoder branches sharing one linear classification head."""

    def __init__(self, ResBlock, layer_list):
        super(ResNet_Parrallel, self).__init__()
        self.encoder_1 = ResNet_encoder(ResBlock, layer_list)
        self.encoder_2 = ResNet_encoder(ResBlock, layer_list)  # embeddings are BN'd
        self.classifier = nn.Linear(512*ResBlock.expansion, 14)

    def forward(self, x1, x2, flag='unsupervised'):
        if flag == 'supervised':
            feat_a = self.encoder_1(x1, flag=flag)
            feat_b = self.encoder_2(x2, flag=flag)
            return self.classifier(feat_a), self.classifier(feat_b)
        return self.encoder_1(x1), self.encoder_2(x2)
class ResNet_encoder(nn.Module):
    """ResNet backbone branch for self-supervised CSI learning.

    A small conv stem (`reshape`) shrinks the (3, 114, 500) CSI input, then a
    standard ResNet (conv1 -> 4 stages -> adaptive avg-pool) yields a
    512*expansion feature. `flag='supervised'` returns that raw feature;
    otherwise it is projected to `hidden_states` dims and batch-normalised.
    """
    def __init__(self, ResBlock, layer_list, hidden_states = 256):
        # ResBlock: Block or Bottleneck class; layer_list: blocks per stage.
        super(ResNet_encoder, self).__init__()
        # Stem that reduces the wide CSI map before the ResNet proper.
        self.reshape = nn.Sequential(
            nn.Conv2d(3,3,(15,23),stride=(3,9)),
            nn.ReLU(),
            nn.Conv2d(3,3,kernel_size=(3,23),stride=1),
            nn.ReLU()
        )
        # Running channel count; mutated by _make_layer as stages are built,
        # so the four _make_layer calls below are order-dependent.
        self.in_channels = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.batch_norm1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.max_pool = nn.MaxPool2d(kernel_size = 3, stride=2, padding=1)
        self.layer1 = self._make_layer(ResBlock, layer_list[0], planes=64)
        self.layer2 = self._make_layer(ResBlock, layer_list[1], planes=128, stride=2)
        self.layer3 = self._make_layer(ResBlock, layer_list[2], planes=256, stride=2)
        self.layer4 = self._make_layer(ResBlock, layer_list[3], planes=512, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1,1))
        # Projection head used only on the unsupervised path.
        self.mapping = nn.Linear(512*ResBlock.expansion, hidden_states)
        self.bn = nn.BatchNorm1d(hidden_states)
    def forward(self, x, flag='unsupervised'):
        x = self.reshape(x)
        x = self.relu(self.batch_norm1(self.conv1(x)))
        x = self.max_pool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # classifier
        x = x.reshape(x.shape[0], -1)
        if flag == 'supervised':
            # Raw backbone feature for the shared classifier head.
            return x
        else:
            # Contrastive embedding: project then batch-normalise.
            x = self.bn(self.mapping(x))
            return x
    def _make_layer(self, ResBlock, blocks, planes, stride=1):
        """Build one ResNet stage of `blocks` blocks; first block may downsample."""
        ii_downsample = None
        layers = []
        # A 1x1 conv shortcut is needed when stride or channel count changes.
        if stride != 1 or self.in_channels != planes*ResBlock.expansion:
            ii_downsample = nn.Sequential(
                nn.Conv2d(self.in_channels, planes*ResBlock.expansion, kernel_size=1, stride=stride),
                nn.BatchNorm2d(planes*ResBlock.expansion)
            )
        layers.append(ResBlock(self.in_channels, planes, i_downsample=ii_downsample, stride=stride))
        # Subsequent blocks in the stage see the expanded channel count.
        self.in_channels = planes*ResBlock.expansion
        for i in range(blocks-1):
            layers.append(ResBlock(self.in_channels, planes))
        return nn.Sequential(*layers)
def ResNet18_Parrallel():
    """Two-branch ResNet-18 (basic blocks, layout [2, 2, 2, 2])."""
    layout = [2, 2, 2, 2]
    return ResNet_Parrallel(Block, layout)
def ResNet50_Parrallel():
    """Two-branch ResNet-50 (bottleneck blocks, layout [3, 4, 6, 3])."""
    layout = [3, 4, 6, 3]
    return ResNet_Parrallel(Bottleneck, layout)
def ResNet101_Parrallel():
    """Two-branch ResNet-101 (bottleneck blocks, layout [3, 4, 23, 3])."""
    layout = [3, 4, 23, 3]
    return ResNet_Parrallel(Bottleneck, layout)
class RNN_Parrallel(nn.Module):
    """Two RNN encoder branches sharing one linear classification head."""

    def __init__(self):
        super(RNN_Parrallel, self).__init__()
        self.encoder_1 = RNN_encoder()
        self.encoder_2 = RNN_encoder()
        self.classifier = nn.Linear(64,14)

    def forward(self, x1, x2, flag='unsupervised'):
        if flag == 'supervised':
            feat_a = self.encoder_1(x1, flag=flag)
            feat_b = self.encoder_2(x2, flag=flag)
            return self.classifier(feat_a), self.classifier(feat_b)
        return self.encoder_1(x1), self.encoder_2(x2)
class RNN_encoder(nn.Module):
    """Vanilla RNN branch: CSI reshaped to (time=500, batch, feat=342);
    the last hidden state is the feature. 'supervised' returns it raw,
    otherwise it is mapped to `hidden_states` dims and batch-normalised."""

    def __init__(self, hidden_states = 256):
        super(RNN_encoder, self).__init__()
        self.encoder = nn.RNN(342,64,num_layers=1)
        self.mapping = nn.Linear(64, hidden_states)
        self.bn = nn.BatchNorm1d(hidden_states)

    def forward(self, x, flag='unsupervised'):
        seq = x.view(-1, 342, 500).permute(2, 0, 1)
        _, hidden = self.encoder(seq)
        feat = hidden[-1]
        if flag == 'supervised':
            return feat
        return self.bn(self.mapping(feat))
class GRU_Parrallel(nn.Module):
    """Two GRU encoder branches sharing one linear classification head."""

    def __init__(self):
        super(GRU_Parrallel, self).__init__()
        self.encoder_1 = GRU_encoder()
        self.encoder_2 = GRU_encoder()
        self.classifier = nn.Linear(64,14)

    def forward(self, x1, x2, flag='unsupervised'):
        if flag == 'supervised':
            feat_a = self.encoder_1(x1, flag=flag)
            feat_b = self.encoder_2(x2, flag=flag)
            return self.classifier(feat_a), self.classifier(feat_b)
        return self.encoder_1(x1), self.encoder_2(x2)
class GRU_encoder(nn.Module):
    """GRU branch over CSI sequences; last hidden state is the feature.
    'supervised' returns it raw, otherwise it is projected + batch-normalised."""

    def __init__(self, hidden_states = 256):
        super(GRU_encoder, self).__init__()
        self.encoder = nn.GRU(342,64,num_layers=1)
        self.mapping = nn.Linear(64, hidden_states)
        self.bn = nn.BatchNorm1d(hidden_states)

    def forward(self, x, flag='unsupervised'):
        seq = x.view(-1, 342, 500).permute(2, 0, 1)  # (time, batch, feat)
        _, hidden = self.encoder(seq)
        feat = hidden[-1]
        if flag == 'supervised':
            return feat
        return self.bn(self.mapping(feat))
class LSTM_Parrallel(nn.Module):
    """Two LSTM encoder branches sharing one linear classification head."""

    def __init__(self):
        super(LSTM_Parrallel, self).__init__()
        self.encoder_1 = LSTM_encoder()
        self.encoder_2 = LSTM_encoder()
        self.classifier = nn.Linear(64,14)

    def forward(self, x1, x2, flag='unsupervised'):
        if flag == 'supervised':
            feat_a = self.encoder_1(x1, flag=flag)
            feat_b = self.encoder_2(x2, flag=flag)
            return self.classifier(feat_a), self.classifier(feat_b)
        return self.encoder_1(x1), self.encoder_2(x2)
class LSTM_encoder(nn.Module):
    """LSTM branch over CSI sequences; last hidden state is the feature.
    'supervised' returns it raw, otherwise it is projected + batch-normalised."""

    def __init__(self, hidden_states = 256):
        super(LSTM_encoder, self).__init__()
        self.encoder = nn.LSTM(342,64,num_layers=1)
        self.mapping = nn.Linear(64, hidden_states)
        self.bn = nn.BatchNorm1d(hidden_states)

    def forward(self, x, flag='unsupervised'):
        seq = x.view(-1, 342, 500).permute(2, 0, 1)  # (time, batch, feat)
        _, (hidden, _) = self.encoder(seq)
        feat = hidden[-1]
        if flag == 'supervised':
            return feat
        return self.bn(self.mapping(feat))
class BiLSTM_Parrallel(nn.Module):
    """Two BiLSTM encoder branches sharing one linear classification head."""

    def __init__(self):
        super(BiLSTM_Parrallel, self).__init__()
        self.encoder_1 = BiLSTM_encoder()
        self.encoder_2 = BiLSTM_encoder()
        self.classifier = nn.Linear(64,14)

    def forward(self, x1, x2, flag='unsupervised'):
        if flag == 'supervised':
            feat_a = self.encoder_1(x1, flag=flag)
            feat_b = self.encoder_2(x2, flag=flag)
            return self.classifier(feat_a), self.classifier(feat_b)
        return self.encoder_1(x1), self.encoder_2(x2)
class BiLSTM_encoder(nn.Module):
    """Bidirectional LSTM branch over CSI sequences.

    NOTE(review): ht[-1] is only the backward direction's final hidden state
    (64-d), matching the 64-d mapping — kept as in the original.
    """

    def __init__(self, hidden_states = 256):
        super(BiLSTM_encoder, self).__init__()
        self.encoder = nn.LSTM(342,64,num_layers=1,bidirectional=True)
        self.mapping = nn.Linear(64, hidden_states)
        self.bn = nn.BatchNorm1d(hidden_states)

    def forward(self, x, flag='unsupervised'):
        seq = x.view(-1, 342, 500).permute(2, 0, 1)  # (time, batch, feat)
        _, (hidden, _) = self.encoder(seq)
        feat = hidden[-1]
        if flag == 'supervised':
            return feat
        return self.bn(self.mapping(feat))
class CNN_GRU_Parrallel(nn.Module):
    """Two CNN+GRU encoder branches sharing one dropout/softmax head."""

    def __init__(self):
        super(CNN_GRU_Parrallel, self).__init__()
        self.encoder_1 = CNN_GRU_encoder()
        self.encoder_2 = CNN_GRU_encoder()
        self.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(128,14),
            nn.Softmax(dim=1)
        )

    def forward(self, x1, x2, flag='unsupervised'):
        if flag == 'supervised':
            feat_a = self.encoder_1(x1, flag=flag)
            feat_b = self.encoder_2(x2, flag=flag)
            return self.classifier(feat_a), self.classifier(feat_b)
        return self.encoder_1(x1), self.encoder_2(x2)
class CNN_GRU_encoder(nn.Module):
    """Per-frame 1-D CNN feature extractor followed by a GRU over 500 frames.

    Each of the 500 time steps (342 CSI values) is encoded independently by
    the Conv1d stack, pooled to 8 values, then the 500-step sequence is fed
    to a GRU whose final hidden state is the feature. `flag='supervised'`
    returns that 128-d feature; otherwise it is projected to `hidden_states`
    dims and batch-normalised.
    """
    def __init__(self,hidden_states = 256):
        super(CNN_GRU_encoder, self).__init__()
        # Per-frame encoder: (1, 342) -> (32, 8) per time step.
        self.encoder = nn.Sequential(
            nn.Conv1d(1,16,12,6),
            nn.ReLU(),
            nn.MaxPool1d(2),
            nn.Conv1d(16,32,7,3),
            nn.ReLU(),
        )
        # Averages over the 32 channels after the permute in forward().
        self.mean = nn.AvgPool1d(32)
        self.gru = nn.GRU(8,128,num_layers=1)
        self.mapping = nn.Linear(128, hidden_states)
        self.bn = nn.BatchNorm1d(hidden_states)
    def forward(self, x, flag='unsupervised'):
        batch_size = len(x)
        # batch x 3 x 114 x 500
        x = x.view(batch_size,3*114,500)
        x = x.permute(0,2,1)
        # batch x 500 x 342
        # Fold time into the batch so the Conv1d runs on every frame at once.
        x = x.reshape(batch_size*500,1, 3*114)
        # (batch x 500) x 1 x 342
        x = self.encoder(x)
        # (batch x 500) x 32 x 8
        # try 32, (32x8)
        # Average across the 32 channels -> 8 values per frame.
        x = x.permute(0,2,1)
        x = self.mean(x)
        x = x.reshape(batch_size, 500, 8)
        # batch x 500 x 8
        x = x.permute(1,0,2)
        # 500 x batch x 8
        _, ht = self.gru(x)
        # classifier
        # Final GRU hidden state is the sequence feature.
        x = ht[-1]
        if flag == 'supervised':
            return x
        else:
            x = self.bn(self.mapping(x))
            return x
class PatchEmbedding(nn.Module):
    """Conv-based patch embedding with learnable CLS token and position table."""

    def __init__(self, in_channels = 1, patch_size_w = 9, patch_size_h = 25, emb_size = 9*25, img_size = 342*500):
        super().__init__()
        self.patch_size_w = patch_size_w
        self.patch_size_h = patch_size_h
        # A conv whose stride equals its kernel turns each patch into one vector.
        self.projection = nn.Sequential(
            nn.Conv2d(in_channels, emb_size,
                      kernel_size = (patch_size_w, patch_size_h),
                      stride = (patch_size_w, patch_size_h)),
            Rearrange('b e (h) (w) -> b (h w) e'),
        )
        self.cls_token = nn.Parameter(torch.randn(1,1,emb_size))
        self.position = nn.Parameter(torch.randn(int(img_size/emb_size) + 1, emb_size))

    def forward(self, x):
        # Restore the (batch, 1, 342, 500) CSI layout before patchifying.
        x = x.view(-1,1,342,500)
        batch = x.shape[0]
        tokens = self.projection(x)
        cls_tokens = repeat(self.cls_token, '() n e -> b n e', b=batch)
        tokens = torch.cat([cls_tokens, tokens], dim=1)
        return tokens + self.position
class MultiHeadAttention(nn.Module):
    """Multi-head self-attention (ViT style) over (batch, tokens, emb_size)."""

    def __init__(self, emb_size = 225, num_heads = 5, dropout = 0.0):
        super().__init__()
        self.emb_size = emb_size
        self.num_heads = num_heads
        self.qkv = nn.Linear(emb_size, emb_size*3)  # fused q/k/v projection
        self.att_drop = nn.Dropout(dropout)
        self.projection = nn.Linear(emb_size, emb_size)

    def forward(self, x, mask = None):
        """`mask` (optional) is a boolean tensor broadcastable to the
        (batch, heads, q, k) logits; False positions are suppressed."""
        qkv = rearrange(self.qkv(x), "b n (h d qkv) -> (qkv) b h n d", h=self.num_heads, qkv=3)
        queries, keys, values = qkv[0], qkv[1], qkv[2]
        energy = torch.einsum('bhqd, bhkd -> bhqk', queries, keys)
        if mask is not None:
            fill_value = torch.finfo(torch.float32).min
            # BUG FIX: Tensor has no `mask_fill` method — the original raised
            # AttributeError whenever a mask was supplied. Use masked_fill.
            energy = energy.masked_fill(~mask, fill_value)
        scaling = self.emb_size ** (1/2)
        # NOTE(review): dividing *after* the softmax (rows no longer sum to 1)
        # is kept from the original; standard attention scales `energy` instead.
        att = F.softmax(energy, dim=-1) / scaling
        att = self.att_drop(att)
        # Weighted sum over the key/value axis.
        out = torch.einsum('bhal, bhlv -> bhav ', att, values)
        out = rearrange(out, "b h n d -> b n (h d)")
        out = self.projection(out)
        return out
class ResidualAdd(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
res = x
x = self.fn(x, **kwargs)
x += res
return x
class FeedForwardBlock(nn.Sequential):
    """Transformer MLP: expand by `expansion`, GELU, dropout, project back."""

    def __init__(self, emb_size, expansion = 4, drop_p = 0.):
        layers = [
            nn.Linear(emb_size, expansion * emb_size),
            nn.GELU(),
            nn.Dropout(drop_p),
            nn.Linear(expansion * emb_size, emb_size),
        ]
        super().__init__(*layers)
class TransformerEncoderBlock(nn.Sequential):
    """Pre-norm transformer block: residual(LN -> MHA -> dropout) then
    residual(LN -> feed-forward -> dropout)."""

    def __init__(self,
                 emb_size = 225,
                 drop_p = 0.5,
                 forward_expansion = 4,
                 forward_drop_p = 0.,
                 **kwargs):
        attention_branch = ResidualAdd(nn.Sequential(
            nn.LayerNorm(emb_size),
            MultiHeadAttention(emb_size, **kwargs),
            nn.Dropout(drop_p),
        ))
        mlp_branch = ResidualAdd(nn.Sequential(
            nn.LayerNorm(emb_size),
            FeedForwardBlock(
                emb_size, expansion=forward_expansion, drop_p=forward_drop_p),
            nn.Dropout(drop_p),
        ))
        super().__init__(attention_branch, mlp_branch)
class TransformerEncoder(nn.Sequential):
    """A stack of `depth` TransformerEncoderBlocks (kwargs forwarded to each)."""

    def __init__(self, depth = 1, **kwargs):
        blocks = (TransformerEncoderBlock(**kwargs) for _ in range(depth))
        super().__init__(*blocks)
class ClassificationHead(nn.Sequential):
    """Mean-pool the token axis, LayerNorm, then project to class logits."""

    def __init__(self, emb_size, num_classes):
        head = [
            Reduce('b n e -> b e', reduction='mean'),
            nn.LayerNorm(emb_size),
            nn.Linear(emb_size, num_classes),
        ]
        super().__init__(*head)
class ViTEncoder(nn.Sequential):
    """Patch embedding + transformer encoder (no classification head).

    `num_classes` is accepted for signature compatibility but unused here.
    """

    def __init__(self,
                 in_channels = 1,
                 patch_size_w = 9,
                 patch_size_h = 25,
                 emb_size = 225,
                 img_size = 342*500,
                 depth = 1,
                 *,
                 num_classes = 14,
                 **kwargs):
        embed = PatchEmbedding(in_channels, patch_size_w, patch_size_h, emb_size, img_size)
        encoder = TransformerEncoder(depth, emb_size=emb_size, **kwargs)
        super().__init__(embed, encoder)
class ViT_Parrallel(nn.Module):
    """Two ViT encoder branches sharing one ClassificationHead."""

    def __init__(self,
                 emb_size = 225,
                 num_classes = 14,
                 ):
        super(ViT_Parrallel, self).__init__()
        self.encoder_1 = ViT_encoder()
        self.encoder_2 = ViT_encoder()
        self.classifier = ClassificationHead(emb_size, num_classes)

    def forward(self, x1, x2, flag='unsupervised'):
        if flag == 'supervised':
            feat_a = self.encoder_1(x1, flag=flag)
            feat_b = self.encoder_2(x2, flag=flag)
            return self.classifier(feat_a), self.classifier(feat_b)
        return self.encoder_1(x1), self.encoder_2(x2)
class ViT_encoder(nn.Module):
    """ViT backbone branch.

    'supervised' returns the raw token sequence; otherwise tokens are
    mean-pooled, mapped to `hidden_states` dims and batch-normalised.
    """

    def __init__(self, hidden_states = 256):
        super(ViT_encoder, self).__init__()
        self.encoder = ViTEncoder()
        self.mapping = nn.Sequential(
            Reduce('b n e -> b e', reduction='mean'),
            nn.Linear(225, hidden_states)
        )
        self.bn = nn.BatchNorm1d(hidden_states)

    def forward(self, x, flag='unsupervised'):
        tokens = self.encoder(x)
        if flag == 'supervised':
            return tokens
        return self.bn(self.mapping(tokens))
| 20,995 | 30.763994 | 128 | py |
WiFi-CSI-Sensing-Benchmark | WiFi-CSI-Sensing-Benchmark-main/util.py | from dataset import *
from UT_HAR_model import *
from NTU_Fi_model import *
from widar_model import *
from self_supervised_model import *
import torch
def _pick_model(model_name, table):
    """Resolve `model_name` to (constructed model, epoch budget) via `table`.

    `table` maps model name -> (zero-arg constructor, train_epoch).
    Raises ValueError on an unknown name (the original fell through and later
    crashed with UnboundLocalError). Prints which model is used, matching the
    original log lines.
    """
    if model_name not in table:
        raise ValueError('unknown model: ' + str(model_name))
    constructor, train_epoch = table[model_name]
    print('using model: ' + model_name)
    return constructor(), train_epoch


def load_data_n_model(dataset_name, model_name, root):
    """Return (train_loader, test_loader, model, train_epoch) for a benchmark run.

    dataset_name: 'UT_HAR_data', 'NTU-Fi-HumanID', 'NTU-Fi_HAR' or 'Widar'.
    model_name:   one of the architecture names accepted by run.py's CLI.
    root:         dataset root directory (e.g. './Data/').
    Raises ValueError for unknown dataset or model names (the original
    silently returned None / crashed with UnboundLocalError).
    """
    classes = {'UT_HAR_data': 7, 'NTU-Fi-HumanID': 14, 'NTU-Fi_HAR': 6, 'Widar': 22}
    if dataset_name == 'UT_HAR_data':
        print('using dataset: UT-HAR DATA')
        data = UT_HAR_dataset(root)
        train_set = torch.utils.data.TensorDataset(data['X_train'], data['y_train'])
        # Validation and test splits are pooled into one evaluation set.
        test_set = torch.utils.data.TensorDataset(
            torch.cat((data['X_val'], data['X_test']), 0),
            torch.cat((data['y_val'], data['y_test']), 0))
        train_loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True, drop_last=True)
        test_loader = torch.utils.data.DataLoader(test_set, batch_size=256, shuffle=False)
        model, train_epoch = _pick_model(model_name, {
            'MLP': (UT_HAR_MLP, 200),
            'LeNet': (UT_HAR_LeNet, 200),
            'ResNet18': (UT_HAR_ResNet18, 200),
            'ResNet50': (UT_HAR_ResNet50, 200),
            'ResNet101': (UT_HAR_ResNet101, 200),
            'RNN': (UT_HAR_RNN, 3000),
            'GRU': (UT_HAR_GRU, 200),
            'LSTM': (UT_HAR_LSTM, 200),
            'BiLSTM': (UT_HAR_BiLSTM, 200),
            'CNN+GRU': (UT_HAR_CNN_GRU, 200),
            'ViT': (UT_HAR_ViT, 200),
        })
        return train_loader, test_loader, model, train_epoch
    if dataset_name == 'NTU-Fi-HumanID':
        print('using dataset: NTU-Fi-HumanID')
        num_classes = classes['NTU-Fi-HumanID']
        # NOTE(review): 'test_amp' feeds the train loader and vice versa —
        # kept as in the original; presumably intentional for HumanID.
        train_loader = torch.utils.data.DataLoader(dataset=CSI_Dataset(root + 'NTU-Fi-HumanID/test_amp/'), batch_size=64, shuffle=True)
        test_loader = torch.utils.data.DataLoader(dataset=CSI_Dataset(root + 'NTU-Fi-HumanID/train_amp/'), batch_size=64, shuffle=False)
        model, train_epoch = _pick_model(model_name, {
            'MLP': (lambda: NTU_Fi_MLP(num_classes), 50),
            'LeNet': (lambda: NTU_Fi_LeNet(num_classes), 50),
            'ResNet18': (lambda: NTU_Fi_ResNet18(num_classes), 50),
            'ResNet50': (lambda: NTU_Fi_ResNet50(num_classes), 50),
            'ResNet101': (lambda: NTU_Fi_ResNet101(num_classes), 50),
            'RNN': (lambda: NTU_Fi_RNN(num_classes), 75),
            'GRU': (lambda: NTU_Fi_GRU(num_classes), 50),
            'LSTM': (lambda: NTU_Fi_LSTM(num_classes), 50),
            'BiLSTM': (lambda: NTU_Fi_BiLSTM(num_classes), 50),
            'CNN+GRU': (lambda: NTU_Fi_CNN_GRU(num_classes), 200),
            'ViT': (lambda: NTU_Fi_ViT(num_classes=num_classes), 50),
        })
        return train_loader, test_loader, model, train_epoch
    if dataset_name == 'NTU-Fi_HAR':
        print('using dataset: NTU-Fi_HAR')
        num_classes = classes['NTU-Fi_HAR']
        train_loader = torch.utils.data.DataLoader(dataset=CSI_Dataset(root + 'NTU-Fi_HAR/train_amp/'), batch_size=64, shuffle=True)
        test_loader = torch.utils.data.DataLoader(dataset=CSI_Dataset(root + 'NTU-Fi_HAR/test_amp/'), batch_size=64, shuffle=False)
        model, train_epoch = _pick_model(model_name, {
            'MLP': (lambda: NTU_Fi_MLP(num_classes), 30),
            'LeNet': (lambda: NTU_Fi_LeNet(num_classes), 30),
            'ResNet18': (lambda: NTU_Fi_ResNet18(num_classes), 30),
            'ResNet50': (lambda: NTU_Fi_ResNet50(num_classes), 30),
            'ResNet101': (lambda: NTU_Fi_ResNet101(num_classes), 30),
            'RNN': (lambda: NTU_Fi_RNN(num_classes), 70),
            'GRU': (lambda: NTU_Fi_GRU(num_classes), 30),
            'LSTM': (lambda: NTU_Fi_LSTM(num_classes), 30),
            'BiLSTM': (lambda: NTU_Fi_BiLSTM(num_classes), 30),
            'CNN+GRU': (lambda: NTU_Fi_CNN_GRU(num_classes), 100),
            'ViT': (lambda: NTU_Fi_ViT(num_classes=num_classes), 30),
        })
        return train_loader, test_loader, model, train_epoch
    if dataset_name == 'Widar':
        print('using dataset: Widar')
        num_classes = classes['Widar']
        train_loader = torch.utils.data.DataLoader(dataset=Widar_Dataset(root + 'Widardata/train/'), batch_size=64, shuffle=True)
        test_loader = torch.utils.data.DataLoader(dataset=Widar_Dataset(root + 'Widardata/test/'), batch_size=128, shuffle=False)
        model, train_epoch = _pick_model(model_name, {
            'MLP': (lambda: Widar_MLP(num_classes), 30),
            'LeNet': (lambda: Widar_LeNet(num_classes), 100),
            'ResNet18': (lambda: Widar_ResNet18(num_classes), 100),
            'ResNet50': (lambda: Widar_ResNet50(num_classes), 100),
            'ResNet101': (lambda: Widar_ResNet101(num_classes), 100),
            'RNN': (lambda: Widar_RNN(num_classes), 500),
            'GRU': (lambda: Widar_GRU(num_classes), 200),
            'LSTM': (lambda: Widar_LSTM(num_classes), 200),
            'BiLSTM': (lambda: Widar_BiLSTM(num_classes), 200),
            'CNN+GRU': (lambda: Widar_CNN_GRU(num_classes), 200),
            'ViT': (lambda: Widar_ViT(num_classes=num_classes), 200),
        })
        return train_loader, test_loader, model, train_epoch
    raise ValueError('unknown dataset: ' + str(dataset_name))
def load_unsupervised_data_n_model(model_name, root):
    """Build loaders and the two-branch model for self-supervised training.

    Pre-training uses the pooled NTU-Fi HAR train+test sets; supervised
    fine-tuning and evaluation use NTU-Fi HumanID. Returns
    (unsupervised_train_loader, supervised_train_loader, test_loader, model).
    Raises ValueError for an unknown `model_name` (the original fell through
    and crashed with UnboundLocalError at the return).
    """
    HAR_train_dataset = CSI_Dataset(root + 'NTU-Fi_HAR/train_amp/')
    HAR_test_dataset = CSI_Dataset(root + 'NTU-Fi_HAR/test_amp/')
    unsupervised_train_dataset = torch.utils.data.ConcatDataset([HAR_train_dataset, HAR_test_dataset])
    unsupervised_train_loader = torch.utils.data.DataLoader(dataset=unsupervised_train_dataset, batch_size=64, shuffle=True)
    # NOTE(review): HumanID train/test directories are intentionally swapped,
    # matching load_data_n_model.
    supervised_train_loader = torch.utils.data.DataLoader(dataset=CSI_Dataset(root + 'NTU-Fi-HumanID/test_amp/'), batch_size=64, shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset=CSI_Dataset(root + 'NTU-Fi-HumanID/train_amp/'), batch_size=64, shuffle=False)
    factories = {
        'MLP': MLP_Parrallel,
        'LeNet': CNN_Parrallel,
        'ResNet18': ResNet18_Parrallel,
        'ResNet50': ResNet50_Parrallel,
        'ResNet101': ResNet101_Parrallel,
        'RNN': RNN_Parrallel,
        'GRU': GRU_Parrallel,
        'LSTM': LSTM_Parrallel,
        'BiLSTM': BiLSTM_Parrallel,
        'CNN+GRU': CNN_GRU_Parrallel,
        'ViT': ViT_Parrallel,
    }
    if model_name not in factories:
        raise ValueError('unknown model: ' + str(model_name))
    factory = factories[model_name]
    # BUG FIX: the original log line for ResNet50 said 'ResNet50_Parralle'
    # (typo); printing the factory's real name fixes it for every entry.
    print('using model: ' + factory.__name__)
    model = factory()
    return unsupervised_train_loader, supervised_train_loader, test_loader, model
WiFi-CSI-Sensing-Benchmark | WiFi-CSI-Sensing-Benchmark-main/widar_model.py | import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
class Widar_MLP(nn.Module):
    """3-layer MLP classifier over flattened (22, 20, 20) Widar BVP input."""

    def __init__(self, num_classes):
        super(Widar_MLP, self).__init__()
        self.fc = nn.Sequential(
            nn.Linear(22*20*20,1024),
            nn.ReLU(),
            nn.Linear(1024,128),
            nn.ReLU(),
            nn.Linear(128,num_classes)
        )

    def forward(self, x):
        return self.fc(x.view(-1, 22*20*20))
class Widar_LeNet(nn.Module):
    """LeNet-style CNN for (22, 20, 20) Widar BVP input."""

    def __init__(self, num_classes):
        super(Widar_LeNet, self).__init__()
        # input size: (22, 20, 20) -> (96, 4, 4)
        self.encoder = nn.Sequential(
            nn.Conv2d(22,32,6,stride=2),
            nn.ReLU(True),
            nn.Conv2d(32,64,3,stride=1),
            nn.ReLU(True),
            nn.Conv2d(64,96,3,stride=1),
            nn.ReLU(True),
        )
        self.fc = nn.Sequential(
            nn.Linear(96*4*4,128),
            nn.ReLU(),
            nn.Linear(128,num_classes)
        )

    def forward(self, x):
        feat = self.encoder(x).view(-1, 96*4*4)
        return self.fc(feat)
class Bottleneck(nn.Module):
    """ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand (x4) + identity add."""
    expansion = 4

    def __init__(self, in_channels, out_channels, i_downsample=None, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)
        self.batch_norm1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.batch_norm2 = nn.BatchNorm2d(out_channels)
        self.conv3 = nn.Conv2d(out_channels, out_channels*self.expansion, kernel_size=1, stride=1, padding=0)
        self.batch_norm3 = nn.BatchNorm2d(out_channels*self.expansion)
        self.i_downsample = i_downsample
        self.stride = stride
        self.relu = nn.ReLU()

    def forward(self, x):
        shortcut = x.clone()
        out = self.relu(self.batch_norm1(self.conv1(x)))
        out = self.relu(self.batch_norm2(self.conv2(out)))
        out = self.batch_norm3(self.conv3(out))
        # Project the shortcut when the shapes differ.
        if self.i_downsample is not None:
            shortcut = self.i_downsample(shortcut)
        out += shortcut
        return self.relu(out)
class Block(nn.Module):
    """Basic ResNet block: two 3x3 convs with an identity shortcut."""
    expansion = 1

    def __init__(self, in_channels, out_channels, i_downsample=None, stride=1):
        super(Block, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, stride=1, bias=False)
        self.batch_norm1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, stride=stride, bias=False)
        self.batch_norm2 = nn.BatchNorm2d(out_channels)
        self.i_downsample = i_downsample
        self.stride = stride
        self.relu = nn.ReLU()

    def forward(self, x):
        shortcut = x.clone()
        out = self.relu(self.batch_norm1(self.conv1(x)))
        out = self.batch_norm2(self.conv2(out))
        if self.i_downsample is not None:
            shortcut = self.i_downsample(shortcut)
        out = out + shortcut
        return self.relu(out)
class Widar_ResNet(nn.Module):
    """ResNet classifier for Widar BVP input.

    A transposed-conv stem upsamples (22, 20, 20) input into a 3-channel
    image-like map, then a standard ResNet (conv1 -> 4 stages -> avg-pool ->
    fc) produces class logits.
    """
    def __init__(self, ResBlock, layer_list, num_classes):
        # ResBlock: Block or Bottleneck class; layer_list: blocks per stage.
        super(Widar_ResNet, self).__init__()
        # Stem that grows the small BVP map before the ResNet proper.
        self.reshape = nn.Sequential(
            nn.ConvTranspose2d(22,3,7,stride=1),
            nn.ReLU(),
            nn.ConvTranspose2d(3,3,kernel_size=7,stride=1),
            nn.ReLU()
        )
        # Running channel count; mutated by _make_layer as stages are built,
        # so the four _make_layer calls below are order-dependent.
        self.in_channels = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.batch_norm1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU()
        self.max_pool = nn.MaxPool2d(kernel_size = 3, stride=2, padding=1)
        self.layer1 = self._make_layer(ResBlock, layer_list[0], planes=64)
        self.layer2 = self._make_layer(ResBlock, layer_list[1], planes=128, stride=2)
        self.layer3 = self._make_layer(ResBlock, layer_list[2], planes=256, stride=2)
        self.layer4 = self._make_layer(ResBlock, layer_list[3], planes=512, stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1,1))
        self.fc = nn.Linear(512*ResBlock.expansion, num_classes)
    def forward(self, x):
        x = self.reshape(x)
        x = self.relu(self.batch_norm1(self.conv1(x)))
        x = self.max_pool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # Flatten the pooled feature before classification.
        x = x.reshape(x.shape[0], -1)
        x = self.fc(x)
        return x
    def _make_layer(self, ResBlock, blocks, planes, stride=1):
        """Build one ResNet stage of `blocks` blocks; first block may downsample."""
        ii_downsample = None
        layers = []
        # A 1x1 conv shortcut is needed when stride or channel count changes.
        if stride != 1 or self.in_channels != planes*ResBlock.expansion:
            ii_downsample = nn.Sequential(
                nn.Conv2d(self.in_channels, planes*ResBlock.expansion, kernel_size=1, stride=stride),
                nn.BatchNorm2d(planes*ResBlock.expansion)
            )
        layers.append(ResBlock(self.in_channels, planes, i_downsample=ii_downsample, stride=stride))
        # Subsequent blocks in the stage see the expanded channel count.
        self.in_channels = planes*ResBlock.expansion
        for i in range(blocks-1):
            layers.append(ResBlock(self.in_channels, planes))
        return nn.Sequential(*layers)
def Widar_ResNet18(num_classes):
    """ResNet-18 variant: basic blocks with [2, 2, 2, 2] stage depths."""
    return Widar_ResNet(Block, [2, 2, 2, 2], num_classes=num_classes)
def Widar_ResNet50(num_classes):
    """ResNet-50 variant: bottleneck blocks with [3, 4, 6, 3] stage depths."""
    return Widar_ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes)
def Widar_ResNet101(num_classes):
    """ResNet-101 variant: bottleneck blocks with [3, 4, 23, 3] stage depths."""
    return Widar_ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes)
class Widar_RNN(nn.Module):
    """Vanilla RNN classifier: treats the 22 CSI channels as a length-22
    sequence of 400-dim vectors and classifies from the final hidden state."""

    def __init__(self, num_classes):
        super(Widar_RNN, self).__init__()
        self.rnn = nn.RNN(400, 64, num_layers=1)
        self.fc = nn.Linear(64, num_classes)

    def forward(self, x):
        # (batch, 22, 20, 20) -> (seq=22, batch, feature=400)
        seq = x.view(-1, 22, 400).permute(1, 0, 2)
        _, hidden = self.rnn(seq)
        return self.fc(hidden[-1])
class Widar_GRU(nn.Module):
    """GRU classifier over the 22-step CSI sequence; uses the final hidden state."""

    def __init__(self, num_classes):
        super(Widar_GRU, self).__init__()
        self.gru = nn.GRU(400, 64, num_layers=1)
        self.fc = nn.Linear(64, num_classes)

    def forward(self, x):
        # (batch, 22, 20, 20) -> (seq=22, batch, feature=400)
        seq = x.view(-1, 22, 400).permute(1, 0, 2)
        _, hidden = self.gru(seq)
        return self.fc(hidden[-1])
class Widar_LSTM(nn.Module):
    """LSTM classifier over the 22-step CSI sequence; uses the final hidden state."""

    def __init__(self, num_classes):
        super(Widar_LSTM, self).__init__()
        self.lstm = nn.LSTM(400, 64, num_layers=1)
        self.fc = nn.Linear(64, num_classes)

    def forward(self, x):
        # (batch, 22, 20, 20) -> (seq=22, batch, feature=400)
        seq = x.view(-1, 22, 400).permute(1, 0, 2)
        _, (hidden, _) = self.lstm(seq)
        return self.fc(hidden[-1])
class Widar_BiLSTM(nn.Module):
    """Bidirectional LSTM classifier over the 22-step CSI sequence.

    Fix: the original classified from ``ht[-1]`` alone, i.e. only the
    backward direction's final hidden state, discarding the forward
    direction of the BiLSTM entirely. We now concatenate the last forward
    and backward hidden states (128 features) before the linear layer so
    both reading directions contribute. The public interface (constructor
    argument and forward() output shape) is unchanged.
    NOTE(review): this changes the ``fc`` weight shape, so checkpoints
    saved with the old layout will not load — retrain or migrate.
    """

    def __init__(self, num_classes):
        super(Widar_BiLSTM, self).__init__()
        self.lstm = nn.LSTM(400, 64, num_layers=1, bidirectional=True)
        # 2 * hidden_size: forward and backward final states concatenated.
        self.fc = nn.Linear(128, num_classes)

    def forward(self, x):
        # (batch, 22, 20, 20) -> (seq=22, batch, feature=400)
        x = x.view(-1, 22, 400)
        x = x.permute(1, 0, 2)
        _, (ht, ct) = self.lstm(x)
        # ht: (num_directions, batch, 64); ht[-2] = forward, ht[-1] = backward.
        outputs = self.fc(torch.cat([ht[-2], ht[-1]], dim=1))
        return outputs
class Widar_CNN_GRU(nn.Module):
    """CNN + GRU hybrid: a small CNN embeds each of the 22 CSI channels
    independently, then a GRU aggregates the 22 embeddings as a sequence."""

    def __init__(self, num_classes):
        super(Widar_CNN_GRU, self).__init__()
        # Per-channel 1x20x20 -> 16x3x3 feature extractor.
        self.encoder = nn.Sequential(
            nn.Conv2d(1, 8, 6, 2),
            nn.ReLU(),
            nn.Conv2d(8, 16, 3, 1),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        # Project each channel's CNN features to a 64-dim embedding.
        self.fc = nn.Sequential(
            nn.Linear(16 * 3 * 3, 64),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(64, 64),
            nn.ReLU(),
        )
        self.gru = nn.GRU(64, 128, num_layers=1)
        self.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(128, num_classes),
            nn.Softmax(dim=1)
        )

    def forward(self, x):
        batch_size = len(x)
        # Fold the 22 channels into the batch axis: (batch*22, 1, 20, 20).
        feats = self.encoder(x.view(batch_size * 22, 1, 20, 20))
        # (batch*22, 16, 3, 3) -> (batch*22, 64)
        feats = self.fc(feats.view(-1, 16 * 3 * 3))
        # Rebuild the channel sequence: (seq=22, batch, 64).
        feats = feats.view(-1, 22, 64).permute(1, 0, 2)
        _, hidden = self.gru(feats)
        return self.classifier(hidden[-1])
class PatchEmbedding(nn.Module):
    """Split a (1, 22, 400) CSI "image" into patches and embed them,
    prepending a learnable [CLS] token and adding position embeddings."""

    def __init__(self, in_channels = 1, patch_size_w = 2, patch_size_h = 40, emb_size = 2*40, img_size = 22*400):
        self.patch_size_w = patch_size_w
        self.patch_size_h = patch_size_h
        super().__init__()
        # Patchify + embed in a single strided convolution, then flatten to tokens.
        self.projection = nn.Sequential(
            nn.Conv2d(in_channels, emb_size, kernel_size = (patch_size_w, patch_size_h), stride = (patch_size_w, patch_size_h)),
            Rearrange('b e (h) (w) -> b (h w) e'),
        )
        self.cls_token = nn.Parameter(torch.randn(1, 1, emb_size))
        # One position vector per patch plus one for the [CLS] token.
        self.position = nn.Parameter(torch.randn(int(img_size / emb_size) + 1, emb_size))

    def forward(self, x):
        x = x.view(-1, 1, 22, 400)
        b, _, _, _ = x.shape
        tokens = self.projection(x)
        # Broadcast the single learned [CLS] token across the batch.
        cls_tokens = repeat(self.cls_token, '() n e -> b n e', b=b)
        tokens = torch.cat([cls_tokens, tokens], dim=1)
        tokens += self.position
        return tokens
class MultiHeadAttention(nn.Module):
    """Multi-head self-attention (pre-norm ViT style).

    Fixes relative to the original:
    * ``energy.mask_fill`` is not a Tensor method, so passing any mask
      raised ``AttributeError``; use in-place ``masked_fill_``.
    * The sqrt(d) scaling was applied *after* the softmax, which breaks
      the property that attention weights sum to 1. Standard scaled
      dot-product attention divides the logits *before* the softmax.
    """

    def __init__(self, emb_size = 80, num_heads = 5, dropout = 0.0):
        super().__init__()
        self.emb_size = emb_size
        self.num_heads = num_heads
        # One projection producing queries, keys and values at once.
        self.qkv = nn.Linear(emb_size, emb_size*3)
        self.att_drop = nn.Dropout(dropout)
        self.projection = nn.Linear(emb_size, emb_size)

    def forward(self, x, mask = None):
        # (batch, tokens, 3*emb) -> 3 x (batch, heads, tokens, head_dim)
        qkv = rearrange(self.qkv(x), "b n (h d qkv) -> (qkv) b h n d", h=self.num_heads, qkv=3)
        queries, keys, values = qkv[0], qkv[1], qkv[2]
        energy = torch.einsum('bhqd, bhkd -> bhqk', queries, keys)
        if mask is not None:
            fill_value = torch.finfo(torch.float32).min
            energy.masked_fill_(~mask, fill_value)
        scaling = self.emb_size ** (1/2)
        # Scale the logits before softmax so weights form a proper distribution.
        att = F.softmax(energy / scaling, dim=-1)
        att = self.att_drop(att)
        # Weighted sum of values per head, then merge the heads back together.
        out = torch.einsum('bhal, bhlv -> bhav ', att, values)
        out = rearrange(out, "b h n d -> b n (h d)")
        out = self.projection(out)
        return out
class ResidualAdd(nn.Module):
    """Wrap ``fn`` with an additive skip connection: x -> fn(x) + x."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, **kwargs):
        shortcut = x
        out = self.fn(x, **kwargs)
        out += shortcut
        return out
class FeedForwardBlock(nn.Sequential):
    """Transformer MLP block: expand, GELU, dropout, project back down."""

    def __init__(self, emb_size, expansion = 4, drop_p = 0.):
        hidden = expansion * emb_size
        super().__init__(
            nn.Linear(emb_size, hidden),
            nn.GELU(),
            nn.Dropout(drop_p),
            nn.Linear(hidden, emb_size),
        )
class TransformerEncoderBlock(nn.Sequential):
    """Pre-norm transformer encoder block: a residual attention sub-block
    followed by a residual feed-forward sub-block."""

    def __init__(self,
                 emb_size = 80,
                 drop_p = 0.5,
                 forward_expansion = 4,
                 forward_drop_p = 0.,
                 ** kwargs):
        attention = ResidualAdd(nn.Sequential(
            nn.LayerNorm(emb_size),
            MultiHeadAttention(emb_size, **kwargs),
            nn.Dropout(drop_p)
        ))
        feed_forward = ResidualAdd(nn.Sequential(
            nn.LayerNorm(emb_size),
            FeedForwardBlock(
                emb_size, expansion=forward_expansion, drop_p=forward_drop_p),
            nn.Dropout(drop_p)
        ))
        super().__init__(attention, feed_forward)
class TransformerEncoder(nn.Sequential):
    """Stack of ``depth`` transformer encoder blocks."""

    def __init__(self, depth = 1, **kwargs):
        blocks = [TransformerEncoderBlock(**kwargs) for _ in range(depth)]
        super().__init__(*blocks)
class ClassificationHead(nn.Sequential):
    """Mean-pool the token dimension, layer-normalise, project to class logits."""
    def __init__(self, emb_size, num_classes):
        super().__init__(
            Reduce('b n e -> b e', reduction='mean'),
            nn.LayerNorm(emb_size),
            nn.Linear(emb_size, num_classes))
class Widar_ViT(nn.Sequential):
    """ViT classifier for Widar CSI: patch embedding -> transformer encoder
    -> classification head. ``num_classes`` is keyword-only."""
    def __init__(self,
                in_channels = 1,
                patch_size_w = 2,
                patch_size_h = 40,
                emb_size = 80,
                img_size = 22*400,
                depth = 1,
                *,
                num_classes,
                **kwargs):
        super().__init__(
            PatchEmbedding(in_channels, patch_size_w, patch_size_h, emb_size, img_size),
            TransformerEncoder(depth, emb_size=emb_size, **kwargs),
            ClassificationHead(emb_size, num_classes)
        )
| 12,936 | 32.866492 | 128 | py |
WiFi-CSI-Sensing-Benchmark | WiFi-CSI-Sensing-Benchmark-main/self_supervised.py | import torch
import torch.optim as optim
import random
import torch.nn as nn
from util import load_unsupervised_data_n_model
import argparse
from torch.autograd import Variable
class EntLoss(nn.Module):
    """TWIST-style self-supervised loss with a KDE (PKT) similarity term.

    Combines a symmetric KL consistency term between the two views, two
    sharpened-entropy terms (per-sample entropy ``eh`` minimised, batch-mean
    entropy ``he`` maximised), and a cosine-similarity distribution-matching
    term. ``forward`` returns a dict of all partial losses; the training
    script optimises ``loss['final-kde']``.
    """

    def __init__(self, args, lam1, lam2, pqueue=None):
        super(EntLoss, self).__init__()
        self.lam1 = lam1
        self.lam2 = lam2
        self.pqueue = pqueue
        self.args = args

    def forward(self, feat1, feat2, use_queue=False):
        p1 = torch.nn.functional.softmax(feat1, dim=-1)
        p2 = torch.nn.functional.softmax(feat2, dim=-1)
        loss = dict()
        # Symmetric KL between the two views' predicted distributions.
        loss['kl'] = 0.5 * (KL(p1, p2, self.args) + KL(p2, p1, self.args))
        # Temperature-sharpened distributions for the entropy terms.
        sharp1 = torch.nn.functional.softmax(feat1 / self.args.tau, dim=-1)
        sharp2 = torch.nn.functional.softmax(feat2 / self.args.tau, dim=-1)
        loss['eh'] = 0.5 * (EH(sharp1, self.args) + EH(sharp2, self.args))
        # whether use historical data
        loss['he'] = 0.5 * (HE(sharp1, self.args) + HE(sharp2, self.args))
        # TWIST Loss
        loss['final'] = loss['kl'] + ((1 + self.lam1) * loss['eh'] - self.lam2 * loss['he'])
        # probability distribution (PKT by Kernel Density Estimation)
        loss['kde'] = cosine_similarity_loss(feat1, feat2)
        # nuclear-norm term (computed but not included in 'final-kde' below)
        loss['n-norm'] = -0.5 * (torch.norm(sharp1, 'nuc') + torch.norm(sharp2, 'nuc')) * 0.001
        loss['final-kde'] = loss['kde'] * 100 + loss['final']
        return loss
def KL(probs1, probs2, args):
    """Mean KL divergence KL(probs1 || probs2) over the batch.

    ``args.EPS`` stabilises the logarithms against zero probabilities.
    """
    per_sample = (probs1 * (probs1 + args.EPS).log() - probs1 * (probs2 + args.EPS).log()).sum(dim=1)
    return per_sample.mean()
def CE(probs1, probs2, args):
    """Mean cross-entropy H(probs1, probs2) over the batch."""
    per_sample = -(probs1 * (probs2 + args.EPS).log()).sum(dim=1)
    return per_sample.mean()
def HE(probs, args):
    """Entropy of the batch-mean distribution (diversity across samples)."""
    mean_probs = probs.mean(dim=0)
    return -(mean_probs * (mean_probs + args.EPS).log()).sum()
def EH(probs, args):
    """Mean per-sample entropy (confidence of individual predictions)."""
    per_sample = -(probs * (probs + args.EPS).log()).sum(dim=1)
    return per_sample.mean()
def cosine_similarity_loss(output_net, target_net, eps=0.0000001):
    """PKT loss: KL divergence between the pairwise cosine-similarity
    distributions of two batches of feature vectors.

    Rows are L2-normalised (NaNs from zero rows are zeroed out), pairwise
    cosine similarities are shifted into [0, 1] and row-normalised into
    probability distributions, and KL(target || output) is returned.
    """
    # Normalise each row; guard zero rows against division blow-ups.
    out_norm = torch.sqrt(torch.sum(output_net ** 2, dim=1, keepdim=True))
    output_net = output_net / (out_norm + eps)
    output_net[output_net != output_net] = 0  # NaN -> 0
    tgt_norm = torch.sqrt(torch.sum(target_net ** 2, dim=1, keepdim=True))
    target_net = target_net / (tgt_norm + eps)
    target_net[target_net != target_net] = 0
    # Pairwise cosine similarities within each batch.
    model_similarity = torch.mm(output_net, output_net.transpose(0, 1))
    target_similarity = torch.mm(target_net, target_net.transpose(0, 1))
    # Shift from [-1, 1] into [0, 1].
    model_similarity = (model_similarity + 1.0) / 2.0
    target_similarity = (target_similarity + 1.0) / 2.0
    # Row-normalise into probability distributions.
    model_similarity = model_similarity / torch.sum(model_similarity, dim=1, keepdim=True)
    target_similarity = target_similarity / torch.sum(target_similarity, dim=1, keepdim=True)
    # KL divergence between the two similarity distributions.
    return torch.mean(target_similarity * torch.log((target_similarity + eps) / (model_similarity + eps)))
def gaussian_noise(csi, epsilon):
    """Additively perturb a CSI tensor with Gaussian noise (mean 1, std 2).

    Fix: the noise tensor was created with a hard-coded ``.cuda()`` call,
    which crashes on CPU-only machines; it is now allocated on the same
    device as ``csi``. The (3, 114, 500) noise shape is kept so that, as
    before, one noise sample broadcasts across the batch dimension.
    NOTE(review): the shape assumes (3, 114, 500)-shaped CSI samples —
    confirm against the data loader.
    """
    noise = torch.normal(1, 2, size=(3, 114, 500), device=csi.device)
    return csi + epsilon * noise
def main():
    """Two-phase training: self-supervised encoder pre-training with the
    TWIST+KDE loss, then supervised training of the classifier head only.

    Relies on ``load_unsupervised_data_n_model`` (project util) to supply
    the data loaders and a twin-headed model: ``model(x1, x2)`` returns two
    feature views; ``model(x, x, flag='supervised')`` returns two logits.
    """
    learning_rate = 1e-3
    parser = argparse.ArgumentParser('Self-Supervised')
    parser.add_argument('--tau', type=float, default=1.0, metavar='LR')
    parser.add_argument('--EPS', type=float, default=1e-5, help='episillon')
    parser.add_argument('--weight-decay', type=float, default=1.5e-6, help='weight decay (default: 1e-4)')
    parser.add_argument('--lam1', type=float, default=0.0, metavar='LR')
    parser.add_argument('--lam2', type=float, default=1.0, metavar='LR')
    parser.add_argument('--local_crops_number', type=int, default=12)
    parser.add_argument('--min1', type=float, default=0.4, metavar='LR')
    parser.add_argument('--max1', type=float, default=1.0, metavar='LR')
    parser.add_argument('--min2', type=float, default=0.05, metavar='LR')
    parser.add_argument('--max2', type=float, default=0.4, metavar='LR')
    parser.add_argument('--gpu', type=int, default=1, metavar='gpu')
    parser.add_argument('--eval', type=str, default='no', metavar='gpu')
    parser.add_argument('--model', choices = ['MLP','LeNet','ResNet18','ResNet50','ResNet101','RNN','GRU','LSTM','BiLSTM','CNN+GRU','ViT'])
    args = parser.parse_args()
    # Crop-scale ranges read downstream by the augmentation pipeline.
    args.global_crops_scale = (args.min1, args.max1)
    args.local_crops_scale = (args.min2, args.max2)
    # lam1/lam2 are fixed here (0.0, 0.5) rather than taken from the CLI flags.
    criterion = EntLoss(args, 0.0, 0.5)
    root = "./Data/"
    unsupervised_train_loader, supervised_train_loader, test_dataloader, model = load_unsupervised_data_n_model(args.model,root)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    #######################################
    # self-supervised training
    print ('Self-supervised encoder training')
    optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate, weight_decay=args.weight_decay)
    for epoch in range(100):
        total_loss = 0
        kl_loss = 0
        eh_loss = 0
        he_loss = 0
        kde_loss = 0
        for data in unsupervised_train_loader:
            x, y = data
            x, y = x.to(device), y.to(device)
            # Two noisy views of the same sample with random noise strengths.
            x1 = gaussian_noise(x, random.uniform(0, 2.0))
            x2 = gaussian_noise(x, random.uniform(0.1, 2.0))
            # ===================forward=====================
            feat_x1, feat_x2 = model(x1, x2)
            loss = criterion(feat_x1, feat_x2)
            loss_kl = loss['kl']
            loss_eh = loss['eh']
            loss_he = loss['he']
            loss_kde = loss['kde']
            # Optimise the combined KDE + TWIST objective.
            loss = loss['final-kde']
            # ===================backward====================
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # ===================log========================
            total_loss += loss.data
            kl_loss += loss_kl.data
            eh_loss += loss_eh.data
            he_loss += loss_he.data
            kde_loss += loss_kde.data
        print('epoch [{}/{}], total loss:{:.4f},kl loss:{:.4f},eh loss:{:.4f},he loss:{:.4f},kde loss:{:.4f}'
              .format(epoch+1,100, total_loss, kl_loss, eh_loss, he_loss, kde_loss))
    #######################################
    #######################################
    # test
    def test():
        # Evaluate both classifier heads on the held-out set.
        model.eval()
        correct_1, correct_2 = 0, 0
        total = 0
        with torch.no_grad():
            for data in test_dataloader:
                x, y = data
                x, y = x.to(device), y.to(device)
                y1, y2 = model(x, x, flag='supervised')
                _, pred_1 = torch.max(y1.data, 1)
                _, pred_2 = torch.max(y2.data, 1)
                total += y.size(0)
                correct_1 += (pred_1 == y).sum().item()
                correct_2 += (pred_2 == y).sum().item()
            print('Test accuracy: {:.2f}%, {:.2f}%'.format(100 * correct_1 / total, 100 * correct_2 / total))
    #######################################
    ##################################
    # supervised learning
    print ('Supervised classifier training')
    # Only the classifier head is trained; the encoder stays frozen-in-practice
    # (its parameters are simply not given to this optimizer).
    optimizer_supervised = torch.optim.Adam(model.classifier.parameters(), lr=learning_rate, weight_decay=1e-5)
    ce_criterion = nn.CrossEntropyLoss()
    for epoch in range(300):
        model.train()
        total_loss = 0
        for data in supervised_train_loader:
            x, y = data
            x = Variable(x).to(device)
            y = y.type(torch.LongTensor)
            y = y.to(device)
            # ===================forward=====================
            y1, y2 = model(x, x, flag='supervised')
            loss = ce_criterion(y1, y) + ce_criterion(y2, y)
            # ===================backward====================
            optimizer_supervised.zero_grad()
            loss.backward()
            optimizer_supervised.step()
            # ===================log========================
            total_loss += loss.data
        print('epoch [{}/{}], loss:{:.6f}'
              .format(epoch+1, 300, total_loss))
        # test
        if epoch > 250:
            test()
    ##################################
    return
if __name__ == "__main__":
main()
| 8,839 | 39.365297 | 139 | py |
deficient-efficient | deficient-efficient-master/main.py | ''''Writing everything into one script..'''
from __future__ import print_function
import os
import imp
import sys
import time
import json
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
from functools import reduce
from tqdm import tqdm
from tensorboardX import SummaryWriter
from funcs import *
from models.wide_resnet import WideResNet, WRN_50_2
from models.darts import DARTS, Cutout, _data_transforms_cifar10 as darts_transforms
from models.MobileNetV2 import MobileNetV2
# Ensure the checkpoint directory exists (expression form used as a statement).
os.mkdir('checkpoints/') if not os.path.isdir('checkpoints/') else None
parser = argparse.ArgumentParser(description='Student/teacher training')
parser.add_argument('dataset', type=str, choices=['cifar10', 'cifar100', 'imagenet'], help='Choose between Cifar10/100/imagenet.')
parser.add_argument('mode', choices=['student','teacher'], type=str, help='Learn a teacher or a student')
parser.add_argument('--imagenet_loc', default='/disk/scratch_ssd/imagenet',type=str, help='folder containing imagenet train and val folders')
parser.add_argument('--workers', default=2, type=int, help='No. of data loading workers. Make this high for imagenet')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--GPU', default=None, type=str, help='GPU to use')
parser.add_argument('--student_checkpoint', '-s', default='wrn_40_2_student_KT',type=str, help='checkpoint to save/load student')
parser.add_argument('--teacher_checkpoint', '-t', default='wrn_40_2_T',type=str, help='checkpoint to load in teacher')
#network stuff
parser.add_argument('--network', default='WideResNet', type=str, help='network to use')
parser.add_argument('--wrn_depth', default=40, type=int, help='depth for WRN')
parser.add_argument('--wrn_width', default=2, type=float, help='width for WRN')
parser.add_argument('--module', default=None, type=str, help='path to file containing custom Conv and maybe Block module definitions')
parser.add_argument('--blocktype', default='Basic',type=str, help='blocktype used if specify a --conv')
parser.add_argument('--conv', default=None, type=str, help='Conv type')
parser.add_argument('--AT_split', default=1, type=int, help='group splitting for AT loss')
parser.add_argument('--budget', default=None, type=float, help='budget of parameters to use for the network')
#learning stuff
parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--lr_decay_ratio', default=0.2, type=float, help='learning rate decay')
parser.add_argument('--temperature', default=4, type=float, help='temp for KD')
parser.add_argument('--alpha', default=0.0, type=float, help='alpha for KD')
parser.add_argument('--aux_loss', default='AT', type=str, help='AT or SE loss')
parser.add_argument('--beta', default=1e3, type=float, help='beta for AT')
parser.add_argument('--epoch_step', default='[60,120,160]', type=str,
                    help='json list with epochs to drop lr on')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--print_freq', default=10, type=int, help="print stats frequency")
parser.add_argument('--batch_size', default=128, type=int,
                    help='minibatch size')
parser.add_argument('--weight_decay', default=0.0005, type=float)
parser.add_argument('--nocrswd', action='store_true', help='Disable compression ratio scaled weight decay.')
parser.add_argument('--clip_grad', default=None, type=float)
args = parser.parse_args()
# Tensorboard run directory: teacher runs are named after the teacher
# checkpoint; student runs combine both checkpoints and get a numeric
# suffix if the directory already exists.
if args.mode == 'teacher':
    logdir = "runs/%s"%args.teacher_checkpoint
elif args.mode == 'student':
    logdir = "runs/%s.%s"%(args.teacher_checkpoint, args.student_checkpoint)
    append = 0
    while os.path.isdir(logdir+".%i"%append):
        append += 1
    if append > 0:
        logdir = logdir+".%i"%append
writer = SummaryWriter(logdir)
def record_oom(train_func):
    """Decorator used for out-of-memory probing of training functions.

    Runs the wrapped function once, records (argv, result) in
    ``oom_checks.json``, then deliberately aborts by raising
    ``AssertionError("recorded")`` — the wrapped call is never meant to
    continue. A ``RuntimeError`` (e.g. CUDA OOM) is logged as failure;
    an ``AssertionError`` from the wrapped function counts as success.
    NOTE(review): ``assert`` is stripped under ``python -O``, so the final
    abort would silently disappear in optimised runs.
    """
    def wrapper(*args):
        try:
            _ = train_func(*args)
            result = (True, "Success")
        except RuntimeError as e:
            # CUDA OOM (and other runtime errors) surface here -> failure.
            result = (False, str(e))
        except AssertionError as e:
            # An assertion from the wrapped function is treated as success.
            result = (True, "Success")
        except Exception as e:
            # something else that's not a memory error going wrong
            result = (False, str(e))
        logfile = "oom_checks.json"
        # Append this run's outcome to the cumulative JSON log.
        if os.path.exists(logfile):
            with open(logfile, 'r') as f:
                logs = json.load(f)
        else:
            logs = []
        logs.append((sys.argv, result))
        with open(logfile, 'w') as f:
            f.write(json.dumps(logs))
        # Abort on purpose so the caller never continues past the probe.
        assert False, "recorded"
    return wrapper
def train_teacher(net):
    """Train ``net`` for one epoch with plain cross-entropy.

    Reads module-level globals: ``trainloader``, ``use_cuda``,
    ``criterion``, ``optimizer``, ``args``, ``writer``, ``epoch``,
    ``train_losses``/``train_errors``. For DARTS the auxiliary head's
    output is trained jointly by concatenating it onto the batch.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    net.train()
    end = time.time()
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        # measure data loading time
        data_time.update(time.time() - end)
        if use_cuda:
            inputs, targets = inputs.cuda(non_blocking=True), targets.cuda(non_blocking=True)
        if isinstance(net, DARTS):
            # DARTS returns (logits, attention maps, aux logits); train the
            # aux head jointly by duplicating the targets.
            outputs, _, aux = net(inputs)
            outputs = torch.cat([outputs, aux], 0)
            targets = torch.cat([targets, targets], 0)
        else:
            outputs, _ = net(inputs)
        loss = criterion(outputs, targets)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        err1 = 100. - prec1
        err5 = 100. - prec5
        losses.update(loss.item(), inputs.size(0))
        top1.update(err1[0], inputs.size(0))
        top5.update(err5[0], inputs.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if batch_idx % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Error@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                epoch, batch_idx, len(trainloader), batch_time=batch_time,
                data_time=data_time, loss=losses, top1=top1, top5=top5))
    # Log epoch averages to tensorboard and the history lists.
    writer.add_scalar('train_loss', losses.avg, epoch)
    writer.add_scalar('train_top1', top1.avg, epoch)
    writer.add_scalar('train_top5', top5.avg, epoch)
    train_losses.append(losses.avg)
    train_errors.append(top1.avg)
def train_student(net, teach):
    """Train ``net`` for one epoch to mimic ``teach`` via knowledge
    distillation plus attention transfer.

    Reads module-level globals: ``trainloader``, ``optimizer``, ``args``,
    ``writer``, ``epoch``, ``train_losses``/``train_errors``.

    Fixes relative to the original:
    * ``data_time`` was created but never updated, so the printed
      data-loading time was always 0; it is now measured as in
      ``train_teacher``.
    * ``nn.utils.clip_grad_norm`` is deprecated (removed in recent
      PyTorch); use ``clip_grad_norm_``.
    * The "max grad" diagnostic used ``p.grad.max()``, which ignores large
      negative gradients and raises on parameters without gradients; it
      now takes the absolute max and skips grad-less parameters.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    net.train()
    teach.eval()
    end = time.time()
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        # measure data loading time (was missing: data_time stayed at 0)
        data_time.update(time.time() - end)
        inputs = inputs.cuda(non_blocking=True)
        targets = targets.cuda(non_blocking=True)
        if isinstance(net, DARTS):
            outputs, student_AMs, aux = net(inputs)
            # Train the auxiliary head jointly by duplicating the targets.
            if aux is not None:
                outputs_student = torch.cat([outputs, aux], 0)
                targets_plus_aux = torch.cat([targets, targets], 0)
            else:
                outputs_student = outputs
                targets_plus_aux = targets
            with torch.no_grad():
                outputs_teacher, teacher_AMs, _ = teach(inputs)
                if aux is not None:
                    outputs_teacher = torch.cat([outputs_teacher, outputs_teacher], 0)
        else:
            outputs_student, student_AMs = net(inputs)
            outputs = outputs_student
            targets_plus_aux = targets
            with torch.no_grad():
                outputs_teacher, teacher_AMs = teach(inputs)
        # If alpha is 0 then this loss is just a cross entropy.
        loss = distillation(outputs_student, outputs_teacher, targets_plus_aux, args.temperature, args.alpha)
        # Add an attention transfer loss for each intermediate. Let's assume
        # the default is three (as in the original paper) and adjust the beta
        # term accordingly.
        adjusted_beta = (args.beta*3)/len(student_AMs)
        for i in range(len(student_AMs)):
            loss += adjusted_beta * F.mse_loss(student_AMs[i], teacher_AMs[i])
        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        err1 = 100. - prec1
        err5 = 100. - prec5
        losses.update(loss.item(), inputs.size(0))
        top1.update(err1[0], inputs.size(0))
        top5.update(err5[0], inputs.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        if args.clip_grad is not None:
            # Diagnostic: largest absolute gradient entry before clipping.
            max_grad = 0.
            for p in net.parameters():
                if p.grad is None:
                    continue
                g = p.grad.abs().max().item()
                if g > max_grad:
                    max_grad = g
            nn.utils.clip_grad_norm_(net.parameters(), args.clip_grad)
            print("Max grad: ", max_grad)
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if batch_idx % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Error@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                epoch, batch_idx, len(trainloader), batch_time=batch_time,
                data_time=data_time, loss=losses, top1=top1, top5=top5))
    # Log epoch averages to tensorboard and the history lists.
    writer.add_scalar('train_loss', losses.avg, epoch)
    writer.add_scalar('train_top1', top1.avg, epoch)
    writer.add_scalar('train_top5', top5.avg, epoch)
    train_losses.append(losses.avg)
    train_errors.append(top1.avg)
def validate(net, checkpoint=None):
    """Evaluate ``net`` on the validation set; optionally save a checkpoint.

    When ``checkpoint`` is a name, the epoch's metrics are written to
    tensorboard and the full state (weights, args, histories) is saved to
    ``checkpoints/<checkpoint>.t7`` — every call saves, regardless of
    whether the accuracy improved. Reads module-level globals:
    ``valloader``, ``criterion``, ``args``, ``writer``, ``epoch``,
    ``val_losses``/``val_errors``, ``train_losses``/``train_errors``.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    net.eval()
    end = time.time()
    for batch_idx, (inputs, targets) in enumerate(valloader):
        inputs, targets = inputs.cuda(), targets.cuda()
        with torch.no_grad():
            # Variable() is a legacy no-op wrapper in modern PyTorch.
            inputs, targets = Variable(inputs), Variable(targets)
            if isinstance(net, DARTS):
                outputs, _, _ = net(inputs)
            else:
                outputs, _ = net(inputs)
        if isinstance(outputs,tuple):
            outputs = outputs[0]
        loss = criterion(outputs, targets)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
        err1 = 100. - prec1
        err5 = 100. - prec5
        losses.update(loss.item(), inputs.size(0))
        top1.update(err1[0], inputs.size(0))
        top5.update(err5[0], inputs.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if batch_idx % args.print_freq == 0:
            print('validate: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Error@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Error@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                batch_idx, len(valloader), batch_time=batch_time, loss=losses,
                top1=top1, top5=top5))
    print(' * Error@1 {top1.avg:.3f} Error@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))
    if checkpoint:
        writer.add_scalar('val_loss', losses.avg, epoch)
        writer.add_scalar('val_top1', top1.avg, epoch)
        writer.add_scalar('val_top5', top5.avg, epoch)
        val_losses.append(losses.avg)
        val_errors.append(top1.avg)
        # Unwrap DataParallel so the saved state_dict has clean key names.
        if isinstance(net, torch.nn.DataParallel):
            state_dict = net.module.state_dict()
        else:
            state_dict = net.state_dict()
        print('Saving..')
        state = {
            'net': state_dict,
            'epoch': epoch,
            'args': sys.argv,
            'width': args.wrn_width,
            'depth': args.wrn_depth,
            'conv': args.conv,
            'blocktype': args.blocktype,
            'module': args.module,
            'train_losses': train_losses,
            'train_errors': train_errors,
            'val_losses': val_losses,
            'val_errors': val_errors,
        }
        print('SAVED!')
        torch.save(state, 'checkpoints/%s.t7' % checkpoint)
def set_for_budget(eval_network_size, conv_type, budget):
    """DEPRECATED: find a conv hyperparameter hitting a parameter budget.

    The leading ``assert False`` makes everything below unreachable by
    design — the author distrusts the result. The (dead) body searches a
    conv-type-specific hyperparameter range with bounded scalar
    minimisation of ``|budget - eval_network_size(h)|``.
    """
    assert False, "Deprecated this because I don't trust it 100%"
    # set bounds using knowledge of conv_type hyperparam domain
    if 'ACDC' == conv_type:
        bounds = (2, 128)
        post_process = lambda x: int(round(x))  # ACDC takes an integer rank
    elif 'Hashed' == conv_type:
        bounds = (0.001,0.9)
        post_process = lambda x: x # do nothing
    elif 'SepHashed' == conv_type:
        bounds = (0.001,0.9)
        post_process = lambda x: x # do nothing
    elif 'Generic' == conv_type:
        bounds = (0.1,0.9)
        post_process = lambda x: x # do nothing
    elif 'TensorTrain' == conv_type:
        bounds = (0.1,0.9)
        post_process = lambda x: x # do nothing
    elif 'Tucker' == conv_type:
        bounds = (0.1,0.9)
        post_process = lambda x: x # do nothing
    elif 'CP' == conv_type:
        bounds = (0.1,0.9)
        post_process = lambda x: x # do nothing
    else:
        raise ValueError("Don't know: "+conv_type)
    def obj(h):
        # Distance of the resulting parameter count from the budget.
        return abs(budget-eval_network_size(h))
    from scipy.optimize import minimize_scalar
    minimizer = minimize_scalar(obj, bounds=bounds, method='bounded')
    return post_process(minimizer.x)
def n_params(net):
    """Total number of learnable parameter scalars in ``net``.

    Uses ``Tensor.numel`` rather than reducing over ``p.size()``; the old
    ``reduce`` call raised on zero-dimensional (scalar) parameters because
    it had no initial value for an empty size tuple.
    """
    return sum(p.numel() for p in net.parameters())
def darts_defaults(args):
    """Overwrite ``args`` in place with the DARTS CIFAR training defaults."""
    overrides = {
        'batch_size': 96,
        'lr': 0.025,
        'momentum': 0.9,
        'weight_decay': 3e-4,
        'epochs': 600,
    }
    for key, value in overrides.items():
        setattr(args, key, value)
    return args
def imagenet_defaults(args):
    """Overwrite ``args`` in place with the standard ImageNet training defaults."""
    overrides = {
        'batch_size': 256,
        'epochs': 90,
        'lr_decay_ratio': 0.1,
        'weight_decay': 1e-4,
        'epoch_step': '[30,60]',
        'workers': 16,
    }
    for key, value in overrides.items():
        setattr(args, key, value)
    return args
def mobilenetv2_defaults(args):
    """Overwrite ``args`` in place with the MobileNetV2 training defaults."""
    overrides = {
        'batch_size': 256,
        'epochs': 150,
        'lr': 0.05,
        'weight_decay': 4e-5,
        'workers': 16,
    }
    for key, value in overrides.items():
        setattr(args, key, value)
    return args
def get_scheduler(optimizer, epoch_step, args):
    """Build the LR schedule matching ``args.network``.

    WideResNet / WRN_50_2 use step decay at the ``epoch_step`` milestones
    with factor ``args.lr_decay_ratio``; DARTS / MobileNetV2 use cosine
    annealing over ``args.epochs``.

    Raises:
        ValueError: for an unrecognised ``args.network``. (The original
        silently returned ``None``, which only failed later at the first
        ``scheduler.step()`` call.)
    """
    if args.network in ('WideResNet', 'WRN_50_2'):
        return lr_scheduler.MultiStepLR(optimizer, milestones=epoch_step,
                                        gamma=args.lr_decay_ratio)
    if args.network in ('DARTS', 'MobileNetV2'):
        return lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
    raise ValueError('Unknown network: %s' % args.network)
if __name__ == '__main__':
if args.aux_loss == 'AT':
aux_loss = at_loss
elif args.aux_loss == 'SE':
aux_loss = se_loss
if args.network == 'DARTS':
args = darts_defaults(args) # different training hyperparameters
elif args.network == 'WRN_50_2':
args = imagenet_defaults(args)
elif args.network == 'MobileNetV2':
args = mobilenetv2_defaults(args)
print(vars(args))
parallelise = None
if args.GPU is not None:
if args.GPU[0] != '[':
args.GPU = '[' + args.GPU + ']'
args.GPU = [i for i, _ in enumerate(json.loads(args.GPU))]
if len(args.GPU) > 1:
def parallelise(model):
model = torch.nn.DataParallel(model, device_ids=args.GPU)
model.grouped_parameters = model.module.grouped_parameters
return model
else:
os.environ["CUDA_VISIBLE_DEVICES"] = "%i"%args.GPU[0]
use_cuda = torch.cuda.is_available()
assert use_cuda, 'Error: No CUDA!'
val_losses = []
train_losses = []
val_errors = []
train_errors = []
best_acc = 0
start_epoch = 0
epoch_step = json.loads(args.epoch_step)
# Data and loaders
print('==> Preparing data..')
if args.dataset == 'cifar10':
num_classes = 10
if args.network == 'DARTS':
transforms_train, transforms_validate = darts_transforms()
else:
transforms_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),
Cutout(16)])
transforms_validate = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010)),])
trainset = torchvision.datasets.CIFAR10(root='/disk/scratch/datasets/cifar',
train=True, download=False, transform=transforms_train)
valset = torchvision.datasets.CIFAR10(root='/disk/scratch/datasets/cifar',
train=False, download=False, transform=transforms_validate)
elif args.dataset == 'cifar100':
num_classes = 100
if args.network == 'DARTS':
raise NotImplementedError("Could use transforms for CIFAR-10, but not ported yet.")
transforms_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
])
transforms_validate = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),
])
trainset = torchvision.datasets.CIFAR100(root='/disk/scratch/datasets/cifar100',
train=True, download=True, transform=transforms_train)
validateset = torchvision.datasets.CIFAR100(root='/disk/scratch/datasets/cifar100',
train=False, download=True, transform=transforms_validate)
elif args.dataset == 'imagenet':
num_classes = 1000
traindir = os.path.join(args.imagenet_loc, 'train')
valdir = os.path.join(args.imagenet_loc, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
transform_train = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
transform_validate = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
trainset = torchvision.datasets.ImageFolder(traindir, transform_train)
valset = torchvision.datasets.ImageFolder(valdir, transform_validate)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers,
pin_memory = True if args.dataset == 'imagenet' else False)
valloader = torch.utils.data.DataLoader(valset, batch_size=min(100,args.batch_size), shuffle=False,
num_workers=args.workers,
pin_memory=True if args.dataset == 'imagenet' else False)
criterion = nn.CrossEntropyLoss()
# a function for building networks
    def build_network(Conv, Block):
        """Construct the architecture selected by ``args.network`` using the
        given Conv/Block classes (closes over ``args`` and ``num_classes``).

        Returns None for an unrecognised ``args.network`` (no else branch).
        """
        if args.network == 'WideResNet':
            return WideResNet(args.wrn_depth, args.wrn_width, Conv, Block,
                    num_classes=num_classes, dropRate=0, s=args.AT_split)
        elif args.network == 'WRN_50_2':
            return WRN_50_2(Conv)
        elif args.network == 'MobileNetV2':
            return MobileNetV2(Conv)
        elif args.network == 'DARTS':
            return DARTS(Conv, num_classes=num_classes)
# if a budget is specified, figure out what we have to set the
# hyperparameter to
if args.budget is not None:
def eval_network_size(hyperparam):
net = build_network(*what_conv_block(args.conv+"_%s"%hyperparam, args.blocktype, args.module))
return n_params(net)
hyperparam = set_for_budget(eval_network_size, args.conv, args.budget)
args.conv = args.conv + "_%s"%hyperparam
# get the classes implementing the Conv and Blocks we're going to use in
# the network
Conv, Block = what_conv_block(args.conv, args.blocktype, args.module)
    def load_network(loc):
        """Rebuild a network from a checkpoint file and load its weights.

        Returns ``(net, start_epoch)``. The Conv/Block classes are
        reconstructed from the metadata stored in the checkpoint, so the
        loaded net matches the saved architecture rather than the CLI flags.
        """
        net_checkpoint = torch.load(loc)
        start_epoch = net_checkpoint['epoch']
        SavedConv, SavedBlock = what_conv_block(net_checkpoint['conv'],
                net_checkpoint['blocktype'], net_checkpoint['module'])
        net = build_network(SavedConv, SavedBlock).cuda()
        # NOTE(review): saves a fresh-init template on every load — looks
        # like a leftover utility side effect; confirm it is intended.
        torch.save(net.state_dict(), "checkpoints/darts.template.t7")
        net.load_state_dict(net_checkpoint['net'])
        return net, start_epoch
if args.mode == 'teacher':
    # Teacher mode: train the teacher network (optionally resuming from a
    # checkpoint). When not resuming, start_epoch must have been initialised
    # earlier in the file -- presumably to 0; TODO confirm.
    if args.resume:
        print('Mode Teacher: Loading teacher and continuing training...')
        teach, start_epoch = load_network('checkpoints/%s.t7' % args.teacher_checkpoint)
    else:
        print('Mode Teacher: Making a teacher network from scratch and training it...')
        teach = build_network(Conv, Block).cuda()
    if parallelise is not None:
        teach = parallelise(teach)
    # Use grouped parameters (per-group weight decay) unless disabled by flag.
    parameters = teach.grouped_parameters(args.weight_decay) if not args.nocrswd else teach.parameters()
    optimizer = optim.SGD(parameters,
                            lr=args.lr, momentum=args.momentum,
                            weight_decay=args.weight_decay)
    scheduler = get_scheduler(optimizer, epoch_step, args)
    # Linearly ramps DARTS drop-path probability up to 0.2 over training.
    # NOTE(review): denominator is start_epoch+args.epochs, so on resume the
    # ramp target shifts -- verify this is intentional.
    def schedule_drop_path(epoch, net):
        net.drop_path_prob = 0.2 * epoch / (start_epoch+args.epochs)
    # Decay the learning rate depending on the epoch
    # Fast-forward the scheduler to the resume point.
    for e in range(0,start_epoch):
        scheduler.step()
    for epoch in tqdm(range(start_epoch, args.epochs)):
        # scheduler.step() at the top of the epoch (pre-PyTorch-1.1 ordering).
        scheduler.step()
        if args.network == 'DARTS': schedule_drop_path(epoch, teach)
        print('Teacher Epoch %d:' % epoch)
        print('Learning rate is %s' % [v['lr'] for v in optimizer.param_groups][0])
        writer.add_scalar('learning_rate', [v['lr'] for v in optimizer.param_groups][0], epoch)
        train_teacher(teach)
        validate(teach, args.teacher_checkpoint)
elif args.mode == 'student':
    # Student mode: load a frozen teacher, then train a student network
    # against it (knowledge distillation / attention transfer).
    print('Mode Student: First, load a teacher network and convert for (optional) attention transfer')
    teach, _ = load_network('checkpoints/%s.t7' % args.teacher_checkpoint)
    if parallelise is not None:
        teach = parallelise(teach)
    # Very important to explicitly say we require no gradients for the teacher network
    for param in teach.parameters():
        param.requires_grad = False
    # Baseline validation of the teacher before student training starts.
    validate(teach)
    val_losses, val_errors = [], []  # or we'd save the teacher's error as the first entry
    if args.resume:
        print('Mode Student: Loading student and continuing training...')
        student, start_epoch = load_network('checkpoints/%s.t7' % args.student_checkpoint)
    else:
        print('Mode Student: Making a student network from scratch and training it...')
        student = build_network(Conv, Block).cuda()
    if parallelise is not None:
        student = parallelise(student)
    # Use grouped parameters (per-group weight decay) unless disabled by flag.
    parameters = student.grouped_parameters(args.weight_decay) if not args.nocrswd else student.parameters()
    optimizer = optim.SGD(parameters,
                            lr=args.lr, momentum=args.momentum,
                            weight_decay=args.weight_decay)
    scheduler = get_scheduler(optimizer, epoch_step, args)
    # Linearly ramps DARTS drop-path probability up to 0.2 over training.
    # NOTE(review): denominator is start_epoch+args.epochs, so on resume the
    # ramp target shifts -- verify this is intentional.
    def schedule_drop_path(epoch, net):
        net.drop_path_prob = 0.2 * epoch / (start_epoch+args.epochs)
    # Decay the learning rate depending on the epoch
    # Fast-forward the scheduler to the resume point.
    for e in range(0, start_epoch):
        scheduler.step()
    for epoch in tqdm(range(start_epoch, args.epochs)):
        # scheduler.step() at the top of the epoch (pre-PyTorch-1.1 ordering).
        scheduler.step()
        if args.network == 'DARTS': schedule_drop_path(epoch, student)
        print('Student Epoch %d:' % epoch)
        print('Learning rate is %s' % [v['lr'] for v in optimizer.param_groups][0])
        writer.add_scalar('learning_rate', [v['lr'] for v in optimizer.param_groups][0], epoch)
        train_student(student, teach)
        validate(student, args.student_checkpoint)
| 25,508 | 39.426307 | 141 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.