Dataset columns: repo (string, 1-99 chars) | file (string, 13-215 chars) | code (string, 12-59.2M chars) | file_length (int64, 12-59.2M) | avg_line_length (float64, 3.82-1.48M) | max_line_length (int64, 12-2.51M) | extension_type (string, 1 class)
AlignShift
AlignShift-master/nn/converter.py
import torch.nn as nn import functools import torch from ..utiles import _triple_same from collections import OrderedDict #TODO #1#通过名字来跳过相应的层。 #2#2d weighjts 转3d weights #3# sycnb class Converter(): """ Decorator class for converting 2d convolution modules to corresponding conv version in any networks. Args: model (torch.nn.module): model that needs to be converted target_conv_cfg(dict): original 2d conv configuration will be overwrited by the target conv config if original conv atrribute name is in the key of target conv config. additional_forward_fts(list): List of addtional augument names in target conv forward function, default is [] skip_first_conv: if skip applying addtional operation on first conv, default is True first_conv_in_channles:the number of input channels in first conv after converter, default is 1 Warnings: Functions in torch.nn.functional involved in data dimension are not supported Examples: >>> import Converter >>> import torchvision >>> from convs import AlignShiftConv >>> # m is a standard pytorch model >>> m = torchvision.models.resnet18(True) >>> alignshift_conv_cfg = dict(conv_type=AlignShiftConv, n_fold=8, alignshift=True, inplace=True, ref_spacing=0.2, shift_padding_zero=True) >>> m = Converter(m, alignshift_conv_cfg, additional_forward_fts=['thickness'], skip_first_conv=True, first_conv_input_channles=1) >>> # after converted, m is using AlignShiftConv and capable of processing 3D volumes >>> x = torch.rand(batch_size, in_channels, D, H, W) >>> thickness = torch.rand(batch_size, 1) >>> out = m(x, thickness) """ converter_attributes = ['model', 'target_conv_cfg', 'first_conv_in_channles', 'target_conv', 'skip_first_conv', 'additional_forward_fts'] def __init__(self, model, target_conv_cfg, additional_forward_fts=[], skip_first_conv=True, first_conv_input_channles=1): self.target_conv_cfg = target_conv_cfg self.first_conv_in_channles = first_conv_input_channles self.target_conv = target_conv_cfg.pop('conv_type') self.skip_first_conv = skip_first_conv self.model = model self.check_first_conv_channel() preserve_state_dict = model.state_dict() self.model = self.convert_module(self.model, self.target_conv, target_conv_cfg) self.model.load_state_dict(preserve_state_dict,strict=False) # del preserve_state_dict if additional_forward_fts!=[]: self.additional_forward_fts = additional_forward_fts self.hook_model_forward() def _top_forward(self, f): @functools.wraps(f) def wrapper(*args, **kwargs): # Save spacing in Converter instance self.add_kwargs = {} if len(args) > 2: self.add_kwargs.update({k:v for k,v in zip(self.additional_forward_fts[:len(args[2:])], args[2:])}) # In case spacing is not passed or passed as keyword argument self.add_kwargs.update(kwargs) # Execute the vanilla forward function output = f(*args[:2]) return output return wrapper def convert_conv_kwargs(self, kwargs): ''' return target conv configuration combined with original 2d conv configuration ''' kwargs['bias'] = True if kwargs['bias'] is not None or kwargs['bias'] else False kwargs.update(self.target_conv_cfg) if self.skip_first_conv and kwargs.get('in_channels') == self.first_conv_in_channles: kwargs.update({self.target_conv.__name__.lower().replace('conv',''):False}) return kwargs def hook_model_forward(self): self.target_conv.forward = self._module_forward(self.target_conv.forward) self.model.__class__.forward = self._top_forward(self.model.__class__.forward) def _module_forward(self, f): @functools.wraps(f) def wrapper(*args, **kwargs): # Get spacing from AlignWrapper instance 
kwargs.update(self.add_kwargs) return f(*args, **kwargs) return wrapper def check_first_conv_channel(self): ''' change input channle in first conv and load weights ''' for name, m in self.model.named_modules(): if isinstance(m, nn.Conv2d): ori_in_channels = m.weight.shape[1] a = self.first_conv_in_channles // ori_in_channels + 1 b = self.first_conv_in_channles % ori_in_channels + 1 m.weight.data = m.weight.repeat(1,a,1,1).data[:,b//2:b//2-b,...] m.in_channels = self.first_conv_in_channles return def convert_weights_to_3d(self): ''' convert 2d weights to 3d ''' raise NotImplementedError def convert_module(self, module, target_conv, conv_kwargs={}): """ A recursive function. Treat the entire model as a tree and convert each leaf module to target_conv if it's Conv2d, 3d counterparts if it's a pooling or normalization module, trilinear mode if it's a Upsample module. """ for child_name, child in module.named_children(): if isinstance(child, nn.Conv2d): arguments = nn.Conv2d.__init__.__code__.co_varnames[1:] kwargs = {k: getattr(child, k) for k in arguments} kwargs = self.convert_conv_kwargs(kwargs) setattr(module, child_name, target_conv(**kwargs)) elif hasattr(nn, child.__class__.__name__) and \ ('pool' in child.__class__.__name__.lower() or 'norm' in child.__class__.__name__.lower()): if hasattr(nn, child.__class__.__name__.replace('2d', '3d')): TargetClass = getattr(nn, child.__class__.__name__.replace('2d', '3d')) arguments = TargetClass.__init__.__code__.co_varnames[1:] kwargs = {k: getattr(child, k) for k in arguments} if 'adaptive' in child.__class__.__name__.lower(): for k in kwargs.keys(): kwargs[k] = _triple_same(kwargs[k]) setattr(module, child_name, TargetClass(**kwargs)) else: raise Exception('No corresponding module in 3D for 2d module {}'.format(child.__class__.__name__)) elif isinstance(child, nn.Upsample): arguments = nn.Upsample.__init__.__code__.co_varnames[1:] kwargs = {k: getattr(child, k) for k in arguments} kwargs['mode'] = 'trilinear' if kwargs['mode']=='bilinear' else kwargs['mode'] setattr(module, child_name, nn.Upsample(**kwargs)) else: self.convert_module(child, target_conv, conv_kwargs) return module def __getattr__(self, attr): return getattr(self.model, attr) def __setattr__(self, name, value): if name in self.__class__.converter_attributes: return object.__setattr__(self, name, value) else: return setattr(self.model, name, value) def __call__(self, *args, **kwargs): return self.model(*args, **kwargs) def __repr__(self): return self.__class__.__name__ + '(\n' + self.model.__repr__() + '\n)'
file_length: 7,688 | avg_line_length: 45.319277 | max_line_length: 185 | extension_type: py
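Converter.convert_module above swaps every nn.Conv2d in a model for the target conv by walking the module tree with named_children and re-assigning each matching child on its parent with setattr. A minimal, self-contained sketch of that recursive replacement pattern, using a plain nn.Conv3d with a 1xKxK kernel as a stand-in target (an assumption for illustration; the repo plugs in AlignShiftConv and also converts pooling, normalization and Upsample layers):

import torch
import torch.nn as nn

def replace_conv2d(module):
    # Recursively visit children; replace Conv2d leaves, recurse into containers.
    for name, child in module.named_children():
        if isinstance(child, nn.Conv2d):
            new_conv = nn.Conv3d(child.in_channels, child.out_channels,
                                 kernel_size=(1,) + child.kernel_size,
                                 stride=(1,) + child.stride,
                                 padding=(0,) + child.padding,
                                 bias=child.bias is not None)
            setattr(module, name, new_conv)
        else:
            replace_conv2d(child)
    return module

m = replace_conv2d(nn.Sequential(nn.Conv2d(1, 8, 3, padding=1), nn.ReLU()))
print(m(torch.rand(2, 1, 4, 32, 32)).shape)  # torch.Size([2, 8, 4, 32, 32])

The real Converter additionally restores the original 2D weights afterwards via load_state_dict(..., strict=False) and hooks the forward passes so that extra arguments such as thickness reach every converted conv.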
AlignShift
AlignShift-master/nn/models/truncated_densenet3d_alignshift.py
# Ke Yan, Imaging Biomarkers and Computer-Aided Diagnosis Laboratory, from collections import namedtuple import torch import torch.nn.functional as F from torch import nn import torchvision.models as models from torchvision.models.densenet import model_urls import math import torch.utils.model_zoo as model_zoo import re from collections import OrderedDict from mmdet.models.registry import BACKBONES from nn.operators import AlignShiftConv import torch.utils.checkpoint as cp from mmdet.models.utils import build_conv_layer, build_norm_layer # mybn = nn.BatchNorm3d # mybn = nn.SyncBatchNorm norm_cfg = dict(type='SyncBN') def _bn_function_factory(norm, relu, conv, thickness): def bn_function(*inputs): concated_features = torch.cat(inputs, 1) bottleneck_output = conv(relu(norm(concated_features)), thickness) return bottleneck_output return bn_function class _DenseLayer(nn.Sequential): def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, n_fold, memory_efficient=False, ref_thickness=None): super(_DenseLayer, self).__init__() self.add_module('norm1', build_norm_layer(norm_cfg, num_input_features, postfix=1)[1]), self.add_module('relu1', nn.ReLU(inplace=False)), self.add_module('conv1', AlignShiftConv(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, alignshift=True, bias=False, ref_thickness=ref_thickness, n_fold=n_fold,)), self.add_module('norm2', build_norm_layer(norm_cfg, bn_size * growth_rate, postfix=1)[1]), self.add_module('relu2', nn.ReLU(inplace=False)), self.add_module('conv2', AlignShiftConv(bn_size * growth_rate, growth_rate, alignshift=True, kernel_size=3, stride=1, padding=1, bias=False, ref_thickness=ref_thickness, n_fold=n_fold)), self.drop_rate = drop_rate self.memory_efficient = memory_efficient def forward(self, *prev_features, thickness): bn_function = _bn_function_factory(self.norm1, self.relu1, self.conv1, thickness=thickness) if self.memory_efficient:# and any(prev_feature.requires_grad for prev_feature in prev_features):hyadd 这里自己设计节省效率 bottleneck_output = cp.checkpoint(bn_function, *prev_features) else: bottleneck_output = bn_function(*prev_features, thickness=thickness) new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)), thickness) if self.drop_rate > 0: new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) return new_features class _DenseBlock(nn.Module): def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, n_fold, memory_efficient=False, ref_thickness=None): super(_DenseBlock, self).__init__() for i in range(num_layers): layer = _DenseLayer( num_input_features + i * growth_rate, growth_rate=growth_rate, bn_size=bn_size, drop_rate=drop_rate, memory_efficient=memory_efficient, ref_thickness=ref_thickness, n_fold=n_fold ) self.add_module('denselayer%d' % (i + 1), layer) def forward(self, init_features, thickness): features = [init_features] for name, layer in self.named_children(): new_features = layer(*features, thickness=thickness) features.append(new_features) return torch.cat(features, 1) class _Transition(nn.Sequential): def __init__(self, num_input_features, num_output_features): super().__init__() self.add_module('norm', build_norm_layer(norm_cfg, num_input_features, postfix=1)[1]) self.add_module('relu', nn.ReLU(inplace=True)) self.add_module('conv', AlignShiftConv(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False, alignshift=False)) self.add_module('pool', nn.AvgPool3d(kernel_size=[1, 2, 2], stride=[1, 2, 2])) class 
_Reduction_z(nn.Sequential): def __init__(self, input_features, input_slice): super().__init__() self.add_module('reduction_z_conv', nn.Conv3d(input_features, input_features, kernel_size=[input_slice, 1, 1], stride=1, bias=False)) # self.add_module('reduction_z_pooling', nn.AvgPool3d(kernel_size=[input_slice, 1, 1], stride=1)) @BACKBONES.register_module class DenseNetCustomTrunc3dAlign(nn.Module): def __init__(self, out_dim=256, n_cts=3, fpn_finest_layer=1, ref_thickness=2.0, n_fold=8, memory_efficient=True): super().__init__() self.depth = 121 self.feature_upsample = True self.fpn_finest_layer = fpn_finest_layer self.out_dim = out_dim self.n_cts = n_cts self.mid_ct = n_cts//2 self.ref_thickness = ref_thickness self.n_fold = n_fold assert self.depth in [121] if self.depth == 121: num_init_features = 64 growth_rate = 32 block_config = (6, 12, 24) self.in_dim = [64, 256, 512, 1024] bn_size = 4 drop_rate = 0 # First convolution self.conv0 = AlignShiftConv(1, num_init_features, kernel_size=7, stride=2, padding=3, bias=False, alignshift=False) self.norm0 = build_norm_layer(norm_cfg, num_init_features, postfix=1)[1] self.relu0 = nn.ReLU(inplace=True) self.pool0 = nn.MaxPool3d(kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1]) # Each denseblock num_features = num_init_features for i, num_layers in enumerate(block_config): block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, ref_thickness=self.ref_thickness, n_fold=self.n_fold, memory_efficient=memory_efficient) self.add_module('denseblock%d' % (i + 1), block) num_features = num_features + num_layers * growth_rate reductionz = _Reduction_z(num_features, self.n_cts) # normrelu = _StageNormRelu(num_features) # self.add_module('normrelu%d' % (i + 1), normrelu) self.add_module('reductionz%d' % (i + 1), reductionz) if i != len(block_config) - 1: trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2) self.add_module('transition%d' % (i + 1), trans) num_features = num_features // 2 # Final batch norm # self.add_module('norm5', nn.BatchNorm2d(num_features)) # Official init from torch repo. 
for m in self.modules(): if isinstance(m, nn.Conv3d): nn.init.kaiming_normal_(m.weight.data) elif isinstance(m, nn.BatchNorm3d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): m.bias.data.zero_() if self.feature_upsample: for p in range(4, self.fpn_finest_layer - 1, -1): layer = nn.Conv2d(self.in_dim[p - 1], self.out_dim, 1) name = 'lateral%d' % p self.add_module(name, layer) nn.init.kaiming_uniform_(layer.weight, a=1) nn.init.constant_(layer.bias, 0) self.init_weights() # if syncbn: # self = nn.SyncBatchNorm.convert_sync_batchnorm(self) def forward(self, x, thickness): x = self.conv0(x) x = self.norm0(x) x = self.relu0(x) x = self.pool0(x) x = self.denseblock1(x, thickness) # x = self.normrelu1(x) redc1 = self.reductionz1(x) x = self.transition1(x) x = self.denseblock2(x, thickness) # x = self.normrelu2(x) redc2 = self.reductionz2(x) x = self.transition2(x) x = self.denseblock3(x, thickness) # x = self.normrelu3(x) redc3 = self.reductionz3(x) # truncated since here since we find it works better in DeepLesion # ts3 = self.transition3(db3) # db4 = self.denseblock4(ts3) # if self.feature_upsample: ftmaps = [None, redc1.squeeze(2), redc2.squeeze(2), redc3.squeeze(2)] x = self.lateral4(ftmaps[-1]) for p in range(3, self.fpn_finest_layer - 1, -1): x = F.interpolate(x, scale_factor=2, mode="nearest") y = ftmaps[p-1] lateral = getattr(self, 'lateral%d' % p)(y) x += lateral return [x] # else: # return [db3] def init_weights(self, pretrained=True): pattern = re.compile( r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') state_dict = model_zoo.load_url(model_urls['densenet121']) for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = res.group(1) + res.group(2) state_dict[new_key] = state_dict[key] del state_dict[key] state_dict1 = {} for key in list(state_dict.keys()): new_key = key.replace('features.', '') if state_dict[key].dim() == 4: t0 = state_dict[key].shape[1] state_dict1[new_key] = state_dict[key]#.unsqueeze(2)#.repeat((1,1,self.n_cts,1,1))/self.n_cts if t0 == 3: state_dict1[new_key] = state_dict1[new_key][:,1:2,...] else: state_dict1[new_key] = state_dict[key] key = self.load_state_dict(state_dict1, strict=False) #print(key) def freeze(self): for name, param in self.named_parameters(): print('freezing', name) param.requires_grad = False
file_length: 10,288 | avg_line_length: 42.413502 | max_line_length: 140 | extension_type: py
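init_weights in the backbone above loads the ImageNet DenseNet-121 checkpoint, renames the legacy '.norm.1.weight'-style keys to the '.norm1.weight' form expected by the custom layers, strips the 'features.' prefix, and keeps only the middle channel of the 3-channel first-conv weights. A small sketch of the regex remap on one illustrative key (the key string is hypothetical, chosen only to show the rewrite):

import re

pattern = re.compile(
    r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
key = 'features.denseblock1.denselayer1.norm.1.weight'
m = pattern.match(key)
print(m.group(1) + m.group(2))  # features.denseblock1.denselayer1.norm1.weight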
AlignShift
AlignShift-master/nn/models/truncated_densenet3d_a3d.py
# Ke Yan, Imaging Biomarkers and Computer-Aided Diagnosis Laboratory, from collections import namedtuple import torch import torch.nn.functional as F from torch import nn import torchvision.models as models from torchvision.models.densenet import model_urls import math import torch.utils.model_zoo as model_zoo import re from collections import OrderedDict from mmdet.models.registry import BACKBONES from nn.operators import A3DConv import torch.utils.checkpoint as cp from mmdet.models.utils import build_conv_layer, build_norm_layer # mybn = nn.BatchNorm3d # mybn = nn.SyncBatchNorm norm_cfg = dict(type='SyncBN') def _bn_function_factory(norm, relu, conv): def bn_function(*inputs): concated_features = torch.cat(inputs, 1) bottleneck_output = conv(relu(norm(concated_features))) return bottleneck_output return bn_function class _DenseLayer(nn.Sequential): def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, dimension, memory_efficient=False): super(_DenseLayer, self).__init__() self.add_module('norm1', build_norm_layer(norm_cfg, num_input_features, postfix=1)[1]), self.add_module('relu1', nn.ReLU(inplace=True)), self.add_module('conv1', A3DConv(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, enable_shift=True, dimension=dimension, bias=False)), self.add_module('norm2', build_norm_layer(norm_cfg, bn_size * growth_rate, postfix=1)[1]), self.add_module('relu2', nn.ReLU(inplace=True)), self.add_module('conv2', A3DConv(bn_size * growth_rate, growth_rate, enable_shift=True, kernel_size=3, stride=1, padding=1, dimension=dimension, bias=False)), self.drop_rate = drop_rate self.memory_efficient = memory_efficient def forward(self, *prev_features): bn_function = _bn_function_factory(self.norm1, self.relu1, self.conv1) if self.memory_efficient:# and any(prev_feature.requires_grad for prev_feature in prev_features):hyadd 这里自己设计节省效率 bottleneck_output = cp.checkpoint(bn_function, *prev_features) else: bottleneck_output = bn_function(*prev_features) new_features = self.conv2(self.relu2(self.norm2(bottleneck_output))) if self.drop_rate > 0: new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) return new_features class _DenseBlock(nn.Module): def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, dimension=None, memory_efficient=False): super(_DenseBlock, self).__init__() for i in range(num_layers): layer = _DenseLayer( num_input_features + i * growth_rate, growth_rate=growth_rate, bn_size=bn_size, drop_rate=drop_rate, dimension=dimension, memory_efficient=memory_efficient, ) self.add_module('denselayer%d' % (i + 1), layer) def forward(self, init_features): features = [init_features] for name, layer in self.named_children(): new_features = layer(*features) features.append(new_features) return torch.cat(features, 1) class _Transition(nn.Sequential): def __init__(self, num_input_features, num_output_features): super().__init__() self.add_module('norm', build_norm_layer(norm_cfg, num_input_features, postfix=1)[1]) self.add_module('relu', nn.ReLU(inplace=True)) self.add_module('conv', A3DConv(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False, enable_shift=False)) self.add_module('pool', nn.AvgPool3d(kernel_size=[1, 2, 2], stride=[1, 2, 2])) class _Reduction_z(nn.Sequential): def __init__(self, input_features, input_slice): super().__init__() self.add_module('reduction_z_conv', nn.Conv3d(input_features, input_features, kernel_size=[input_slice, 1, 1], stride=1, bias=False)) # 
self.add_module('reduction_z_pooling', nn.AvgPool3d(kernel_size=[input_slice, 1, 1], stride=1)) @BACKBONES.register_module class DenseNetCustomTrunc3dA3D(nn.Module): def __init__(self, out_dim=256, n_cts=3, fpn_finest_layer=1, memory_efficient=True): super().__init__() self.depth = 121 self.feature_upsample = True self.fpn_finest_layer = fpn_finest_layer self.out_dim = out_dim self.n_cts = n_cts self.mid_ct = n_cts//2 assert self.depth in [121] if self.depth == 121: num_init_features = 64 growth_rate = 32 block_config = (6, 12, 24) self.in_dim = [64, 256, 512, 1024] bn_size = 4 drop_rate = 0 # First convolution self.conv0 = A3DConv(1, num_init_features, kernel_size=7, stride=2, padding=3, bias=False, enable_shift=False) self.norm0 = build_norm_layer(norm_cfg, num_init_features, postfix=1)[1] self.relu0 = nn.ReLU(inplace=True) self.pool0 = nn.MaxPool3d(kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1]) # Each denseblock num_features = num_init_features for i, num_layers in enumerate(block_config): block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, dimension=self.n_cts, memory_efficient=memory_efficient) self.add_module('denseblock%d' % (i + 1), block) num_features = num_features + num_layers * growth_rate reductionz = _Reduction_z(num_features, self.n_cts) # normrelu = _StageNormRelu(num_features) # self.add_module('normrelu%d' % (i + 1), normrelu) self.add_module('reductionz%d' % (i + 1), reductionz) if i != len(block_config) - 1: trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2) self.add_module('transition%d' % (i + 1), trans) num_features = num_features // 2 # Final batch norm # self.add_module('norm5', nn.BatchNorm2d(num_features)) # Official init from torch repo. 
for m in self.modules(): if isinstance(m, nn.Conv3d): nn.init.kaiming_normal_(m.weight.data) elif isinstance(m, nn.BatchNorm3d): m.weight.data.fill_(1) m.bias.data.zero_() # elif isinstance(m, nn.Linear): # m.bias.data.zero_() if self.feature_upsample: for p in range(4, self.fpn_finest_layer - 1, -1): layer = nn.Conv2d(self.in_dim[p - 1], self.out_dim, 1) name = 'lateral%d' % p self.add_module(name, layer) nn.init.kaiming_uniform_(layer.weight, a=1) nn.init.constant_(layer.bias, 0) self.init_weights() # if syncbn: # self = nn.SyncBatchNorm.convert_sync_batchnorm(self) def forward(self, x): x = self.conv0(x) x = self.norm0(x) x = self.relu0(x) x = self.pool0(x) x = self.denseblock1(x) # x = self.normrelu1(x) redc1 = self.reductionz1(x) x = self.transition1(x) x = self.denseblock2(x) # x = self.normrelu2(x) redc2 = self.reductionz2(x) x = self.transition2(x) x = self.denseblock3(x) # x = self.normrelu3(x) redc3 = self.reductionz3(x) # truncated since here since we find it works better in DeepLesion # ts3 = self.transition3(db3) # db4 = self.denseblock4(ts3) # if self.feature_upsample: ftmaps = [None, redc1.squeeze(2), redc2.squeeze(2), redc3.squeeze(2)] x = self.lateral4(ftmaps[-1]) for p in range(3, self.fpn_finest_layer - 1, -1): x = F.interpolate(x, scale_factor=2, mode="nearest") y = ftmaps[p-1] lateral = getattr(self, 'lateral%d' % p)(y) x += lateral return [x] # else: # return [db3] def init_weights(self, pretrained=True): pattern = re.compile( r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') state_dict = model_zoo.load_url(model_urls['densenet121']) for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = res.group(1) + res.group(2) state_dict[new_key] = state_dict[key] del state_dict[key] state_dict1 = {} for key in list(state_dict.keys()): new_key = key.replace('features.', '') if state_dict[key].dim() == 4: t0 = state_dict[key].shape[1] state_dict1[new_key] = state_dict[key]#.unsqueeze(2)#.repeat((1,1,self.n_cts,1,1))/self.n_cts if t0 == 3: state_dict1[new_key] = state_dict1[new_key][:,1:2,...] else: state_dict1[new_key] = state_dict[key] key = self.load_state_dict(state_dict1, strict=False) # print(key) def freeze(self): for name, param in self.named_parameters(): print('freezing', name) param.requires_grad = False
file_length: 9,821 | avg_line_length: 41.154506 | max_line_length: 128 | extension_type: py
AlignShift
AlignShift-master/nn/models/truncated_densenet3d_tsm.py
# Ke Yan, Imaging Biomarkers and Computer-Aided Diagnosis Laboratory, from collections import namedtuple import torch import torch.nn.functional as F from torch import nn import torchvision.models as models from torchvision.models.densenet import model_urls import math import torch.utils.model_zoo as model_zoo import re from collections import OrderedDict from mmdet.models.registry import BACKBONES from nn.operators.tsmconv import TSMConv import torch.utils.checkpoint as cp from mmdet.models.utils import build_conv_layer, build_norm_layer # mybn = nn.BatchNorm3d # mybn = nn.SyncBatchNorm norm_cfg = dict(type='SyncBN') def _bn_function_factory(norm, relu, conv): def bn_function(*inputs): concated_features = torch.cat(inputs, 1) bottleneck_output = conv(relu(norm(concated_features))) return bottleneck_output return bn_function class _DenseLayer(nn.Sequential): def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, n_fold, memory_efficient=False): super(_DenseLayer, self).__init__() self.add_module('norm1', build_norm_layer(norm_cfg, num_input_features, postfix=1)[1]), self.add_module('relu1', nn.ReLU(inplace=False)), self.add_module('conv1', TSMConv(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False, n_fold=n_fold)), self.add_module('norm2', build_norm_layer(norm_cfg, bn_size * growth_rate, postfix=1)[1]), self.add_module('relu2', nn.ReLU(inplace=False)), self.add_module('conv2', TSMConv(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False, n_fold=n_fold)), self.drop_rate = drop_rate self.memory_efficient = memory_efficient def forward(self, *prev_features): bn_function = _bn_function_factory(self.norm1, self.relu1, self.conv1) if self.memory_efficient and any(prev_feature.requires_grad for prev_feature in prev_features): bottleneck_output = cp.checkpoint(bn_function, *prev_features) else: bottleneck_output = bn_function(*prev_features) new_features = self.conv2(self.relu2(self.norm2(bottleneck_output))) if self.drop_rate > 0: new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) return new_features class _DenseBlock(nn.Module): def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, n_fold, memory_efficient=False): super(_DenseBlock, self).__init__() for i in range(num_layers): layer = _DenseLayer( num_input_features + i * growth_rate, growth_rate=growth_rate, bn_size=bn_size, drop_rate=drop_rate, memory_efficient=memory_efficient, n_fold=n_fold, ) self.add_module('denselayer%d' % (i + 1), layer) def forward(self, init_features): features = [init_features] for name, layer in self.named_children(): new_features = layer(*features) features.append(new_features) return torch.cat(features, 1) class _Transition(nn.Sequential): def __init__(self, num_input_features, num_output_features): super(_Transition, self).__init__() self.add_module('norm', build_norm_layer(norm_cfg, num_input_features, postfix=1)[1]) self.add_module('relu', nn.ReLU(inplace=True)) self.add_module('conv', TSMConv(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False, tsm=False)) self.add_module('pool', nn.AvgPool3d(kernel_size=[1, 2, 2], stride=[1, 2, 2]))#, padding=[0,1,1] class _Reduction_z(nn.Sequential): def __init__(self, input_features, input_slice): super().__init__() self.add_module('reduction_z_conv', nn.Conv3d(input_features, input_features, kernel_size=[input_slice, 1, 1], stride=1, bias=False)) @BACKBONES.register_module class DenseNetCustomTrunc3dTSM(nn.Module): 
def __init__(self, out_dim=256, n_cts=3, fpn_finest_layer=1, memory_efficient=True, n_fold=8,): super().__init__() self.depth = 121 self.feature_upsample = True self.fpn_finest_layer = fpn_finest_layer self.out_dim = out_dim self.n_cts = n_cts self.mid_ct = n_cts//2 self.n_fold = n_fold assert self.depth in [121] if self.depth == 121: num_init_features = 64 growth_rate = 32 block_config = (6, 12, 24) self.in_dim = [64, 256, 512, 1024] bn_size = 4 drop_rate = 0 # First convolution self.conv0 = TSMConv(1, num_init_features, kernel_size=7, stride=2, padding=3, bias=False, tsm=False) self.norm0 = build_norm_layer(norm_cfg, num_init_features, postfix=1)[1] self.relu0 = nn.ReLU(inplace=True) self.pool0 = nn.MaxPool3d(kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1]) # Each denseblock num_features = num_init_features for i, num_layers in enumerate(block_config): block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, n_fold=self.n_fold, memory_efficient=memory_efficient) self.add_module('denseblock%d' % (i + 1), block) num_features = num_features + num_layers * growth_rate reductionz = _Reduction_z(num_features, self.n_cts) self.add_module('reductionz%d' % (i + 1), reductionz) if i != len(block_config) - 1: trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2) self.add_module('transition%d' % (i + 1), trans) num_features = num_features // 2 # Final batch norm # self.add_module('norm5', nn.BatchNorm2d(num_features)) # Official init from torch repo. for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight.data) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): m.bias.data.zero_() if self.feature_upsample: for p in range(4, self.fpn_finest_layer - 1, -1): layer = nn.Conv2d(self.in_dim[p - 1], self.out_dim, 1) name = 'lateral%d' % p self.add_module(name, layer) nn.init.kaiming_uniform_(layer.weight, a=1) nn.init.constant_(layer.bias, 0) self.init_weights() # if syncbn: # self = nn.SyncBatchNorm.convert_sync_batchnorm(self) def forward(self, x): x = self.conv0(x) x = self.norm0(x) x = self.relu0(x) x = self.pool0(x) x = self.denseblock1(x) redc1 = self.reductionz1(x) x = self.transition1(x) x = self.denseblock2(x) redc2 = self.reductionz2(x) x = self.transition2(x) x = self.denseblock3(x) redc3 = self.reductionz3(x) # truncated since here since we find it works better in DeepLesion # ts3 = self.transition3(db3) # db4 = self.denseblock4(ts3) # if self.feature_upsample: ftmaps = [None, redc1.squeeze(2), redc2.squeeze(2), redc3.squeeze(2)] x = self.lateral4(ftmaps[-1]) for p in range(3, self.fpn_finest_layer - 1, -1): x = F.interpolate(x, scale_factor=2, mode="nearest") y = ftmaps[p-1] lateral = getattr(self, 'lateral%d' % p)(y) x += lateral return [x] def init_weights(self, pretrained=True): pattern = re.compile( r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') state_dict = model_zoo.load_url(model_urls['densenet121']) for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = res.group(1) + res.group(2) state_dict[new_key] = state_dict[key] del state_dict[key] state_dict1 = {} for key in list(state_dict.keys()): new_key = key.replace('features.', '') if state_dict[key].dim() == 4: t0 = state_dict[key].shape[1] state_dict1[new_key] = state_dict[key]#.unsqueeze(2)#.repeat((1,1,self.n_cts,1,1))/self.n_cts if t0 == 3: 
state_dict1[new_key] = state_dict1[new_key][:,1:2,...] else: state_dict1[new_key] = state_dict[key] self.load_state_dict(state_dict1, strict=False) def freeze(self): for name, param in self.named_parameters(): print('freezing', name) param.requires_grad = False
file_length: 9,413 | avg_line_length: 41.026786 | max_line_length: 120 | extension_type: py
AlignShift
AlignShift-master/nn/operators/tsmconv.py
import torch
from torch.nn.modules.conv import _ConvNd
from torch.nn import functional as F
import numpy as np
from ..utiles import as_triple, _pair_same


class TSMConv(_ConvNd):
    '''
    Args:
        n_fold (int): Divisor of channels to shift.
        tsm (bool): if True, apply the temporal shift (TSM) operation before the convolution.
        inplace (bool): if True, enable the inplace shift operation.
        shift_padding_zero (bool): if True, pad the side folds with zeros before shifting channels.
    '''
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros',
                 n_fold=8, tsm=True, inplace=False, shift_padding_zero=True):
        kernel_size = _pair_same(kernel_size)
        stride = as_triple(stride)
        padding = as_triple(padding, d_value=0)
        dilation = as_triple(dilation)
        super().__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, as_triple(0, 0), groups, bias, padding_mode)
        self.n_fold = n_fold
        self.enable_tsm = tsm
        self.inplace = inplace
        self.shift_padding_zero = shift_padding_zero

    def tsm(self, input, fold):
        out = torch.zeros_like(input)
        out[:, :fold, :-1] = input[:, :fold, 1:]
        out[:, fold: 2 * fold, 1:] = input[:, fold: 2 * fold, :-1]
        out[:, 2 * fold:, :] = input[:, 2 * fold:, :]
        return out

    def tsm_shift(self, x, fold, inplace):
        if inplace:
            x = inplace_tsm(x, fold)
        else:
            x = self.tsm(x, fold)
        return x

    def forward(self, input):
        if self.enable_tsm:
            _, c, _, _, _ = input.size()
            fold = c // self.n_fold
            input = self.tsm_shift(input, fold, self.inplace)
        return F.conv3d(input, self.weight.unsqueeze(2), self.bias, self.stride,
                        self.padding, self.dilation, self.groups)

    def extra_repr(self):
        # reference the stored flag rather than the tsm() method
        return super().extra_repr() + f', tsm={self.enable_tsm}'


class InplaceTSM(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input, fold):
        '''
        @params:
            input: BxCxDxHxW
            fold: number of channels in the shifted part
        '''
        # higher-order gradients are not supported
        n, c, t, h, w = input.size()
        ctx.fold_ = fold
        buffer = input.data.new(n, fold, t, h, w).zero_()
        buffer[:, :, :-1] = input.data[:, :fold, 1:]
        input.data[:, :fold, :] = buffer
        buffer.zero_()
        buffer[:, :, 1:] = input.data[:, fold: 2 * fold, :-1]
        input.data[:, fold: 2 * fold, :] = buffer
        return input

    @staticmethod
    def backward(ctx, grad_output):
        fold = ctx.fold_
        n, c, t, h, w = grad_output.size()
        buffer = grad_output.data.new(n, fold, t, h, w).zero_()
        buffer[:, :, 1:] = grad_output.data[:, :fold, :-1]
        grad_output.data[:, :fold, :] = buffer
        buffer.zero_()
        buffer[:, :, :-1] = grad_output.data[:, fold: 2 * fold, 1:]
        grad_output.data[:, fold: 2 * fold, :] = buffer
        return grad_output, None


inplace_tsm = InplaceTSM.apply
file_length: 3,243 | avg_line_length: 32.791667 | max_line_length: 86 | extension_type: py
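TSMConv shifts the first c // n_fold channels one slice toward the start of the depth axis and the next fold one slice toward the end, leaving the remaining channels untouched, before applying a 1xKxK 3D convolution. A toy, pure-PyTorch reproduction of the non-inplace shift in TSMConv.tsm (fold=1 and the tiny tensor are only for illustration):

import torch

x = torch.arange(1., 5.).view(1, 1, 4, 1, 1).repeat(1, 4, 1, 1, 1)  # B=1, C=4, D=4
fold = 1
out = torch.zeros_like(x)
out[:, :fold, :-1] = x[:, :fold, 1:]                   # first fold: shift toward earlier slices
out[:, fold:2 * fold, 1:] = x[:, fold:2 * fold, :-1]   # second fold: shift toward later slices
out[:, 2 * fold:, :] = x[:, 2 * fold:, :]              # remaining channels unchanged
print(out[0, 0, :, 0, 0])  # tensor([2., 3., 4., 0.])
print(out[0, 1, :, 0, 0])  # tensor([0., 1., 2., 3.])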
AlignShift
AlignShift-master/nn/operators/a3dconv.py
import torch from torch.nn.modules.conv import _ConvNd from torch.nn import functional as F from torch.nn import init import numpy as np from ..utiles import as_triple, _pair_same from torch.nn.parameter import Parameter class A3DConv(_ConvNd): ''' Args: dimension (int): number of dimensions to shift. inplace (bool): if Enable inplace operation. enable_shift(bool): if enable shift ''' def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros', dimension=3, enable_shift=True, inplace=False): kernel_size = _pair_same(kernel_size) stride = as_triple(stride) padding = as_triple(padding, 0) dilation = as_triple(dilation) super().__init__( in_channels, out_channels, kernel_size, stride, padding, dilation, False, as_triple(0,0), groups, bias, padding_mode) self.enable_shift = enable_shift self.inplace = inplace self._init_adaptive_weights(in_channels, dimension) def _init_adaptive_weights(self, in_channels, dimension): self.adaptive_align_weights = Parameter(torch.Tensor(dimension, dimension, in_channels)) with torch.no_grad(): aw = torch.eye(dimension).unsqueeze(-1).repeat((1,1,in_channels)) noise = aw.new(aw.shape) init.uniform_(noise, -0.1, 0.1) self.adaptive_align_weights.data = aw + noise # self.adaptive_align_weights.requires_grad = True def _adaptive_shift(self, input): out = torch.einsum('bcdhw,dkc->bckhw', [input, self.adaptive_align_weights]) return out def adaptive_shift(self, x, inplace): if inplace: raise NotImplementedError else: x = self._adaptive_shift(x) return x def forward(self, input): if self.enable_shift: _, c, _, _, _ = input.size() input = self.adaptive_shift(input, self.inplace) return F.conv3d(input, self.weight.unsqueeze(2), self.bias, self.stride, self.padding, self.dilation, self.groups) def extra_repr(self): s = super().extra_repr() + f', adaptive_shift={self.enable_shift}' return s.format(**self.__dict__) def flops(self, input, output): batch_size = input.shape[0] output_dims = list(output.shape[2:]) kernel_dims = list(self.kernel_size) in_channels = self.in_channels out_channels = self.out_channels groups = self.groups filters_per_channel = out_channels // groups conv_per_position_flops = np.prod( kernel_dims) * in_channels * filters_per_channel active_elements_count = batch_size * np.prod(output_dims) if self.__mask__ is not None: # (b, 1, h, w) output_height, output_width = output.shape[2:] flops_mask = self.__mask__.expand(batch_size, 1, output_height, output_width) active_elements_count = flops_mask.sum() overall_conv_flops = conv_per_position_flops * active_elements_count bias_flops = 0 if self.bias is not None: bias_flops = out_channels * active_elements_count overall_flops = overall_conv_flops + bias_flops # adaptive_weights_flops if self.enable_shift: adaptive_align_weights_flops = np.prod(self.adaptive_align_weights.shape)# * in_channels ** 2 overall_flops += adaptive_align_weights_flops # return overall_flops
file_length: 3,716 | avg_line_length: 35.441176 | max_line_length: 105 | extension_type: py
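A3DConv replaces the fixed shift with a learnable (D, D, C) mixing matrix that recombines the D slices independently for every channel through an einsum; in the repo the matrix is initialised as the identity plus small uniform noise. A toy sketch of the mixing step, using a pure identity so the output equals the input:

import torch

B, C, D, H, W = 1, 2, 3, 4, 4
x = torch.rand(B, C, D, H, W)
aw = torch.eye(D).unsqueeze(-1).repeat(1, 1, C)   # adaptive_align_weights, shape (D, D, C)
out = torch.einsum('bcdhw,dkc->bckhw', x, aw)
print(torch.allclose(out, x))  # True: identity weights leave the slices unchanged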
AlignShift
AlignShift-master/nn/operators/alignshiftconv.py
import torch
from torch.nn.modules.conv import _ConvNd
from torch.nn import functional as F
import numpy as np
from ..utiles import as_triple, _pair_same


class AlignShiftConv(_ConvNd):
    '''
    Args:
        n_fold (int): Divisor of channels to shift.
        alignshift (bool): if True, apply the alignshift operation before the convolution.
        inplace (bool): if True, enable the inplace shift operation.
        ref_thickness (float): Reference z-axis spacing in mm. Default: 2.0.
        shift_padding_zero (bool): if True, pad the side folds with zeros before shifting channels.
    '''
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros',
                 n_fold=8, alignshift=True, inplace=True, ref_thickness=2.,
                 shift_padding_zero=False):
        kernel_size = _pair_same(kernel_size)
        stride = as_triple(stride)
        padding = as_triple(padding, 0)
        dilation = as_triple(dilation)
        super().__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, as_triple(0, 0), groups, bias, padding_mode)
        self.n_fold = n_fold
        self.enable_align = alignshift
        self.inplace = inplace
        self.ref_thickness = ref_thickness
        self.shift_padding_zero = shift_padding_zero

    def alignshift(self, input, fold, ref_thickness, thickness, padding_zero=True):
        alph = 1 - (ref_thickness / thickness).view(-1, 1, 1, 1, 1).clamp_(0., 1.)
        out = torch.zeros_like(input)
        out[:, :fold, :-1] = input[:, :fold, :-1] * alph + input[:, :fold, 1:] * (1 - alph)
        out[:, fold: 2 * fold, 1:] = input[:, fold: 2 * fold, 1:] * alph + \
            input[:, fold: 2 * fold, :-1] * (1 - alph)
        out[:, 2 * fold:, :] = input[:, 2 * fold:, :]
        pad_alph = alph if padding_zero else 1.0
        out[:, :fold, -1:] = pad_alph * input[:, :fold, -1:]
        out[:, fold:2 * fold, :1] = pad_alph * input[:, fold:2 * fold, :1]
        return out

    def align_shift(self, x, fold, ref_thickness, thickness, padding_zero, inplace):
        if inplace:
            x = inplace_alignshift(x, fold, ref_thickness, thickness, padding_zero)
        else:
            x = self.alignshift(x, fold, ref_thickness, thickness, padding_zero)
        return x

    def forward(self, input, thickness=None):
        if self.enable_align:
            _, c, _, _, _ = input.size()
            fold = c // self.n_fold
            input = self.align_shift(input, fold, self.ref_thickness,
                                     thickness, self.shift_padding_zero, self.inplace)
        return F.conv3d(input, self.weight.unsqueeze(2), self.bias, self.stride,
                        self.padding, self.dilation, self.groups)

    def extra_repr(self):
        # reference the stored flag rather than the alignshift() method
        return super().extra_repr() + f', alignshift={self.enable_align}'


class InplaceAlignShift(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input, fold, ref_thickness, thickness, padding_zero=True):
        '''
        @params:
            input: BxCxDxHxW
            fold: channels to align
            ref_thickness: reference z-axis spacing to align to
            thickness: thickness of the input CT
            padding_zero: bool, whether to pad the outer slices with zeros
        '''
        n, c, t, h, w = input.size()
        ctx.fold_ = fold
        ctx.padding_zero = padding_zero
        alph = 1 - (ref_thickness / thickness).view(-1, 1, 1, 1, 1).clamp_(0., 1.)
        # thicknesses smaller than ref_thickness are treated as ref_thickness, i.e. no interpolation
        ctx.alph_ = alph
        input.data[:, :fold, :-1] = input.data[:, :fold, :-1] * alph + input.data[:, :fold, 1:] * (1 - alph)
        input.data[:, fold:2 * fold, 1:] = input.data[:, fold:2 * fold, 1:] * alph + \
            input.data[:, fold:2 * fold, :-1] * (1 - alph)
        if padding_zero:
            input.data[:, :fold, -1:] = input.data[:, :fold, -1:] * alph
            input.data[:, fold:2 * fold, :1] = input.data[:, fold:2 * fold, :1] * alph
        return input

    @staticmethod
    def backward(ctx, grad_output):
        fold = ctx.fold_
        alph = ctx.alph_
        padding_zero = ctx.padding_zero
        n, c, t, h, w = grad_output.size()
        pad_fold = alph if padding_zero else 1.
        grad_output.data[:, :fold, -1:] = grad_output.data[:, :fold, -1:] \
            * pad_fold + grad_output.data[:, :fold, -2:-1] * (1 - alph)
        grad_output.data[:, :fold, 1:-1] = grad_output.data[:, :fold, 1:-1] * alph \
            + grad_output.data[:, :fold, :-2] * (1 - alph)
        grad_output.data[:, :fold, :1] = grad_output.data[:, :fold, :1] * alph
        grad_output.data[:, fold:2 * fold, :1] = grad_output.data[:, fold:2 * fold, :1] * pad_fold \
            + grad_output.data[:, fold:2 * fold, 1:2] * (1 - alph)
        grad_output.data[:, fold:2 * fold, 1:-1] = grad_output.data[:, fold:2 * fold, 1:-1] * alph \
            + grad_output.data[:, fold:2 * fold, 2:] * (1 - alph)
        grad_output.data[:, fold:2 * fold, -1:] = grad_output.data[:, fold:2 * fold, -1:] * alph
        return grad_output, None, None, None, None


inplace_alignshift = InplaceAlignShift.apply
file_length: 5,671 | avg_line_length: 48.321739 | max_line_length: 150 | extension_type: py
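AlignShiftConv interpolates between no shift and a full TSM-style shift with alpha = 1 - ref_thickness / thickness, clamped per sample to [0, 1]: slices at the reference thickness get a full shift, thick slices are barely shifted. A toy sketch of that blend for the first fold's shift direction only (the border-slice padding is omitted):

import torch

ref_thickness = 2.0
x = torch.arange(1., 5.).view(1, 1, 4, 1, 1)  # one channel, D=4
for thickness in (2.0, 5.0):
    alpha = (1 - ref_thickness / torch.tensor([thickness])).clamp(0., 1.)
    out = torch.zeros_like(x)
    out[:, :, :-1] = x[:, :, :-1] * alpha + x[:, :, 1:] * (1 - alpha)
    print(thickness, out[0, 0, :3, 0, 0])
# 2.0 tensor([2., 3., 4.])             full shift when thickness == ref_thickness
# 5.0 tensor([1.4000, 2.4000, 3.4000]) interpolated shift for thick slices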
AlignShift
AlignShift-master/deeplesion/train_dist.py
from __future__ import division import argparse import sys #sys.path.append('add your worck space here') import os import torch torch.multiprocessing.set_sharing_strategy('file_system') from mmcv import Config from mmdet import __version__ from mmdet.apis import (get_root_logger, init_dist, set_random_seed, train_detector) from mmdet.datasets import build_dataset from mmdet.models import build_detector from collections import OrderedDict def parse_args(): parser = argparse.ArgumentParser(description='Train a detector') parser.add_argument('config', help='train config file path') parser.add_argument('--work_dir', help='the dir to save logs and models') parser.add_argument( '--resume_from', help='the checkpoint file to resume from') parser.add_argument( '--validate', action='store_true', default=False, help='whether to evaluate the checkpoint during training') parser.add_argument( '--gpus', type=int, default=1, help='number of gpus to use ' '(only applicable to non-distributed training)') parser.add_argument('--seed', type=int, default=None, help='random seed') parser.add_argument( '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher') parser.add_argument('--local_rank', type=int, default=0) parser.add_argument( '--autoscale-lr', action='store_true', help='automatically scale lr with the number of gpus') args = parser.parse_args() if 'LOCAL_RANK' not in os.environ: os.environ['LOCAL_RANK'] = str(args.local_rank) return args def main(args): cfg = Config.fromfile(args.config) os.environ['CUDA_VISIBLE_DEVICES'] = cfg.GPU # set cudnn_benchmark if cfg.get('cudnn_benchmark', False): torch.backends.cudnn.benchmark = True # update configs according to CLI args if args.work_dir is not None: cfg.work_dir = args.work_dir if args.resume_from is not None: cfg.resume_from = args.resume_from cfg.gpus = args.gpus if args.autoscale_lr: # apply the linear scaling rule (https://arxiv.org/abs/1706.02677) cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8 # init distributed env first, since logger depends on the dist info. if args.launcher == 'none': distributed = False else: distributed = True init_dist(args.launcher, **cfg.dist_params) # init logger before other steps logger = get_root_logger(cfg.log_level) logger.info('Distributed training: {}'.format(distributed)) logger.info('MMDetection Version: {}'.format(__version__)) logger.info('Config: {}'.format(cfg.text)) # set random seeds if args.seed is not None: logger.info('Set random seed to {}'.format(args.seed)) set_random_seed(args.seed) model = build_detector( cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg) # if cfg.weights2d_path[0] is not None and cfg.load_from is None: # expand_sd = pretrain2d_to_3d(model, cfg.weights2d_path[0]) # model.load_state_dict(expand_sd, strict=False) datasets = [build_dataset(cfg.data.train)] if len(cfg.workflow) == 2: datasets.append(build_dataset(cfg.data.val)) if cfg.checkpoint_config is not None: # save mmdet version, config file content and class names in # checkpoints as meta data cfg.checkpoint_config.meta = dict( mmdet_version=__version__, config=cfg.text, CLASSES=datasets[0].CLASSES) # add an attribute for visualization convenience validate = cfg.get('evaluation', False) model.CLASSES = datasets[0].CLASSES train_detector( model, datasets, cfg, distributed=distributed, validate=validate, logger=logger) if __name__ == '__main__': args = parse_args() main(args)
file_length: 4,031 | avg_line_length: 32.04918 | max_line_length: 77 | extension_type: py
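The --autoscale-lr flag in train_dist.py applies the linear scaling rule referenced in the code (https://arxiv.org/abs/1706.02677): the configured learning rate is assumed to be tuned for 8 GPUs and is rescaled proportionally to the actual GPU count. A sketch with illustrative numbers (not taken from the repo configs):

base_lr, gpus = 0.02, 2        # hypothetical values
scaled_lr = base_lr * gpus / 8
print(scaled_lr)               # 0.005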
AlignShift
AlignShift-master/deeplesion/eval.py
import torch torch.multiprocessing.set_sharing_strategy('file_system') import numpy as np from pycocotools import mask as mutils import sys # sys.path.append('/your alignshit path') import os # from dataset import DeepLesionDataset import cv2 import random import matplotlib.pyplot as plt from mmcv import Config import mmcv from mmcv.runner import get_dist_info, load_checkpoint from mmcv.parallel import MMDataParallel import random import pickle import gc import argparse from mmdet.datasets.registry import DATASETS, PIPELINES from mmdet.models.registry import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS, ROI_EXTRACTORS, SHARED_HEADS) from mmdet.datasets import build_dataset, build_dataloader from mmdet.models import build_detector from mmdet.core import (AnchorGenerator, anchor_target, delta2bbox, force_fp32, multi_apply, multiclass_nms) # from deeplesion.evaluation.visualize import draw_bounding_boxes_on_image_array from deeplesion.evaluation.evaluation_metrics import sens_at_FP from deeplesion.evaluation.evaluation_metrics import sens_at_FP, IOU from skimage.measure import regionprops from pycocotools.mask import decode def parse_args(): parser = argparse.ArgumentParser(description='eval deeplesion') # parser.add_argument('config', help='train config file path') parser.add_argument('--config', default=None, help='config path') parser.add_argument('--checkpoint', help='checkpoint path') args = parser.parse_args() return args def generate_cfg(checkpoint): d = torch.load(checkpoint, map_location=torch.device('cpu')) cfg_path = checkpoint.replace('.pth','.py') if not os.path.exists(cfg_path): with open(cfg_path,'w') as f: f.write(d['meta']['config']) return cfg_path def get_model(cfg_path): cfg = Config.fromfile(cfg_path) cfg.data.imgs_per_gpu=1 cfg.model.pretrained = None cfg.data.test.test_mode = True cfg.data.val.test_mode = True # cfg.test_cfg.rcnn.score_thr=0.001 #build dataset print(cfg.description) dataset = build_dataset(cfg.data.test) data_loadertest = build_dataloader( dataset, imgs_per_gpu=1, workers_per_gpu=0, dist=False, shuffle=False) model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg) model = MMDataParallel(model, device_ids=[0]) model.CLASSES = dataset.CLASSES return model, data_loadertest # def single_gpu_test(model, data_loader): # model.eval() # results = [] # # dataset = data_loader.dataset # prog_bar = mmcv.ProgressBar(len(data_loader)) # with torch.no_grad(): # for i, data in enumerate(data_loader): # gt_boxes = data.pop('gt_bboxes') # r = model(return_loss=False, rescale=False, **data) # # inference_time.append(time.time() - start_time) # data['gt_boxes'] = gt_boxes # data['bboxes'] = r[0] # data['segs'] = r[1] # # data['img'] = data['img'].data[0][data['img'].data[0].shape[0]//2] # data.pop('img') # results.append(data) # prog_bar.update() # return results def single_gpu_test(model, data_loader): model.eval() results = [] prog_bar = mmcv.ProgressBar(len(data_loader)) with torch.no_grad(): ann = data_loader.dataset.ann for i, data in enumerate(data_loader): gt_boxes = data.pop('gt_bboxes') r = model(return_loss=False, rescale=False, **data) j=0 res_dict={} res_dict['gt_boxes'] = gt_boxes.data[0][j].numpy() res_dict['bboxes'] = r[0][j] res_dict['segs'] = r[1][j] # res_dict['img'] = data['img'].data[j][0][0] dd = ann[i+j]['ann']['diameters'] res_dict['diameters'] = dd res_dict['img_meta'] = data['img_meta'].data[0][j] # data.pop('img') res_dict['gt_masks'] = data['gt_masks'].data[0][j] res_dict['spacing'] = ann[i]['ann']['spacing'] res_dict['recists'] 
= ann[i]['ann']['recists'] res_dict['thickness'] = ann[i]['ann']['slice_intv'] # res_dict['diameter_erro'], res_dict['pred_mask'], res_dict['gt_mask_my'], res_dict['pred_mask_index'] = mask_matrics(res_dict) results.append(res_dict) prog_bar.update() return results def write_metrics(outputs, log_path, epoch): avgFP = [0.5, 1, 2, 4, 8, 16] iou_th = 0.5 s1_box=[] s1_gt=[] s5_box=[] s5_gt=[] so_box=[] so_gt=[] so_seg_erro = [] for d in outputs: if d['thickness']<=2.: s1_box.append(np.vstack(d['bboxes'])) s1_gt.append(d['gt_boxes']) elif d['thickness']==5.: s5_box.append(np.vstack(d['bboxes'])) s5_gt.append(d['gt_boxes']) else: so_box.append(np.vstack(d['bboxes'])) so_gt.append(d['gt_boxes']) # so_seg_erro.extend(d['diameter_erro']) sens1 = sens_at_FP(s1_box, s1_gt, avgFP, iou_th) sens2 = sens_at_FP(s5_box, s5_gt, avgFP, iou_th) sens = sens_at_FP(s1_box+s5_box+so_box, s1_gt+s5_gt+so_gt, avgFP, iou_th) # so_seg_erro = np.array(so_seg_erro) # diameter_erro = so_seg_erro[so_seg_erro>-1].mean() s = str(epoch)+':\t'+str(sens)+'\t'+str(sens1)+'\t'+str(sens2)+f'\t \n'#diameter_erro:{diameter_erro}\n' print(s) # with open(log_path,'a+') as f: # f.write(s) return s def mask_matrics(output, iou_thresh=0.5): erro = [-1] * len(output['gt_boxes']) pred_mask = [] gt_mask = [] pred_mask_index = [] pred_mask_contours = [] for i, box in enumerate(output['gt_boxes']): iou1 = IOU(box, output['bboxes']) if len(iou1)==0: continue indx = iou1.argmax() try: if iou1[indx] > iou_thresh: d_seg = decode(output['segs'][indx]) pred_mask.append(output['segs'][indx]) l_seg = output['gt_masks'][i].astype(np.uint8) gt_mask.append(l_seg) # _, cnts = cv2.findContours(l_seg, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) pred_mask_index.append(indx) prop = regionprops(d_seg)[0] diameter = np.sqrt(prop.major_axis_lengt**2 + prop.minor_axis_length**2)/2 recists = output['recists'][i].reshape((4, 2)) l = np.linalg.norm(recists[0] - recists[1]) s = np.linalg.norm(recists[2] - recists[3]) diameter1 = np.sqrt(l**2 + s**2)/2 erro[i] = (abs(prop.major_axis_lengt-max(l,s)) + abs(prop.minor_axis_length-min(l,s))) * output['spacing'] # erro[i] = np.abs(diameter1 - diameter) * output['spacing'] except Exception as e: print(e) print(len(output['segs']), indx, i, output['bboxes'].shape, decode(output['segs'][indx]).sum()) return erro, pred_mask, gt_mask, pred_mask_index def main(checkpoint, cfg_path=None): if cfg_path is None: cfg_path = generate_cfg(checkpoint) print(cfg_path) model, dl = get_model(cfg_path) log_path = './log/metrix_log.txt' load_checkpoint(model, checkpoint, map_location='cpu', strict=True) outputs = single_gpu_test(model, dl) r = write_metrics(outputs, log_path, 'N/A') # save_output(outputs, os.path.basename(os.path.dirname(checkpoint))+'.pkl') with open(log_path,'a+') as f: f.write(checkpoint+':\n'+r) with open(os.path.dirname(checkpoint)+'metrics_log.txt','a+') as f: f.write(r) print(r) if __name__ =='__main__': # checkpoint_path = f'/mnt/data3/deeplesion/dl/work_dirs/densenet_3d_acs_r2/latest.pth' args = parse_args() checkpoint = args.checkpoint cfg_path = args.config#generate_cfg(checkpoint) main(checkpoint, cfg_path)
file_length: 8,161 | avg_line_length: 37.866667 | max_line_length: 156 | extension_type: py
AlignShift
AlignShift-master/deeplesion/evaluation/evaluation_metrics.py
import numpy as np from scipy import interpolate import torch def recall(pred, target): pred_label = pred.detach().clone() pred_label[pred_label>=0] = 1 pred_label[pred_label<0] = 0 pred_label = pred_label.flatten() correct = pred_label.eq(target) * target return correct.sum().to(torch.float)/target.sum() def eval_DL_detection(predictions, logger, is_validation): fns = sorted(predictions.keys()) all_boxes = [predictions[fn]['result'].bbox.numpy() for fn in fns] all_scores = [predictions[fn]['result'].get_field('scores').numpy() for fn in fns] all_boxes = [np.hstack((b, s.reshape((-1, 1)))) for (b, s) in zip(all_boxes, all_scores)] all_gts = [predictions[fn]['target'].bbox.cpu().numpy() for fn in fns] # detection det_res = sens_at_FP(all_boxes, all_gts, cfg.TEST.VAL_FROC_FP, cfg.TEST.IOU_TH) # cls 0 is background logger.info(', '.join(['%.4f'%v for v in det_res])) logger.info('mean of %s: %.4f', str(cfg.TEST.VAL_FROC_FP[:4]), np.mean(det_res[:4])) def sens_at_FP(boxes_all, gts_all, avgFP, iou_th): """compute the sensitivity at avgFP (average FP per image)""" # boxes_all = [list(b) for b in boxes_all_r] sens, fp_per_img = FROC(boxes_all, gts_all, iou_th) avgFP_in = [a for a in avgFP if a <= fp_per_img[-1]] avgFP_out = [a for a in avgFP if a > fp_per_img[-1]] f = interpolate.interp1d(fp_per_img, sens) res = np.hstack([f(np.array(avgFP_in)), np.ones((len(avgFP_out, )))*sens[-1]]) return res def FROC(boxes_all, gts_all, iou_th): """Compute the Free ROC curve, for single class only""" nImg = len(boxes_all) img_idxs = np.hstack([[i]*len(boxes_all[i]) for i in range(nImg)]).astype(int) boxes_cat = np.vstack(boxes_all) scores = boxes_cat[:, -1] ord = np.argsort(scores)[::-1] boxes_cat = boxes_cat[ord, :4] img_idxs = img_idxs[ord] hits = [np.zeros((len(gts),), dtype=bool) for gts in gts_all] nHits = 0 nMiss = 0 tps = [] fps = [] for i in range(len(boxes_cat)): overlaps = IOU(boxes_cat[i, :], gts_all[img_idxs[i]]) if len(overlaps) == 0 or overlaps.max() < iou_th: nMiss += 1 else: for j in range(len(overlaps)): if overlaps[j] >= iou_th and not hits[img_idxs[i]][j]: hits[img_idxs[i]][j] = True nHits += 1 tps.append(nHits) fps.append(nMiss) nGt = len(np.vstack(gts_all)) sens = np.array(tps, dtype=float) / nGt fp_per_img = np.array(fps, dtype=float) / nImg return sens, fp_per_img def IOU(box1, gts): """compute overlaps over intersection""" ixmin = np.maximum(gts[:, 0], box1[0]) iymin = np.maximum(gts[:, 1], box1[1]) ixmax = np.minimum(gts[:, 2], box1[2]) iymax = np.minimum(gts[:, 3], box1[3]) iw = np.maximum(ixmax - ixmin + 1., 0.) ih = np.maximum(iymax - iymin + 1., 0.) inters = iw * ih # union uni = ((box1[2] - box1[0] + 1.) * (box1[3] - box1[1] + 1.) + (gts[:, 2] - gts[:, 0] + 1.) * (gts[:, 3] - gts[:, 1] + 1.) - inters) overlaps = inters / uni # ovmax = np.max(overlaps) # jmax = np.argmax(overlaps) return overlaps
file_length: 3,239 | avg_line_length: 32.402062 | max_line_length: 106 | extension_type: py
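sens_at_FP builds a FROC curve over all images and reads off the sensitivity at the requested average-false-positives-per-image points, repeating the last value where the curve ends early. A toy example with two images, one true positive and two false positives, assuming the repo root is on PYTHONPATH so the module below imports:

import numpy as np
from deeplesion.evaluation.evaluation_metrics import sens_at_FP

boxes_all = [np.array([[12, 12, 48, 48, 0.9],        # overlaps the ground truth of image 0
                       [100, 100, 140, 140, 0.3]]),  # false positive
             np.array([[200, 200, 240, 240, 0.8]])]  # false positive in image 1
gts_all = [np.array([[10, 10, 50, 50]]),
           np.array([[20, 20, 60, 60]])]
print(sens_at_FP(boxes_all, gts_all, avgFP=[0.5, 1, 2, 4, 8, 16], iou_th=0.5))
# [0.5 0.5 0.5 0.5 0.5 0.5]  (1 of 2 lesions detected at every FP rate)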
AlignShift
AlignShift-master/deeplesion/evaluation/evaluation.py
from mmdet.core.evaluation.eval_hooks import DistEvalHook from deeplesion.evaluation.evaluation_metrics import sens_at_FP import numpy as np import torch.distributed as dist from mmcv.parallel import collate, scatter from mmcv.runner import Hook from torch.utils.data import Dataset from mmdet import datasets import mmcv import torch import os import os.path as osp class MyDeepLesionEval(Hook): def __init__(self, dataset, interval=1): if isinstance(dataset, Dataset): self.dataset = dataset elif isinstance(dataset, dict): self.dataset = datasets.build_dataset(dataset, {'test_mode': True}) else: raise TypeError( 'dataset must be a Dataset object or a dict, not {}'.format( type(dataset))) self.interval = interval def after_train_epoch(self, runner): if not self.every_n_epochs(runner, self.interval): return runner.model.eval() results = [None for _ in range(len(self.dataset))] # if runner.rank == 0: # prog_bar = mmcv.ProgressBar(len(self.dataset)) for idx in range(runner.rank, len(self.dataset), runner.world_size): data = self.dataset[idx] data_gpu = scatter( collate([data], samples_per_gpu=1), [torch.cuda.current_device()])[0] # compute output with torch.no_grad(): result = runner.model( return_loss=False, rescale=False, **data_gpu) results[idx] = (result[0], data_gpu['gt_bboxes'], data_gpu['thickness']) #batch_size = runner.world_size # if runner.rank == 0: # for _ in range(batch_size): # prog_bar.update() if runner.rank == 0: print('\n') dist.barrier() for i in range(1, runner.world_size): tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(i)) tmp_results = mmcv.load(tmp_file) for idx in range(i, len(results), runner.world_size): results[idx] = tmp_results[idx] os.remove(tmp_file) self.evaluate(runner, results) else: tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(runner.rank)) mmcv.dump(results, tmp_file) dist.barrier() dist.barrier() def evaluate(self, runner, results): gt_bboxes = [] gt_labels = [] pred_bboxes = [] s1_box=[] s1_gt=[] s5_box=[] s5_gt=[] for i in range(len(results)): pred_bboxes.append(results[i][0][0]) gt_bboxes.append(results[i][1][0].cpu().numpy()) if results[i][2][0][0]<=2.: s1_box.append(pred_bboxes[-1]) s1_gt.append(gt_bboxes[-1]) if results[i][2][0][0] > 2.: s5_box.append(pred_bboxes[-1]) s5_gt.append(gt_bboxes[-1]) avgFP=[0.5, 1, 2, 4, 8, 16] iou_th_astrue=0.5 # try: r = sens_at_FP(pred_bboxes, gt_bboxes, avgFP, iou_th_astrue) # except Exception as e: # print('froc crash!\n',e) # return r1 = sens_at_FP(s1_box, s1_gt, avgFP, iou_th_astrue) r2 = sens_at_FP(s5_box, s5_gt, avgFP, iou_th_astrue) # except Exception as e: # print('froc crash!\n',e) # return runner.logger.info(f"{r}") with open('./logs/log_all_metrics.txt','a') as f: f.writelines(f"{runner.epoch+1}:{r}:\t{runner.work_dir.split('/')[-1]}\t{runner.cfg.description}\t{r1}\t{r2}\n")
file_length: 3,745 | avg_line_length: 36.089109 | max_line_length: 124 | extension_type: py
AlignShift
AlignShift-master/deeplesion/dataset/DeepLesionDataset_25d.py
import numpy as np import os import csv import cv2 import logging from pycocotools import mask as mutils from mmcv import Config import torch import os from mmdet.datasets.registry import DATASETS import pickle from mmdet.datasets.pipelines import Compose from mmdet.datasets.custom import CustomDataset @DATASETS.register_module class DeepLesionDataset25d(CustomDataset): CLASSES = ('lesion') def __init__(self, ann_file, pipeline, pre_pipeline, dicm2png_cfg, data_root=None, image_path='', seg_prefix=None, proposal_file=None, test_mode=False): self.data_path = data_root self.classes = ['__background__', 'lesion'] self.num_classes = len(self.classes) self.load_annotations(ann_file) self.img_ids = [a['filename'] for a in self.ann] self.cat_ids = self.classes # self.image_fn_list, self.lesion_idx_grouped = self.load_split_index() # self.num_images = len(self.image_fn_list) self.cfg = Config(dicm2png_cfg) self.pipeline = Compose(pipeline) self.pre_pipeline = Compose(pre_pipeline) self.img_path = image_path self.seg_prefix = seg_prefix self.proposals = None if proposal_file is not None: self.proposals = None self.slice_num = self.cfg.NUM_SLICES self.is_train = not test_mode if self.is_train: self._set_group_flag() def __getitem__(self, index): """ Args: index (int): Index Returns: tuple: (image, target, info). """ # print(index) ann = self.ann[index] image_fn = ann['filename'] boxes = np.array(ann['ann']['bboxes'], dtype=np.float32) masks= np.array([mutils.decode(m) for m in ann['ann']['masks']], dtype=np.float32).transpose((1,2,0)) # masks = masks.sum(0)>0 slice_intv = ann['ann']['slice_intv'] spacing = ann['ann']['spacing'] label = ann['ann']['labels'] recists = ann['ann']['recists'] diameters = ann['ann']['diameters'] gender = float(ann['ann']['gender']) age = float(ann['ann']['age']) z_coord = float(ann['ann']['z_coord']) im, im_scale = load_prep_img(self.img_path, image_fn, spacing, slice_intv, self.cfg, num_slice=self.slice_num, is_train=self.is_train) # im -= self.cfg.PIXEL_MEAN boxes = self.clip_to_image(boxes, im, False) masks = masks.transpose((2, 0, 1)) boxes = boxes.astype(np.float32) results = dict()#img_info=ann, ann_info=infos results['filename'] = image_fn # results['flage'] = flage infos = {'recists': recists, 'diameters': diameters, 'spacing': spacing, 'thickness':slice_intv } # results['flage'] = flage results['img'] = im results['img_shape'] = im.shape results['ori_shape'] = im.shape#[ann['height'], ann['width']] if self.proposals is not None: results['proposals'] = self.proposals[index] results['bbox_fields'] = [] results['mask_fields'] = [] results['gt_bboxes'] = boxes results['bbox_fields'].append('gt_bboxes') results['gt_labels'] = label.astype(np.int64) results['gt_masks'] = masks results['mask_fields'].append('gt_masks') results['thickness'] = slice_intv results = self.pre_pipeline(results) # results['gt_masks'] = masks_scaled # results['mask_fields'].append('gt_masks') return self.pipeline(results) def __len__(self): return len(self.ann) # return 160 def clip_to_image(self, bbox, img, remove_empty=True): TO_REMOVE = 1 bbox[:, 0] = bbox[:, 0].clip(min=0, max=img.shape[1] - TO_REMOVE) bbox[:, 1] = bbox[:, 1].clip(min=0, max=img.shape[0] - TO_REMOVE) bbox[:, 2] = bbox[:, 2].clip(min=0, max=img.shape[1] - TO_REMOVE) bbox[:, 3] = bbox[:, 3].clip(min=0, max=img.shape[0] - TO_REMOVE) if remove_empty: box = bbox keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0]) return bbox[keep] return bbox def _set_group_flag(self): """Set flag according to image aspect ratio. 
Images with aspect ratio greater than 1 will be set as group 1, otherwise group 0. """ self.flag = np.zeros(len(self), dtype=np.uint8) if not self.cfg.GROUNP_ZSAPACING: return for i in range(len(self)): img_info = self.ann[i] if img_info['ann']['slice_intv'] < 2.0: self.flag[i] = 1 logging.info(f'slice_intv grounped by 2.0: {sum(self.flag)}/{len(self)-sum(self.flag)}') def load_annotations(self, ann_file): """load annotations and meta-info from DL_info.csv""" with open(ann_file,'rb') as f: self.ann = pickle.load(f) def load_prep_img(data_dir, imname, spacing, slice_intv, cfg, num_slice=3, is_train=False): """load volume, windowing, interpolate multiple slices, clip black border, resize according to spacing""" im = load_multislice_img_16bit_png(data_dir, imname, slice_intv, num_slice, norm_slice_intv=cfg.SLICE_INTV) im = windowing(im, cfg.WINDOWING) im_shape = im.shape[0:2] im_scale = 1.0 return im, im_scale def load_multislice_img_16bit_png(data_dir, imname, slice_intv, num_slice, norm_slice_intv): data_cache = {} def _load_data_from_png(imname, delta=0): imname1 = get_slice_name(data_dir, imname, delta) if imname1 not in data_cache.keys(): data_cache[imname1] = cv2.imread(os.path.join(data_dir, imname1), -1) assert data_cache[imname1] is not None, 'file reading error: ' + imname1 # if data_cache[imname1] is None: # print('file reading error:', imname1) return data_cache[imname1] _load_data = _load_data_from_png im_cur = _load_data(imname) if norm_slice_intv == 0 or np.isnan(slice_intv) or slice_intv < 0: ims = [im_cur] * num_slice # only use the central slice else: ims = [im_cur] # find neighboring slices of im_cure rel_pos = float(norm_slice_intv) / slice_intv a = rel_pos - np.floor(rel_pos) b = np.ceil(rel_pos) - rel_pos if a == 0: # required SLICE_INTV is a divisible to the actual slice_intv, don't need interpolation for p in range(int((num_slice-1)/2)): im_prev = _load_data(imname, - rel_pos * (p + 1)) im_next = _load_data(imname, rel_pos * (p + 1)) ims = [im_prev] + ims + [im_next] else: for p in range(int((num_slice-1)/2)): intv1 = rel_pos*(p+1) slice1 = _load_data(imname, - np.ceil(intv1)) slice2 = _load_data(imname, - np.floor(intv1)) im_prev = a * slice1 + b * slice2 # linear interpolation slice1 = _load_data(imname, np.ceil(intv1)) slice2 = _load_data(imname, np.floor(intv1)) im_next = a * slice1 + b * slice2 ims = [im_prev] + ims + [im_next] ims = [im.astype(float) for im in ims] im = cv2.merge(ims) im = im.astype(np.float32, copy=False) - 32768 # there is an offset in the 16-bit png files, intensity - 32768 = Hounsfield unit return im def get_slice_name(data_dir, imname, delta=0): """Infer slice name with an offset""" if delta == 0: return imname delta = int(delta) dirname, slicename = imname.split(os.sep) slice_idx = int(slicename[:-4]) imname1 = '%s%s%03d.png' % (dirname, os.sep, slice_idx + delta) # if the slice is not in the dataset, use its neighboring slice while not os.path.exists(os.path.join(data_dir, imname1)): # print('file not found:', imname1) delta -= np.sign(delta) imname1 = '%s%s%03d.png' % (dirname, os.sep, slice_idx + delta) if delta == 0: break return imname1 def windowing(im, win): """scale intensity from win[0]~win[1] to float numbers in 0~255""" im1 = im.astype(float) im1 -= win[0] im1 /= win[1] - win[0] im1[im1 > 1] = 1 im1[im1 < 0] = 0 im1 *= 255 im1 -= 50 return im1 def windowing_rev(im, win): """backward windowing""" im1 = im.astype(float)#/255 im1 *= win[1] - win[0] im1 += win[0] return im1 # def get_mask(im): # """use a intensity threshold to 
roughly find the mask of the body""" # th = 32000 # an approximate background intensity value # mask = im > th # mask = binary_opening(mask, structure=np.ones((7, 7))) # roughly remove bed # # mask = binary_dilation(mask) # # mask = binary_fill_holes(mask, structure=np.ones((11,11))) # fill parts like lung # if mask.sum() == 0: # maybe atypical intensity # mask = im * 0 + 1 # return mask.astype(dtype=np.int32) def get_range(mask, margin=0): """Get up, down, left, right extreme coordinates of a binary mask""" idx = np.nonzero(mask) u = max(0, idx[0].min() - margin) d = min(mask.shape[0] - 1, idx[0].max() + margin) l = max(0, idx[1].min() - margin) r = min(mask.shape[1] - 1, idx[1].max() + margin) return [u, d, l, r] def map_box_back(boxes, cx=0, cy=0, im_scale=1.): """Reverse the scaling and offset of boxes""" boxes /= im_scale boxes[:, [0,2]] += cx boxes[:, [1,3]] += cy return boxes def crop_or_pading(img, fixsize): h,w,c = img.shape fh,fw = fixsize mh,mw = max(h, fh),max(w,fw) img_new = np.zeros((mh,mw,c)) img_new[(mh-h)//2:(mh+h)//2, (mw-w)//2:(mw+w)//2, :] = img return img_new[(mh-fh)//2:(mh+fh)//2, (mw-fw)//2:(mw+fw)//2,:], [(mh-h)//2-(mh-fh)//2, (mw-w)//2-(mw-fw)//2]
10,139
34.208333
125
py
AlignShift
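The 2.5D loader in DeepLesionDataset_25d.py above builds its multi-slice input by linearly interpolating neighboring PNG slices whenever the requested cfg.SLICE_INTV is not a multiple of the scan's actual slice spacing. Below is a minimal sketch of that interpolation weight scheme; the fake_slice helper stands in for _load_data_from_png and the numbers are illustrative only, not part of the repository.

import numpy as np

def fake_slice(offset):
    """Stand-in for _load_data_from_png: a constant slice whose value is its z offset."""
    return np.full((4, 4), float(offset))

norm_slice_intv = 2.0   # spacing requested by the config (cfg.SLICE_INTV)
slice_intv = 1.5        # actual spacing stored with the scan

rel_pos = norm_slice_intv / slice_intv     # ~1.33 raw slices per requested step
a = rel_pos - np.floor(rel_pos)            # weight of the farther (ceil) neighbor
b = np.ceil(rel_pos) - rel_pos             # weight of the nearer (floor) neighbor

# virtual slice one requested step above the key slice, as in load_multislice_img_16bit_png
im_next = a * fake_slice(np.ceil(rel_pos)) + b * fake_slice(np.floor(rel_pos))
print(im_next[0, 0])                       # 1.333..., i.e. exactly rel_pos raw slices away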
AlignShift-master/deeplesion/dataset/DeepLesionDataset_align.py
import numpy as np import os import csv import cv2 import logging from pycocotools import mask as mutils from mmcv import Config import torch import os from mmdet.datasets.registry import DATASETS import pickle from mmdet.datasets.pipelines import Compose from mmdet.datasets.custom import CustomDataset @DATASETS.register_module class DeepLesionDatasetAlign(CustomDataset): CLASSES = ('lesion') def __init__(self, ann_file, pipeline, pre_pipeline, dicm2png_cfg, data_root=None, image_path='', seg_prefix=None, proposal_file=None, test_mode=False): self.data_path = data_root self.classes = ['__background__', 'lesion'] self.num_classes = len(self.classes) self.load_annotations(ann_file) self.img_ids = [a['filename'] for a in self.ann] self.cat_ids = self.classes # self.image_fn_list, self.lesion_idx_grouped = self.load_split_index() # self.num_images = len(self.image_fn_list) self.cfg = Config(dicm2png_cfg) self.pipeline = Compose(pipeline) self.pre_pipeline = Compose(pre_pipeline) self.img_path = image_path self.seg_prefix = seg_prefix self.proposals = None if proposal_file is not None: self.proposals = None self.slice_num = self.cfg.NUM_SLICES self.is_train = not test_mode if self.is_train: self._set_group_flag() def __getitem__(self, index): """ Args: index (int): Index Returns: tuple: (image, target, info). """ # print(index) ann = self.ann[index] image_fn = ann['filename'] boxes = np.array(ann['ann']['bboxes'], dtype=np.float32) masks= np.array([mutils.decode(m) for m in ann['ann']['masks']], dtype=np.float32).transpose((1,2,0)) # masks = masks.sum(0)>0 slice_intv = ann['ann']['slice_intv'] spacing = ann['ann']['spacing'] label = ann['ann']['labels'] recists = ann['ann']['recists'] diameters = ann['ann']['diameters'] gender = float(ann['ann']['gender']) age = float(ann['ann']['age']) z_coord = float(ann['ann']['z_coord']) im, im_scale = load_prep_img(self.img_path, image_fn, spacing, slice_intv, self.cfg, num_slice=self.slice_num, is_train=self.is_train) # im -= self.cfg.PIXEL_MEAN boxes = self.clip_to_image(boxes, im, False) masks = masks.transpose((2, 0, 1)) boxes = boxes.astype(np.float32) results = dict()#img_info=ann, ann_info=infos results['filename'] = image_fn # results['flage'] = flage infos = {'recists': recists, 'diameters': diameters, 'spacing': spacing, 'thickness':slice_intv } results['infos'] = infos results['filename'] = image_fn # results['flage'] = flage results['img'] = im results['img_shape'] = im.shape results['ori_shape'] = im.shape#[ann['height'], ann['width']] if self.proposals is not None: results['proposals'] = self.proposals[index] results['bbox_fields'] = [] results['mask_fields'] = [] results['gt_bboxes'] = boxes results['bbox_fields'].append('gt_bboxes') results['gt_labels'] = label.astype(np.int64) results['gt_masks'] = masks results['mask_fields'].append('gt_masks') results['thickness'] = slice_intv results = self.pre_pipeline(results) # results['gt_masks'] = masks_scaled # results['mask_fields'].append('gt_masks') return self.pipeline(results) def __len__(self): return len(self.ann) # return 160 #for debug def clip_to_image(self, bbox, img, remove_empty=True): TO_REMOVE = 1 bbox[:, 0] = bbox[:, 0].clip(min=0, max=img.shape[1] - TO_REMOVE) bbox[:, 1] = bbox[:, 1].clip(min=0, max=img.shape[0] - TO_REMOVE) bbox[:, 2] = bbox[:, 2].clip(min=0, max=img.shape[1] - TO_REMOVE) bbox[:, 3] = bbox[:, 3].clip(min=0, max=img.shape[0] - TO_REMOVE) if remove_empty: box = bbox keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0]) return bbox[keep] return bbox def 
_set_group_flag(self): """Set flag according to image aspect ratio. Images with aspect ratio greater than 1 will be set as group 1, otherwise group 0. """ self.flag = np.zeros(len(self), dtype=np.uint8) if not self.cfg.GROUNP_ZSAPACING: return for i in range(len(self)): img_info = self.ann[i] if img_info['ann']['slice_intv'] < 2.0: self.flag[i] = 1 logging.info(f'slice_intv grounped by 2.0: {sum(self.flag)}/{len(self)-sum(self.flag)}') def load_annotations(self, ann_file): """load annotations and meta-info from DL_info.csv""" with open(ann_file,'rb') as f: self.ann = pickle.load(f) def load_prep_img(data_dir, imname, spacing, slice_intv, cfg, num_slice=3, is_train=False): """load volume, windowing, interpolate multiple slices, clip black border, resize according to spacing""" im = load_multislice_img_16bit_png(data_dir, imname, slice_intv, num_slice, norm_slice_intv=cfg.SLICE_INTV) im = windowing(im, cfg.WINDOWING) im_shape = im.shape[0:2] im_scale = 1.0 return im, im_scale def load_multislice_img_16bit_png(data_dir, imname, slice_intv, num_slice, norm_slice_intv): data_cache = {} def _load_data_from_png(imname, delta=0): imname1 = get_slice_name(data_dir, imname, delta) if imname1 not in data_cache.keys(): data_cache[imname1] = cv2.imread(os.path.join(data_dir, imname1), -1) assert data_cache[imname1] is not None, 'file reading error: ' + imname1 # if data_cache[imname1] is None: # print('file reading error:', imname1) return data_cache[imname1] _load_data = _load_data_from_png im_cur = _load_data(imname) if norm_slice_intv == 0 or np.isnan(slice_intv) or slice_intv < 0: ims = [im_cur] * num_slice # only use the central slice else: ims = [im_cur] # find neighboring slices of im_cure rel_pos = float(norm_slice_intv) / slice_intv a = rel_pos - np.floor(rel_pos) b = np.ceil(rel_pos) - rel_pos if rel_pos <= 1.0:#slice_intv bigger than norm_slice_intv, let alien_conv to interplot for p in range(int((num_slice-1)/2)): im_prev = _load_data(imname, - (p + 1)) im_next = _load_data(imname, (p + 1)) ims = [im_prev] + ims + [im_next] elif a == 0: # required SLICE_INTV is a divisible to the actual slice_intv, don't need interpolation for p in range(int((num_slice-1)/2)): im_prev = _load_data(imname, - rel_pos * (p + 1)) im_next = _load_data(imname, rel_pos * (p + 1)) ims = [im_prev] + ims + [im_next] else: for p in range(int((num_slice-1)/2)): intv1 = rel_pos*(p+1) slice1 = _load_data(imname, - np.ceil(intv1)) slice2 = _load_data(imname, - np.floor(intv1)) im_prev = a * slice1 + b * slice2 # linear interpolation slice1 = _load_data(imname, np.ceil(intv1)) slice2 = _load_data(imname, np.floor(intv1)) im_next = a * slice1 + b * slice2 ims = [im_prev] + ims + [im_next] ims = [im.astype(float) for im in ims] im = cv2.merge(ims) im = im.astype(np.float32, copy=False) - 32768 # there is an offset in the 16-bit png files, intensity - 32768 = Hounsfield unit return im def get_slice_name(data_dir, imname, delta=0): """Infer slice name with an offset""" if delta == 0: return imname delta = int(delta) dirname, slicename = imname.split(os.sep) slice_idx = int(slicename[:-4]) imname1 = '%s%s%03d.png' % (dirname, os.sep, slice_idx + delta) # if the slice is not in the dataset, use its neighboring slice while not os.path.exists(os.path.join(data_dir, imname1)): # print('file not found:', imname1) delta -= np.sign(delta) imname1 = '%s%s%03d.png' % (dirname, os.sep, slice_idx + delta) if delta == 0: break return imname1 def windowing(im, win): """scale intensity from win[0]~win[1] to float numbers in 0~255""" im1 = 
im.astype(float) im1 -= win[0] im1 /= win[1] - win[0] im1[im1 > 1] = 1 im1[im1 < 0] = 0 im1 *= 255 im1 -= 50 return im1 def windowing_rev(im, win): """backward windowing""" im1 = im.astype(float)#/255 im1 *= win[1] - win[0] im1 += win[0] return im1 def get_range(mask, margin=0): """Get up, down, left, right extreme coordinates of a binary mask""" idx = np.nonzero(mask) u = max(0, idx[0].min() - margin) d = min(mask.shape[0] - 1, idx[0].max() + margin) l = max(0, idx[1].min() - margin) r = min(mask.shape[1] - 1, idx[1].max() + margin) return [u, d, l, r] def map_box_back(boxes, cx=0, cy=0, im_scale=1.): """Reverse the scaling and offset of boxes""" boxes /= im_scale boxes[:, [0,2]] += cx boxes[:, [1,3]] += cy return boxes
9,694
34.126812
125
py
AlignShift
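DeepLesionDataset_align.py above differs from the 2.5D variant in one branch: when rel_pos <= 1.0 (the scan's slices are at least as thick as cfg.SLICE_INTV), raw neighboring slices are loaded without interpolation, and the z-alignment is left to AlignShiftConv via the thickness value carried in results. A small sketch of that branch order follows, assuming the same argument names as the loader above; the function itself is illustrative and not part of the repository.

import numpy as np

def pick_loading_strategy(norm_slice_intv, slice_intv):
    """Mirror of the branch order in load_multislice_img_16bit_png (align variant).

    Returns a label only; the real code loads or interpolates the PNG slices.
    """
    if norm_slice_intv == 0 or np.isnan(slice_intv) or slice_intv < 0:
        return 'replicate central slice'
    rel_pos = float(norm_slice_intv) / slice_intv
    if rel_pos <= 1.0:
        # thick scans: keep raw neighbors, AlignShiftConv aligns them using `thickness`
        return 'raw neighbors, no interpolation'
    if rel_pos == np.floor(rel_pos):
        return 'stride by rel_pos, no interpolation'
    return 'linear interpolation between floor/ceil neighbors'

print(pick_loading_strategy(2.0, 5.0))   # raw neighbors, no interpolation
print(pick_loading_strategy(2.0, 1.0))   # stride by rel_pos, no interpolation
print(pick_loading_strategy(2.0, 1.5))   # linear interpolation between floor/ceil neighbors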
AlignShift-master/deeplesion/dataset/DeepLesionDataset_tsm.py
''' Copyright Apache- borrow from ''' import numpy as np import os import csv import cv2 import logging from pycocotools import mask as mutils from mmcv import Config import torch import os from mmdet.datasets.registry import DATASETS import pickle from mmdet.datasets.pipelines import Compose from mmdet.datasets.custom import CustomDataset @DATASETS.register_module class DeepLesionDatasetTSM(CustomDataset): CLASSES = ('lesion') def __init__(self, ann_file, pipeline, pre_pipeline, dicm2png_cfg, data_root=None, image_path='', seg_prefix=None, proposal_file=None, test_mode=False): self.data_path = data_root self.classes = ['__background__', 'lesion'] self.num_classes = len(self.classes) self.load_annotations(ann_file) self.img_ids = [a['filename'] for a in self.ann] self.cat_ids = self.classes # self.image_fn_list, self.lesion_idx_grouped = self.load_split_index() # self.num_images = len(self.image_fn_list) self.cfg = Config(dicm2png_cfg) self.pipeline = Compose(pipeline) self.pre_pipeline = Compose(pre_pipeline) self.img_path = image_path self.seg_prefix = seg_prefix self.proposals = None if proposal_file is not None: self.proposals = None self.slice_num = self.cfg.NUM_SLICES self.is_train = not test_mode if self.is_train: self._set_group_flag() def __getitem__(self, index): """ Args: index (int): Index Returns: tuple: (image, target, info). """ # print(index) ann = self.ann[index] image_fn = ann['filename'] boxes = np.array(ann['ann']['bboxes'], dtype=np.float32) masks= np.array([mutils.decode(m) for m in ann['ann']['masks']], dtype=np.float32).transpose((1,2,0)) # masks = masks.sum(0)>0 slice_intv = ann['ann']['slice_intv'] spacing = ann['ann']['spacing'] label = ann['ann']['labels'] recists = ann['ann']['recists'] diameters = ann['ann']['diameters'] gender = float(ann['ann']['gender']) age = float(ann['ann']['age']) z_coord = float(ann['ann']['z_coord']) im, im_scale = load_prep_img(self.img_path, image_fn, spacing, slice_intv, self.cfg, num_slice=self.slice_num, is_train=self.is_train) # im -= self.cfg.PIXEL_MEAN boxes = self.clip_to_image(boxes, im, False) masks = masks.transpose((2, 0, 1)) boxes = boxes.astype(np.float32) results = dict()#img_info=ann, ann_info=infos results['filename'] = image_fn # results['flage'] = flage infos = {'recists': recists, 'diameters': diameters, 'spacing': spacing, 'thickness':slice_intv } # results['flage'] = flage results['img'] = im results['img_shape'] = im.shape results['ori_shape'] = im.shape#[ann['height'], ann['width']] if self.proposals is not None: results['proposals'] = self.proposals[index] results['bbox_fields'] = [] results['mask_fields'] = [] results['gt_bboxes'] = boxes results['bbox_fields'].append('gt_bboxes') results['gt_labels'] = label.astype(np.int64) results['gt_masks'] = masks results['mask_fields'].append('gt_masks') results['thickness'] = slice_intv results = self.pre_pipeline(results) # results['gt_masks'] = masks_scaled # results['mask_fields'].append('gt_masks') return self.pipeline(results) def __len__(self): return len(self.ann) # return 160 def clip_to_image(self, bbox, img, remove_empty=True): TO_REMOVE = 1 bbox[:, 0] = bbox[:, 0].clip(min=0, max=img.shape[1] - TO_REMOVE) bbox[:, 1] = bbox[:, 1].clip(min=0, max=img.shape[0] - TO_REMOVE) bbox[:, 2] = bbox[:, 2].clip(min=0, max=img.shape[1] - TO_REMOVE) bbox[:, 3] = bbox[:, 3].clip(min=0, max=img.shape[0] - TO_REMOVE) if remove_empty: box = bbox keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0]) return bbox[keep] return bbox def _set_group_flag(self): """Set flag 
according to image aspect ratio. Images with aspect ratio greater than 1 will be set as group 1, otherwise group 0. """ self.flag = np.zeros(len(self), dtype=np.uint8) if not self.cfg.GROUNP_ZSAPACING: return for i in range(len(self)): img_info = self.ann[i] if img_info['ann']['slice_intv'] < 2.0: self.flag[i] = 1 logging.info(f'slice_intv grounped by 2.0: {sum(self.flag)}/{len(self)-sum(self.flag)}') def load_annotations(self, ann_file): """load annotations and meta-info from DL_info.csv""" with open(ann_file,'rb') as f: self.ann = pickle.load(f) def load_prep_img(data_dir, imname, spacing, slice_intv, cfg, num_slice=3, is_train=False): """load volume, windowing, interpolate multiple slices, clip black border, resize according to spacing""" im = load_multislice_img_16bit_png(data_dir, imname, slice_intv, num_slice, norm_slice_intv=cfg.SLICE_INTV) im = windowing(im, cfg.WINDOWING) im_shape = im.shape[0:2] im_scale = 1.0 return im, im_scale def load_multislice_img_16bit_png(data_dir, imname, slice_intv, num_slice, norm_slice_intv): data_cache = {} def _load_data_from_png(imname, delta=0): imname1 = get_slice_name(data_dir, imname, delta) if imname1 not in data_cache.keys(): data_cache[imname1] = cv2.imread(os.path.join(data_dir, imname1), -1) assert data_cache[imname1] is not None, 'file reading error: ' + imname1 # if data_cache[imname1] is None: # print('file reading error:', imname1) return data_cache[imname1] _load_data = _load_data_from_png im_cur = _load_data(imname) if norm_slice_intv == 0 or np.isnan(slice_intv) or slice_intv < 0: ims = [im_cur] * num_slice # only use the central slice else: ims = [im_cur] # find neighboring slices of im_cure rel_pos = float(norm_slice_intv) / slice_intv a = rel_pos - np.floor(rel_pos) b = np.ceil(rel_pos) - rel_pos if a == 0: # required SLICE_INTV is a divisible to the actual slice_intv, don't need interpolation for p in range(int((num_slice-1)/2)): im_prev = _load_data(imname, - rel_pos * (p + 1)) im_next = _load_data(imname, rel_pos * (p + 1)) ims = [im_prev] + ims + [im_next] else: for p in range(int((num_slice-1)/2)): intv1 = rel_pos*(p+1) slice1 = _load_data(imname, - np.ceil(intv1)) slice2 = _load_data(imname, - np.floor(intv1)) im_prev = a * slice1 + b * slice2 # linear interpolation slice1 = _load_data(imname, np.ceil(intv1)) slice2 = _load_data(imname, np.floor(intv1)) im_next = a * slice1 + b * slice2 ims = [im_prev] + ims + [im_next] # if rel_pos <= 1.: # required SLICE_INTV is a divisible to the actual slice_intv, don't need interpolation # for p in range(int((num_slice-1)/2)): # im_prev = _load_data(imname, -(p + 1)) # im_next = _load_data(imname, (p + 1)) # ims = [im_prev] + ims + [im_next] # elif a == 0: # required SLICE_INTV is a divisible to the actual slice_intv, don't need interpolation # for p in range(int((num_slice-1)/2)): # im_prev = _load_data(imname, - rel_pos * (p + 1)) # im_next = _load_data(imname, rel_pos * (p + 1)) # ims = [im_prev] + ims + [im_next] # else: # for p in range(int((num_slice-1)/2)): # intv1 = rel_pos*(p+1) # slice1 = _load_data(imname, - np.ceil(intv1)) # slice2 = _load_data(imname, - np.floor(intv1)) # im_prev = a * slice1 + b * slice2 # linear interpolation # slice1 = _load_data(imname, np.ceil(intv1)) # slice2 = _load_data(imname, np.floor(intv1)) # im_next = a * slice1 + b * slice2 # ims = [im_prev] + ims + [im_next] ims = [im.astype(float) for im in ims] im = cv2.merge(ims) im = im.astype(np.float32, copy=False) - 32768 # there is an offset in the 16-bit png files, intensity - 32768 = Hounsfield 
unit return im def get_slice_name(data_dir, imname, delta=0): """Infer slice name with an offset""" if delta == 0: return imname delta = int(delta) dirname, slicename = imname.split(os.sep) slice_idx = int(slicename[:-4]) imname1 = '%s%s%03d.png' % (dirname, os.sep, slice_idx + delta) # if the slice is not in the dataset, use its neighboring slice while not os.path.exists(os.path.join(data_dir, imname1)): # print('file not found:', imname1) delta -= np.sign(delta) imname1 = '%s%s%03d.png' % (dirname, os.sep, slice_idx + delta) if delta == 0: break return imname1 def windowing(im, win): """scale intensity from win[0]~win[1] to float numbers in 0~255""" im1 = im.astype(float) im1 -= win[0] im1 /= win[1] - win[0] im1[im1 > 1] = 1 im1[im1 < 0] = 0 im1 *= 255 im1 -= 50 return im1 def windowing_rev(im, win): """backward windowing""" im1 = im.astype(float)#/255 im1 *= win[1] - win[0] im1 += win[0] return im1 # def get_mask(im): # """use a intensity threshold to roughly find the mask of the body""" # th = 32000 # an approximate background intensity value # mask = im > th # mask = binary_opening(mask, structure=np.ones((7, 7))) # roughly remove bed # # mask = binary_dilation(mask) # # mask = binary_fill_holes(mask, structure=np.ones((11,11))) # fill parts like lung # if mask.sum() == 0: # maybe atypical intensity # mask = im * 0 + 1 # return mask.astype(dtype=np.int32) def get_range(mask, margin=0): """Get up, down, left, right extreme coordinates of a binary mask""" idx = np.nonzero(mask) u = max(0, idx[0].min() - margin) d = min(mask.shape[0] - 1, idx[0].max() + margin) l = max(0, idx[1].min() - margin) r = min(mask.shape[1] - 1, idx[1].max() + margin) return [u, d, l, r] def map_box_back(boxes, cx=0, cy=0, im_scale=1.): """Reverse the scaling and offset of boxes""" boxes /= im_scale boxes[:, [0,2]] += cx boxes[:, [1,3]] += cy return boxes def crop_or_pading(img, fixsize): h,w,c = img.shape fh,fw = fixsize mh,mw = max(h, fh),max(w,fw) img_new = np.zeros((mh,mw,c)) img_new[(mh-h)//2:(mh+h)//2, (mw-w)//2:(mw+w)//2, :] = img return img_new[(mh-fh)//2:(mh+fh)//2, (mw-fw)//2:(mw+fw)//2,:], [(mh-h)//2-(mh-fh)//2, (mw-w)//2-(mw-fw)//2]
11,411
34.774295
125
py
AlignShift
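In all of these dataset variants, _set_group_flag (despite its aspect-ratio docstring) groups training samples by slice spacing: volumes with slice_intv below 2.0 mm get flag 1, the rest flag 0, and mmdetection's group-aware sampler then draws each mini-batch from a single group. A tiny illustration of the flag computation on made-up spacings:

import numpy as np

slice_intvs = np.array([1.0, 5.0, 1.25, 2.5, 0.625])           # per-sample spacings in mm (made up)
flag = (slice_intvs < 2.0).astype(np.uint8)                     # same rule as _set_group_flag
print(flag)                                                      # [1 0 1 0 1]
print(f'thin: {flag.sum()}, thick: {len(flag) - flag.sum()}')    # matches the logged ratio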
AlignShift-master/deeplesion/dataset/DeepLesionDataset_a3d.py
import numpy as np import os import csv import cv2 import logging from pycocotools import mask as mutils from mmcv import Config import torch import os from mmdet.datasets.registry import DATASETS import pickle from mmdet.datasets.pipelines import Compose from mmdet.datasets.custom import CustomDataset @DATASETS.register_module class DeepLesionDatasetA3D(CustomDataset): CLASSES = ('lesion') def __init__(self, ann_file, pipeline, pre_pipeline, dicm2png_cfg, data_root=None, image_path='', seg_prefix=None, proposal_file=None, test_mode=False): self.data_path = data_root self.classes = ['__background__', 'lesion'] self.num_classes = len(self.classes) self.load_annotations(ann_file) self.img_ids = [a['filename'] for a in self.ann] self.cat_ids = self.classes # self.image_fn_list, self.lesion_idx_grouped = self.load_split_index() # self.num_images = len(self.image_fn_list) self.cfg = Config(dicm2png_cfg) self.pipeline = Compose(pipeline) self.pre_pipeline = Compose(pre_pipeline) self.img_path = image_path self.seg_prefix = seg_prefix self.proposals = None if proposal_file is not None: self.proposals = None self.slice_num = self.cfg.NUM_SLICES self.is_train = not test_mode if self.is_train: self._set_group_flag() def __getitem__(self, index): """ Args: index (int): Index Returns: tuple: (image, target, info). """ # print(index) ann = self.ann[index] image_fn = ann['filename'] boxes = np.array(ann['ann']['bboxes'], dtype=np.float32) masks= np.array([mutils.decode(m) for m in ann['ann']['masks']], dtype=np.float32).transpose((1,2,0)) # masks = masks.sum(0)>0 slice_intv = ann['ann']['slice_intv'] spacing = ann['ann']['spacing'] label = ann['ann']['labels'] recists = ann['ann']['recists'] diameters = ann['ann']['diameters'] spacing = ann['ann']['spacing'] gender = float(ann['ann']['gender']) age = float(ann['ann']['age']) z_coord = float(ann['ann']['z_coord']) im, im_scale = load_prep_img(self.img_path, image_fn, spacing, slice_intv, self.cfg, num_slice=self.slice_num, is_train=self.is_train) # im -= self.cfg.PIXEL_MEAN boxes = self.clip_to_image(boxes, im, False) masks = masks.transpose((2, 0, 1)) boxes = boxes.astype(np.float32) results = dict()#img_info=ann, ann_info=infos results['filename'] = image_fn # results['flage'] = flage infos = {'recists': recists, 'diameters': diameters, 'spacing': spacing, 'thickness':slice_intv } results['infos'] = infos results['filename'] = image_fn # results['flage'] = flage results['img'] = im results['img_shape'] = im.shape results['ori_shape'] = im.shape#[ann['height'], ann['width']] if self.proposals is not None: results['proposals'] = self.proposals[index] results['bbox_fields'] = [] results['mask_fields'] = [] results['gt_bboxes'] = boxes results['bbox_fields'].append('gt_bboxes') results['gt_labels'] = label.astype(np.int64) results['gt_masks'] = masks results['mask_fields'].append('gt_masks') results['thickness'] = slice_intv results = self.pre_pipeline(results) # results['gt_masks'] = masks_scaled # results['mask_fields'].append('gt_masks') return self.pipeline(results) def __len__(self): return len(self.ann) # return 160 #for debug def clip_to_image(self, bbox, img, remove_empty=True): TO_REMOVE = 1 bbox[:, 0] = bbox[:, 0].clip(min=0, max=img.shape[1] - TO_REMOVE) bbox[:, 1] = bbox[:, 1].clip(min=0, max=img.shape[0] - TO_REMOVE) bbox[:, 2] = bbox[:, 2].clip(min=0, max=img.shape[1] - TO_REMOVE) bbox[:, 3] = bbox[:, 3].clip(min=0, max=img.shape[0] - TO_REMOVE) if remove_empty: box = bbox keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0]) return 
bbox[keep] return bbox def _set_group_flag(self): """Set flag according to image aspect ratio. Images with aspect ratio greater than 1 will be set as group 1, otherwise group 0. """ self.flag = np.zeros(len(self), dtype=np.uint8) if not self.cfg.GROUNP_ZSAPACING: return for i in range(len(self)): img_info = self.ann[i] if img_info['ann']['slice_intv'] < 2.0: self.flag[i] = 1 logging.info(f'slice_intv grounped by 2.0: {sum(self.flag)}/{len(self)-sum(self.flag)}') def load_annotations(self, ann_file): """load annotations and meta-info from DL_info.csv""" with open(ann_file,'rb') as f: self.ann = pickle.load(f) def load_prep_img(data_dir, imname, spacing, slice_intv, cfg, num_slice=3, is_train=False): """load volume, windowing, interpolate multiple slices, clip black border, resize according to spacing""" im = load_multislice_img_16bit_png(data_dir, imname, slice_intv, num_slice, norm_slice_intv=cfg.SLICE_INTV) im = windowing(im, cfg.WINDOWING) im_shape = im.shape[0:2] im_scale = 1.0 return im, im_scale def load_multislice_img_16bit_png(data_dir, imname, slice_intv, num_slice, norm_slice_intv): data_cache = {} def _load_data_from_png(imname, delta=0): imname1 = get_slice_name(data_dir, imname, delta) if imname1 not in data_cache.keys(): data_cache[imname1] = cv2.imread(os.path.join(data_dir, imname1), -1) assert data_cache[imname1] is not None, 'file reading error: ' + imname1 # if data_cache[imname1] is None: # print('file reading error:', imname1) return data_cache[imname1] _load_data = _load_data_from_png im_cur = _load_data(imname) if norm_slice_intv == 0 or np.isnan(slice_intv) or slice_intv < 0: ims = [im_cur] * num_slice # only use the central slice else: ims = [im_cur] # find neighboring slices of im_cure rel_pos = float(norm_slice_intv) / slice_intv a = rel_pos - np.floor(rel_pos) b = np.ceil(rel_pos) - rel_pos ###########################都插值######################### if a == 0: # required SLICE_INTV is a divisible to the actual slice_intv, don't need interpolation for p in range(int((num_slice-1)/2)): im_prev = _load_data(imname, - rel_pos * (p + 1)) im_next = _load_data(imname, rel_pos * (p + 1)) ims = [im_prev] + ims + [im_next] else: for p in range(int((num_slice-1)/2)): intv1 = rel_pos*(p+1) slice1 = _load_data(imname, - np.ceil(intv1)) slice2 = _load_data(imname, - np.floor(intv1)) im_prev = a * slice1 + b * slice2 # linear interpolation slice1 = _load_data(imname, np.ceil(intv1)) slice2 = _load_data(imname, np.floor(intv1)) im_next = a * slice1 + b * slice2 ims = [im_prev] + ims + [im_next] # if rel_pos <= 1.0:#slice_intv bigger than norm_slice_intv, let alien_conv to interplot ###########################厚层不插值######################### # for p in range(int((num_slice-1)/2)): # im_prev = _load_data(imname, - (p + 1)) # im_next = _load_data(imname, (p + 1)) # ims = [im_prev] + ims + [im_next] ###########################厚层不插值######################### ###########################厚层插值######################### # for p in range(int((num_slice-1)/2)): # intv1 = rel_pos*(p+1) # slice1 = _load_data(imname, - np.ceil(intv1)) # slice2 = _load_data(imname, - np.floor(intv1)) # im_prev = a * slice1 + b * slice2 # linear interpolation # slice1 = _load_data(imname, np.ceil(intv1)) # slice2 = _load_data(imname, np.floor(intv1)) # im_next = a * slice1 + b * slice2 # ims = [im_prev] + ims + [im_next] ###########################厚层插值######################### # elif a == 0: # required SLICE_INTV is a divisible to the actual slice_intv, don't need interpolation # for p in range(int((num_slice-1)/2)): # 
im_prev = _load_data(imname, - rel_pos * (p + 1)) # im_next = _load_data(imname, rel_pos * (p + 1)) # ims = [im_prev] + ims + [im_next] ###########################薄层不插值######################### # else: # for p in range(int((num_slice-1)/2)): # intv1 = p+1 # im_prev = _load_data(imname, -intv1) # im_next = _load_data(imname, intv1) # ims = [im_prev] + ims + [im_next] ###########################薄层不插值######################### ###########################薄层插值######################### # else: # for p in range(int((num_slice-1)/2)): # intv1 = rel_pos*(p+1) # slice1 = _load_data(imname, - np.ceil(intv1)) # slice2 = _load_data(imname, - np.floor(intv1)) # im_prev = a * slice1 + b * slice2 # linear interpolation # slice1 = _load_data(imname, np.ceil(intv1)) # slice2 = _load_data(imname, np.floor(intv1)) # im_next = a * slice1 + b * slice2 # ims = [im_prev] + ims + [im_next] ###########################薄层插值######################### ims = [im.astype(float) for im in ims] im = cv2.merge(ims) im = im.astype(np.float32, copy=False) - 32768 # there is an offset in the 16-bit png files, intensity - 32768 = Hounsfield unit return im def get_slice_name(data_dir, imname, delta=0): """Infer slice name with an offset""" if delta == 0: return imname delta = int(delta) dirname, slicename = imname.split(os.sep) slice_idx = int(slicename[:-4]) imname1 = '%s%s%03d.png' % (dirname, os.sep, slice_idx + delta) # if the slice is not in the dataset, use its neighboring slice while not os.path.exists(os.path.join(data_dir, imname1)): # print('file not found:', imname1) delta -= np.sign(delta) imname1 = '%s%s%03d.png' % (dirname, os.sep, slice_idx + delta) if delta == 0: break return imname1 def windowing(im, win): """scale intensity from win[0]~win[1] to float numbers in 0~255""" im1 = im.astype(float) im1 -= win[0] im1 /= win[1] - win[0] im1[im1 > 1] = 1 im1[im1 < 0] = 0 im1 *= 255 im1 -= 50 return im1 def windowing_rev(im, win): """backward windowing""" im1 = im.astype(float)#/255 im1 *= win[1] - win[0] im1 += win[0] return im1 def get_range(mask, margin=0): """Get up, down, left, right extreme coordinates of a binary mask""" idx = np.nonzero(mask) u = max(0, idx[0].min() - margin) d = min(mask.shape[0] - 1, idx[0].max() + margin) l = max(0, idx[1].min() - margin) r = min(mask.shape[1] - 1, idx[1].max() + margin) return [u, d, l, r] def map_box_back(boxes, cx=0, cy=0, im_scale=1.): """Reverse the scaling and offset of boxes""" boxes /= im_scale boxes[:, [0,2]] += cx boxes[:, [1,3]] += cy return boxes
12,082
36.641745
125
py
AlignShift
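DeepLesionDataset_a3d.py above keeps the same post-processing helpers; map_box_back, for instance, undoes the resize and crop applied during preprocessing by rescaling the boxes first and then shifting them by the crop offsets. A quick numeric check, assuming map_box_back from the file above is in scope and using made-up coordinates:

import numpy as np

boxes = np.array([[20., 40., 60., 80.]])                  # box in the processed image
restored = map_box_back(boxes.copy(), cx=5, cy=10, im_scale=2.0)
print(restored)                                           # [[15. 30. 35. 50.]]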
AlignShift-master/deeplesion/models/truncated_densenet3d_alignshift.py
# Ke Yan, Imaging Biomarkers and Computer-Aided Diagnosis Laboratory, from collections import namedtuple import torch import torch.nn.functional as F from torch import nn import torchvision.models as models from torchvision.models.densenet import model_urls import math import torch.utils.model_zoo as model_zoo import re from collections import OrderedDict from mmdet.models.registry import BACKBONES from nn.operators import AlignShiftConv import torch.utils.checkpoint as cp from mmdet.models.utils import build_conv_layer, build_norm_layer # mybn = nn.BatchNorm3d # mybn = nn.SyncBatchNorm norm_cfg = dict(type='SyncBN') def _bn_function_factory(norm, relu, conv, thickness): def bn_function(*inputs): concated_features = torch.cat(inputs, 1) bottleneck_output = conv(relu(norm(concated_features)), thickness) return bottleneck_output return bn_function class _DenseLayer(nn.Sequential): def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, n_fold, memory_efficient=False, ref_thickness=None): super(_DenseLayer, self).__init__() self.add_module('norm1', build_norm_layer(norm_cfg, num_input_features, postfix=1)[1]), self.add_module('relu1', nn.ReLU(inplace=False)), self.add_module('conv1', AlignShiftConv(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, alignshift=True, bias=False, ref_thickness=ref_thickness, n_fold=n_fold,)), self.add_module('norm2', build_norm_layer(norm_cfg, bn_size * growth_rate, postfix=1)[1]), self.add_module('relu2', nn.ReLU(inplace=False)), self.add_module('conv2', AlignShiftConv(bn_size * growth_rate, growth_rate, alignshift=True, kernel_size=3, stride=1, padding=1, bias=False, ref_thickness=ref_thickness, n_fold=n_fold)), self.drop_rate = drop_rate self.memory_efficient = memory_efficient def forward(self, *prev_features, thickness): bn_function = _bn_function_factory(self.norm1, self.relu1, self.conv1, thickness=thickness) if self.memory_efficient:# and any(prev_feature.requires_grad for prev_feature in prev_features):hyadd 这里自己设计节省效率 bottleneck_output = cp.checkpoint(bn_function, *prev_features) else: bottleneck_output = bn_function(*prev_features, thickness=thickness) new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)), thickness) if self.drop_rate > 0: new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) return new_features class _DenseBlock(nn.Module): def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, n_fold, memory_efficient=False, ref_thickness=None): super(_DenseBlock, self).__init__() for i in range(num_layers): layer = _DenseLayer( num_input_features + i * growth_rate, growth_rate=growth_rate, bn_size=bn_size, drop_rate=drop_rate, memory_efficient=memory_efficient, ref_thickness=ref_thickness, n_fold=n_fold ) self.add_module('denselayer%d' % (i + 1), layer) def forward(self, init_features, thickness): features = [init_features] for name, layer in self.named_children(): new_features = layer(*features, thickness=thickness) features.append(new_features) return torch.cat(features, 1) class _Transition(nn.Sequential): def __init__(self, num_input_features, num_output_features): super().__init__() self.add_module('norm', build_norm_layer(norm_cfg, num_input_features, postfix=1)[1]) self.add_module('relu', nn.ReLU(inplace=True)) self.add_module('conv', AlignShiftConv(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False, alignshift=False)) self.add_module('pool', nn.AvgPool3d(kernel_size=[1, 2, 2], stride=[1, 2, 2])) class 
_Reduction_z(nn.Sequential): def __init__(self, input_features, input_slice): super().__init__() self.add_module('reduction_z_conv', nn.Conv3d(input_features, input_features, kernel_size=[input_slice, 1, 1], stride=1, bias=False)) # self.add_module('reduction_z_pooling', nn.AvgPool3d(kernel_size=[input_slice, 1, 1], stride=1)) @BACKBONES.register_module class DenseNetCustomTrunc3dAlign(nn.Module): def __init__(self, out_dim=256, n_cts=3, fpn_finest_layer=1, ref_thickness=2.0, n_fold=8, memory_efficient=True): super().__init__() self.depth = 121 self.feature_upsample = True self.fpn_finest_layer = fpn_finest_layer self.out_dim = out_dim self.n_cts = n_cts self.mid_ct = n_cts//2 self.ref_thickness = ref_thickness self.n_fold = n_fold assert self.depth in [121] if self.depth == 121: num_init_features = 64 growth_rate = 32 block_config = (6, 12, 24) self.in_dim = [64, 256, 512, 1024] bn_size = 4 drop_rate = 0 # First convolution self.conv0 = AlignShiftConv(1, num_init_features, kernel_size=7, stride=2, padding=3, bias=False, alignshift=False) self.norm0 = build_norm_layer(norm_cfg, num_init_features, postfix=1)[1] self.relu0 = nn.ReLU(inplace=True) self.pool0 = nn.MaxPool3d(kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1]) # Each denseblock num_features = num_init_features for i, num_layers in enumerate(block_config): block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, ref_thickness=self.ref_thickness, n_fold=self.n_fold, memory_efficient=memory_efficient) self.add_module('denseblock%d' % (i + 1), block) num_features = num_features + num_layers * growth_rate reductionz = _Reduction_z(num_features, self.n_cts) # normrelu = _StageNormRelu(num_features) # self.add_module('normrelu%d' % (i + 1), normrelu) self.add_module('reductionz%d' % (i + 1), reductionz) if i != len(block_config) - 1: trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2) self.add_module('transition%d' % (i + 1), trans) num_features = num_features // 2 # Final batch norm # self.add_module('norm5', nn.BatchNorm2d(num_features)) # Official init from torch repo. 
for m in self.modules(): if isinstance(m, nn.Conv3d): nn.init.kaiming_normal_(m.weight.data) elif isinstance(m, nn.BatchNorm3d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): m.bias.data.zero_() if self.feature_upsample: for p in range(4, self.fpn_finest_layer - 1, -1): layer = nn.Conv2d(self.in_dim[p - 1], self.out_dim, 1) name = 'lateral%d' % p self.add_module(name, layer) nn.init.kaiming_uniform_(layer.weight, a=1) nn.init.constant_(layer.bias, 0) self.init_weights() # if syncbn: # self = nn.SyncBatchNorm.convert_sync_batchnorm(self) def forward(self, x, thickness): x = self.conv0(x) x = self.norm0(x) x = self.relu0(x) x = self.pool0(x) x = self.denseblock1(x, thickness) # x = self.normrelu1(x) redc1 = self.reductionz1(x) x = self.transition1(x) x = self.denseblock2(x, thickness) # x = self.normrelu2(x) redc2 = self.reductionz2(x) x = self.transition2(x) x = self.denseblock3(x, thickness) # x = self.normrelu3(x) redc3 = self.reductionz3(x) # truncated since here since we find it works better in DeepLesion # ts3 = self.transition3(db3) # db4 = self.denseblock4(ts3) # if self.feature_upsample: ftmaps = [None, redc1.squeeze(2), redc2.squeeze(2), redc3.squeeze(2)] x = self.lateral4(ftmaps[-1]) for p in range(3, self.fpn_finest_layer - 1, -1): x = F.interpolate(x, scale_factor=2, mode="nearest") y = ftmaps[p-1] lateral = getattr(self, 'lateral%d' % p)(y) x += lateral return [x] # else: # return [db3] def init_weights(self, pretrained=True): pattern = re.compile( r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') state_dict = model_zoo.load_url(model_urls['densenet121']) for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = res.group(1) + res.group(2) state_dict[new_key] = state_dict[key] del state_dict[key] state_dict1 = {} for key in list(state_dict.keys()): new_key = key.replace('features.', '') if state_dict[key].dim() == 4: t0 = state_dict[key].shape[1] state_dict1[new_key] = state_dict[key]#.unsqueeze(2)#.repeat((1,1,self.n_cts,1,1))/self.n_cts if t0 == 3: state_dict1[new_key] = state_dict1[new_key][:,1:2,...] else: state_dict1[new_key] = state_dict[key] key = self.load_state_dict(state_dict1, strict=False) #print(key) def freeze(self): for name, param in self.named_parameters(): print('freezing', name) param.requires_grad = False
10,288
42.413502
140
py
AlignShift
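init_weights in DenseNetCustomTrunc3dAlign above reuses ImageNet DenseNet-121 weights: keys are renamed with the torchvision regex, and the pretrained 3-channel stem kernel is reduced to the single CT input channel by keeping only its middle slice. A minimal sketch of that channel surgery on a dummy tensor (the random weight only mimics the pretrained shape):

import torch

w_2d = torch.randn(64, 3, 7, 7)      # shape of densenet121's pretrained conv0 weight
w_1ch = w_2d[:, 1:2, ...]            # keep only channel index 1, as in init_weights above
print(w_1ch.shape)                   # torch.Size([64, 1, 7, 7])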
AlignShift-master/deeplesion/models/truncated_densenet.py
# Ke Yan, Imaging Biomarkers and Computer-Aided Diagnosis Laboratory, from collections import namedtuple import torch import torch.nn.functional as F from torch import nn import torchvision.models as models from torchvision.models.densenet import _DenseBlock, _Transition, model_urls import math import torch.utils.model_zoo as model_zoo import re from collections import OrderedDict from mmdet.models.registry import BACKBONES @BACKBONES.register_module class DenseNetCustomTrunc(nn.Module): def __init__(self, out_dim=256, in_channels=3, fpn_finest_layer=1): super().__init__() self.depth = 121 self.feature_upsample = True self.fpn_finest_layer = fpn_finest_layer self.out_dim = out_dim self.in_channel = in_channels assert self.depth in [121] if self.depth == 121: num_init_features = 64 growth_rate = 32 block_config = (6, 12, 24) self.in_dim = [64, 256, 512, 1024] bn_size = 4 drop_rate = 0 # First convolution self.conv0 = nn.Conv2d(self.in_channel, num_init_features, kernel_size=7, stride=2, padding=3, bias=False) self.norm0 = nn.BatchNorm2d(num_init_features) self.relu0 = nn.ReLU(inplace=True) self.pool0 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) # Each denseblock num_features = num_init_features for i, num_layers in enumerate(block_config): block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, \ memory_efficient=True) self.add_module('denseblock%d' % (i + 1), block) num_features = num_features + num_layers * growth_rate if i != len(block_config) - 1: trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2) self.add_module('transition%d' % (i + 1), trans) num_features = num_features // 2 # Final batch norm # self.add_module('norm5', nn.BatchNorm2d(num_features)) # Official init from torch repo. 
for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight.data) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): m.bias.data.zero_() if self.feature_upsample: for p in range(4, self.fpn_finest_layer - 1, -1): layer = nn.Conv2d(self.in_dim[p - 1], self.out_dim, 1) name = 'lateral%d' % p self.add_module(name, layer) nn.init.kaiming_uniform_(layer.weight, a=1) nn.init.constant_(layer.bias, 0) self.init_weights() def forward(self, x): x = self.conv0(x) x = self.norm0(x) relu0 = self.relu0(x) pool0 = self.pool0(relu0) db1 = self.denseblock1(pool0) ts1 = self.transition1(db1) db2 = self.denseblock2(ts1) ts2 = self.transition2(db2) db3 = self.denseblock3(ts2) # truncated since here since we find it works better in DeepLesion # ts3 = self.transition3(db3) # db4 = self.denseblock4(ts3) if self.feature_upsample: ftmaps = [relu0, db1, db2, db3] x = self.lateral4(ftmaps[-1]) for p in range(3, self.fpn_finest_layer - 1, -1): x = F.interpolate(x, scale_factor=2, mode="nearest") y = ftmaps[p-1] lateral = getattr(self, 'lateral%d' % p)(y) x += lateral return [x] else: return [db3] def init_weights(self, pretrained=True): pattern = re.compile( r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') state_dict = model_zoo.load_url(model_urls['densenet121']) for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = res.group(1) + res.group(2) state_dict[new_key] = state_dict[key] del state_dict[key] state_dict1 = {} for key in list(state_dict.keys()): new_key = key.replace('features.', '') if self.in_channel != 3: if len(state_dict[key].shape)==4 and state_dict[key].shape[1]==3: a = self.in_channel // 3 + 1 b = self.in_channel % 3 + 1 state_dict[key] = state_dict[key].repeat((1,a,1,1))[:,b//2:b//2-b,...] state_dict1[new_key] = state_dict[key] self.load_state_dict(state_dict1, strict=False) def freeze(self): for name, param in self.named_parameters(): print('freezing', name) param.requires_grad = False
5,125
37.833333
114
py
AlignShift
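The 2D DenseNetCustomTrunc above is truncated after denseblock3 and fuses its stage outputs through a simple top-down pathway: project the coarsest map with a 1x1 lateral conv, upsample by 2, and add the projected map of the next finer stage. A self-contained sketch with made-up feature sizes (channel widths follow self.in_dim; the spatial sizes and random tensors are illustrative):

import torch
import torch.nn.functional as F
from torch import nn

in_dim, out_dim = [64, 256, 512, 1024], 256
laterals = nn.ModuleList([nn.Conv2d(c, out_dim, 1) for c in in_dim])     # lateral1..lateral4
ftmaps = [torch.randn(1, c, 512 // 2 ** (i + 1), 512 // 2 ** (i + 1))    # relu0, db1, db2, db3
          for i, c in enumerate(in_dim)]

x = laterals[3](ftmaps[3])                       # start from the truncated denseblock3 output
for p in range(3, 0, -1):                        # fpn_finest_layer = 1
    x = F.interpolate(x, scale_factor=2, mode="nearest")
    x = x + laterals[p - 1](ftmaps[p - 1])
print(x.shape)                                   # torch.Size([1, 256, 256, 256])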
AlignShift-master/deeplesion/models/detector.py
from mmdet.models.registry import DETECTORS from mmdet.models.detectors.two_stage import TwoStageDetector import torch import torch.nn as nn from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler, auto_fp16 from mmdet.models import builder from mmdet.models.detectors.base import BaseDetector from mmdet.models.detectors.test_mixins import BBoxTestMixin, MaskTestMixin, RPNTestMixin @DETECTORS.register_module class AlignShiftMaskRCNN(TwoStageDetector): def __init__(self, backbone, rpn_head, bbox_roi_extractor, bbox_head, mask_roi_extractor, mask_head, train_cfg, test_cfg, neck=None, shared_head=None, pretrained=None): super(AlignShiftMaskRCNN, self).__init__( backbone=backbone, neck=neck, shared_head=shared_head, rpn_head=rpn_head, bbox_roi_extractor=bbox_roi_extractor, bbox_head=bbox_head, mask_roi_extractor=mask_roi_extractor, mask_head=mask_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained) def extract_feat(self, img, thickness): """Directly extract features from the backbone+neck """ x = self.backbone(img, thickness) if self.with_neck: x = self.neck(x) return x def forward_train(self, img, img_meta, gt_bboxes, gt_labels, thickness, additional_fts=None, gt_bboxes_ignore=None, gt_masks=None, proposals=None): """ Args: img (Tensor): of shape (N, C, H, W) encoding input images. Typically these should be mean centered and std scaled. img_meta (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and my also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. gt_bboxes (list[Tensor]): each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. gt_masks (None | Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. proposals : override rpn proposals with custom proposals. Use when `with_rpn` is False. 
Returns: dict[str, Tensor]: a dictionary of loss components """ x = self.extract_feat(img, thickness) losses = dict() # RPN forward and loss if self.with_rpn: rpn_outs = self.rpn_head(x) rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, self.train_cfg.rpn) rpn_losses = self.rpn_head.loss( *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) losses.update(rpn_losses) proposal_cfg = self.train_cfg.get('rpn_proposal', self.test_cfg.rpn) proposal_inputs = rpn_outs + (img_meta, proposal_cfg) proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) else: proposal_list = proposals # assign gts and sample proposals if self.with_bbox or self.with_mask: bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner) bbox_sampler = build_sampler( self.train_cfg.rcnn.sampler, context=self) num_imgs = img.size(0) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] for i in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=[lvl_feat[i][None] for lvl_feat in x]) sampling_results.append(sampling_result) # bbox head forward and loss if self.with_bbox: rois = bbox2roi([res.bboxes for res in sampling_results]) # TODO: a more flexible way to decide which feature maps to use bbox_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) if self.with_shared_head: bbox_feats = self.shared_head(bbox_feats) if additional_fts is not None: cls_score, bbox_pred = self.bbox_head(bbox_feats, sampling_results, additional_fts) else: cls_score, bbox_pred = self.bbox_head(bbox_feats) bbox_targets = self.bbox_head.get_target(sampling_results, gt_bboxes, gt_labels, self.train_cfg.rcnn) # print(cls_score.shape, bbox_targets[0].shape, bbox_targets[2].shape) # print(cls_score[0], bbox_pred[0], bbox_targets[0][0],bbox_targets[2][0]) loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, *bbox_targets) losses.update(loss_bbox) # mask head forward and loss if self.with_mask: if not self.share_roi_extractor: pos_rois = bbox2roi( [res.pos_bboxes for res in sampling_results]) mask_feats = self.mask_roi_extractor( x[:self.mask_roi_extractor.num_inputs], pos_rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) else: pos_inds = [] device = bbox_feats.device for res in sampling_results: pos_inds.append( torch.ones( res.pos_bboxes.shape[0], device=device, dtype=torch.uint8)) pos_inds.append( torch.zeros( res.neg_bboxes.shape[0], device=device, dtype=torch.uint8)) pos_inds = torch.cat(pos_inds) mask_feats = bbox_feats[pos_inds] mask_pred = self.mask_head(mask_feats) mask_targets = self.mask_head.get_target(sampling_results, gt_masks, self.train_cfg.rcnn) pos_labels = torch.cat( [res.pos_gt_labels for res in sampling_results]) loss_mask = self.mask_head.loss(mask_pred, mask_targets, pos_labels) losses.update(loss_mask) return losses def simple_test(self, img, img_meta, thickness, additional_fts=None, proposals=None, rescale=False, **kwargs):#hyadd """Test without augmentation.""" assert self.with_bbox, "Bbox head must be implemented." 
x = self.extract_feat(img, thickness) proposal_list = self.simple_test_rpn( x, img_meta, self.test_cfg.rpn) if proposals is None else proposals det_bboxes, det_labels = self.simple_test_bboxes( x, img_meta, proposal_list, self.test_cfg.rcnn, additional_fts, rescale=rescale)#, sampling_results, additional_fts) # print(proposal_list[0][0], det_bboxes.shape,det_labels.shape) bbox_results = bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) if not self.with_mask: return bbox_results else: segm_results = self.simple_test_mask( x, img_meta, det_bboxes, det_labels, rescale=rescale) return bbox_results, segm_results def simple_test_bboxes(self, x, img_meta, proposals, rcnn_test_cfg, additional_fts=None, rescale=False): """Test only det bboxes without augmentation.""" rois = bbox2roi(proposals) roi_feats = self.bbox_roi_extractor( x[:len(self.bbox_roi_extractor.featmap_strides)], rois) if self.with_shared_head: roi_feats = self.shared_head(roi_feats) if additional_fts is not None: cls_score, bbox_pred = self.bbox_head(roi_feats, proposals, additional_fts) else: cls_score, bbox_pred = self.bbox_head(roi_feats) img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] det_bboxes, det_labels = self.bbox_head.get_det_bboxes( rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=rescale, cfg=rcnn_test_cfg) return det_bboxes, det_labels @auto_fp16(apply_to=('img', )) def forward(self, img, img_meta, return_loss=True, debug=False, **kwargs): """ Calls either forward_train or forward_test depending on whether return_loss=True. Note this setting will change the expected inputs. When `return_loss=False`, img and img_meta are single-nested (i.e. Tensor and List[dict]), and when `resturn_loss=True`, img and img_meta should be double nested (i.e. List[Tensor], List[List[dict]]), with the outer list indicating test time augmentations. """ if return_loss: return self.forward_train(img, img_meta, **kwargs) else: return self.forward_test(img, img_meta, **kwargs)
10,742
41.295276
128
py
AlignShift
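The only structural change AlignShiftMaskRCNN above makes to mmdetection's TwoStageDetector is threading the per-volume thickness into the backbone: extract_feat takes (img, thickness) and both forward_train and simple_test pass it through. A toy, stand-alone illustration of that pattern with stub modules (the stubs and shapes are illustrative; the real backbone is DenseNetCustomTrunc3dAlign):

import torch
from torch import nn

class StubBackbone(nn.Module):
    def forward(self, img, thickness):
        # thickness would steer AlignShiftConv; here we just collapse the depth axis
        return [img.mean(dim=2)]

class StubDetector(nn.Module):
    def __init__(self):
        super().__init__()
        self.backbone = StubBackbone()

    def extract_feat(self, img, thickness):
        return self.backbone(img, thickness)

det = StubDetector()
feats = det.extract_feat(torch.randn(2, 1, 3, 64, 64),      # N x C x D x H x W
                         torch.tensor([[5.0], [2.0]]))      # per-volume spacing in mm
print(feats[0].shape)                                        # torch.Size([2, 1, 64, 64])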
AlignShift-master/deeplesion/models/truncated_densenet3d_tsm.py
# Ke Yan, Imaging Biomarkers and Computer-Aided Diagnosis Laboratory, from collections import namedtuple import torch import torch.nn.functional as F from torch import nn import torchvision.models as models from torchvision.models.densenet import model_urls import math import torch.utils.model_zoo as model_zoo import re from collections import OrderedDict from mmdet.models.registry import BACKBONES from nn.operators.tsmconv import TSMConv import torch.utils.checkpoint as cp from mmdet.models.utils import build_conv_layer, build_norm_layer # mybn = nn.BatchNorm3d # mybn = nn.SyncBatchNorm norm_cfg = dict(type='SyncBN') def _bn_function_factory(norm, relu, conv): def bn_function(*inputs): concated_features = torch.cat(inputs, 1) bottleneck_output = conv(relu(norm(concated_features))) return bottleneck_output return bn_function class _DenseLayer(nn.Sequential): def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, n_fold, memory_efficient=False): super(_DenseLayer, self).__init__() self.add_module('norm1', build_norm_layer(norm_cfg, num_input_features, postfix=1)[1]), self.add_module('relu1', nn.ReLU(inplace=False)), self.add_module('conv1', TSMConv(num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False, n_fold=n_fold)), self.add_module('norm2', build_norm_layer(norm_cfg, bn_size * growth_rate, postfix=1)[1]), self.add_module('relu2', nn.ReLU(inplace=False)), self.add_module('conv2', TSMConv(bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False, n_fold=n_fold)), self.drop_rate = drop_rate self.memory_efficient = memory_efficient def forward(self, *prev_features): bn_function = _bn_function_factory(self.norm1, self.relu1, self.conv1) if self.memory_efficient and any(prev_feature.requires_grad for prev_feature in prev_features): bottleneck_output = cp.checkpoint(bn_function, *prev_features) else: bottleneck_output = bn_function(*prev_features) new_features = self.conv2(self.relu2(self.norm2(bottleneck_output))) if self.drop_rate > 0: new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) return new_features class _DenseBlock(nn.Module): def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, n_fold, memory_efficient=False): super(_DenseBlock, self).__init__() for i in range(num_layers): layer = _DenseLayer( num_input_features + i * growth_rate, growth_rate=growth_rate, bn_size=bn_size, drop_rate=drop_rate, memory_efficient=memory_efficient, n_fold=n_fold, ) self.add_module('denselayer%d' % (i + 1), layer) def forward(self, init_features): features = [init_features] for name, layer in self.named_children(): new_features = layer(*features) features.append(new_features) return torch.cat(features, 1) class _Transition(nn.Sequential): def __init__(self, num_input_features, num_output_features): super(_Transition, self).__init__() self.add_module('norm', build_norm_layer(norm_cfg, num_input_features, postfix=1)[1]) self.add_module('relu', nn.ReLU(inplace=True)) self.add_module('conv', TSMConv(num_input_features, num_output_features, kernel_size=1, stride=1, bias=False, tsm=False)) self.add_module('pool', nn.AvgPool3d(kernel_size=[1, 2, 2], stride=[1, 2, 2]))#, padding=[0,1,1] class _Reduction_z(nn.Sequential): def __init__(self, input_features, input_slice): super().__init__() self.add_module('reduction_z_conv', nn.Conv3d(input_features, input_features, kernel_size=[input_slice, 1, 1], stride=1, bias=False)) @BACKBONES.register_module class DenseNetCustomTrunc3dTSM(nn.Module): 
def __init__(self, out_dim=256, n_cts=3, fpn_finest_layer=1, memory_efficient=True, n_fold=8,): super().__init__() self.depth = 121 self.feature_upsample = True self.fpn_finest_layer = fpn_finest_layer self.out_dim = out_dim self.n_cts = n_cts self.mid_ct = n_cts//2 self.n_fold = n_fold assert self.depth in [121] if self.depth == 121: num_init_features = 64 growth_rate = 32 block_config = (6, 12, 24) self.in_dim = [64, 256, 512, 1024] bn_size = 4 drop_rate = 0 # First convolution self.conv0 = TSMConv(1, num_init_features, kernel_size=7, stride=2, padding=3, bias=False, tsm=False) self.norm0 = build_norm_layer(norm_cfg, num_init_features, postfix=1)[1] self.relu0 = nn.ReLU(inplace=True) self.pool0 = nn.MaxPool3d(kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1]) # Each denseblock num_features = num_init_features for i, num_layers in enumerate(block_config): block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate, n_fold=self.n_fold, memory_efficient=memory_efficient) self.add_module('denseblock%d' % (i + 1), block) num_features = num_features + num_layers * growth_rate reductionz = _Reduction_z(num_features, self.n_cts) self.add_module('reductionz%d' % (i + 1), reductionz) if i != len(block_config) - 1: trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2) self.add_module('transition%d' % (i + 1), trans) num_features = num_features // 2 # Final batch norm # self.add_module('norm5', nn.BatchNorm2d(num_features)) # Official init from torch repo. for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight.data) elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() elif isinstance(m, nn.Linear): m.bias.data.zero_() if self.feature_upsample: for p in range(4, self.fpn_finest_layer - 1, -1): layer = nn.Conv2d(self.in_dim[p - 1], self.out_dim, 1) name = 'lateral%d' % p self.add_module(name, layer) nn.init.kaiming_uniform_(layer.weight, a=1) nn.init.constant_(layer.bias, 0) self.init_weights() # if syncbn: # self = nn.SyncBatchNorm.convert_sync_batchnorm(self) def forward(self, x): x = self.conv0(x) x = self.norm0(x) x = self.relu0(x) x = self.pool0(x) x = self.denseblock1(x) redc1 = self.reductionz1(x) x = self.transition1(x) x = self.denseblock2(x) redc2 = self.reductionz2(x) x = self.transition2(x) x = self.denseblock3(x) redc3 = self.reductionz3(x) # truncated since here since we find it works better in DeepLesion # ts3 = self.transition3(db3) # db4 = self.denseblock4(ts3) # if self.feature_upsample: ftmaps = [None, redc1.squeeze(2), redc2.squeeze(2), redc3.squeeze(2)] x = self.lateral4(ftmaps[-1]) for p in range(3, self.fpn_finest_layer - 1, -1): x = F.interpolate(x, scale_factor=2, mode="nearest") y = ftmaps[p-1] lateral = getattr(self, 'lateral%d' % p)(y) x += lateral return [x] def init_weights(self, pretrained=True): pattern = re.compile( r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') state_dict = model_zoo.load_url(model_urls['densenet121']) for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = res.group(1) + res.group(2) state_dict[new_key] = state_dict[key] del state_dict[key] state_dict1 = {} for key in list(state_dict.keys()): new_key = key.replace('features.', '') if state_dict[key].dim() == 4: t0 = state_dict[key].shape[1] state_dict1[new_key] = state_dict[key]#.unsqueeze(2)#.repeat((1,1,self.n_cts,1,1))/self.n_cts if t0 == 3: 
state_dict1[new_key] = state_dict1[new_key][:,1:2,...] else: state_dict1[new_key] = state_dict[key] self.load_state_dict(state_dict1, strict=False) def freeze(self): for name, param in self.named_parameters(): print('freezing', name) param.requires_grad = False
9,413
41.026786
120
py
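A rough smoke-test sketch for the DenseNetCustomTrunc3dTSM backbone above, under several stated assumptions: the AlignShift repo (TSMConv, mmdet, mmcv) is importable, TSMConv applies its kernel slice-wise so the depth stays at n_cts (which the [n_cts, 1, 1] reduction convolutions require), and a single CUDA device is available since SyncBN expects GPU tensors and only synchronizes during distributed training. fpn_finest_layer=2 is used here because ftmaps[0] is None in this truncated variant, and the constructor's init_weights downloads ImageNet DenseNet-121 weights.

import torch
# from <module above> import DenseNetCustomTrunc3dTSM   # import path elided

backbone = DenseNetCustomTrunc3dTSM(out_dim=256, n_cts=3, fpn_finest_layer=2, n_fold=8)
backbone = backbone.cuda().eval()                  # eval: SyncBN falls back to running stats
x = torch.rand(1, 1, 3, 512, 512, device='cuda')   # (batch, channel, n_cts slices, H, W)
with torch.no_grad():
    feats = backbone(x)                            # list with one fused 256-channel map
print(feats[0].shape)                              # roughly (1, 256, 128, 128) for a 512x512 input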
AlignShift
AlignShift-master/deeplesion/losses/diceloss.py
import torch import torch.nn as nn import torch.nn.functional as F from mmdet.models.registry import LOSSES def dice_loss(input, target): smooth = 0.01 input = input.sigmoid() # intersection = ((input * target).sum(dim=(1,2))) # drop one intermediate variable -- does it reduce memory? #hyadd dice = ((2. * ((input * target).sum(dim=(1,2))) + smooth) / (input.sum(dim=(1,2)) + target.sum(dim=(1,2)) + smooth)) return 1 - dice.mean() def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None): # TODO: handle these two reserved arguments # assert reduction == 'mean' and avg_factor is None num_rois = pred.size()[0] inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device) pred_slice = pred[inds, label].squeeze(1) return dice_loss(pred_slice, target)[None] # return F.binary_cross_entropy_with_logits( # pred_slice, target, reduction='mean')[None] @LOSSES.register_module class DiceLoss(nn.Module): def __init__(self, use_sigmoid=False, use_mask=False, reduction='mean', loss_weight=1.0): super(DiceLoss, self).__init__() assert (use_sigmoid is False) or (use_mask is False) self.use_sigmoid = use_sigmoid self.use_mask = use_mask self.reduction = reduction self.loss_weight = loss_weight if self.use_mask: self.cls_criterion = mask_cross_entropy def forward(self, cls_score, label, weight=None, avg_factor=None, reduction_override=None, **kwargs): # assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss_cls = self.loss_weight * self.cls_criterion( cls_score, label, weight, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss_cls
2,058
33.316667
79
py
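A minimal usage sketch for dice_loss above: it applies a sigmoid internally, so raw per-RoI mask logits of shape (num_rois, H, W) and a binary target of the same shape are expected; shapes and values here are illustrative only.

import torch

logits = torch.randn(2, 28, 28)                   # raw per-RoI mask logits
target = (torch.rand(2, 28, 28) > 0.5).float()    # binary ground-truth masks
loss = dice_loss(logits, target)                  # scalar: 1 - mean Dice over the 2 RoIs
print(loss.item())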
AlignShift
AlignShift-master/mmdet/apis/inference.py
import warnings import matplotlib.pyplot as plt import mmcv import numpy as np import pycocotools.mask as maskUtils import torch from mmcv.parallel import collate, scatter from mmcv.runner import load_checkpoint from mmdet.core import get_classes from mmdet.datasets.pipelines import Compose from mmdet.models import build_detector def init_detector(config, checkpoint=None, device='cuda:0'): """Initialize a detector from config file. Args: config (str or :obj:`mmcv.Config`): Config file path or the config object. checkpoint (str, optional): Checkpoint path. If left as None, the model will not load any weights. Returns: nn.Module: The constructed detector. """ if isinstance(config, str): config = mmcv.Config.fromfile(config) elif not isinstance(config, mmcv.Config): raise TypeError('config must be a filename or Config object, ' 'but got {}'.format(type(config))) config.model.pretrained = None model = build_detector(config.model, test_cfg=config.test_cfg) if checkpoint is not None: checkpoint = load_checkpoint(model, checkpoint) if 'CLASSES' in checkpoint['meta']: model.CLASSES = checkpoint['meta']['CLASSES'] else: warnings.warn('Class names are not saved in the checkpoint\'s ' 'meta data, use COCO classes by default.') model.CLASSES = get_classes('coco') model.cfg = config # save the config in the model for convenience model.to(device) model.eval() return model class LoadImage(object): def __call__(self, results): if isinstance(results['img'], str): results['filename'] = results['img'] else: results['filename'] = None img = mmcv.imread(results['img']) results['img'] = img results['img_shape'] = img.shape results['ori_shape'] = img.shape return results def inference_detector(model, img): """Inference image(s) with the detector. Args: model (nn.Module): The loaded detector. imgs (str/ndarray or list[str/ndarray]): Either image files or loaded images. Returns: If imgs is a str, a generator will be returned, otherwise return the detection results directly. """ cfg = model.cfg device = next(model.parameters()).device # model device # build the data pipeline test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:] test_pipeline = Compose(test_pipeline) # prepare data data = dict(img=img) data = test_pipeline(data) data = scatter(collate([data], samples_per_gpu=1), [device])[0] # forward the model with torch.no_grad(): result = model(return_loss=False, rescale=True, **data) return result # TODO: merge this method with the one in BaseDetector def show_result(img, result, class_names, score_thr=0.3, wait_time=0, show=True, out_file=None): """Visualize the detection results on the image. Args: img (str or np.ndarray): Image filename or loaded image. result (tuple[list] or list): The detection result, can be either (bbox, segm) or just bbox. class_names (list[str] or tuple[str]): A list of class names. score_thr (float): The threshold to visualize the bboxes and masks. wait_time (int): Value of waitKey param. show (bool, optional): Whether to show the image with opencv or not. out_file (str, optional): If specified, the visualization result will be written to the out file instead of shown in a window. Returns: np.ndarray or None: If neither `show` nor `out_file` is specified, the visualized image is returned, otherwise None is returned. 
""" assert isinstance(class_names, (tuple, list)) img = mmcv.imread(img) img = img.copy() if isinstance(result, tuple): bbox_result, segm_result = result else: bbox_result, segm_result = result, None bboxes = np.vstack(bbox_result) # draw segmentation masks if segm_result is not None: segms = mmcv.concat_list(segm_result) inds = np.where(bboxes[:, -1] > score_thr)[0] for i in inds: color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8) mask = maskUtils.decode(segms[i]).astype(np.bool) img[mask] = img[mask] * 0.5 + color_mask * 0.5 # draw bounding boxes labels = [ np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result) ] labels = np.concatenate(labels) mmcv.imshow_det_bboxes( img, bboxes, labels, class_names=class_names, score_thr=score_thr, show=show, wait_time=wait_time, out_file=out_file) if not (show or out_file): return img def show_result_pyplot(img, result, class_names, score_thr=0.3, fig_size=(15, 10)): """Visualize the detection results on the image. Args: img (str or np.ndarray): Image filename or loaded image. result (tuple[list] or list): The detection result, can be either (bbox, segm) or just bbox. class_names (list[str] or tuple[str]): A list of class names. score_thr (float): The threshold to visualize the bboxes and masks. fig_size (tuple): Figure size of the pyplot figure. out_file (str, optional): If specified, the visualization result will be written to the out file instead of shown in a window. """ img = show_result( img, result, class_names, score_thr=score_thr, show=False) plt.figure(figsize=fig_size) plt.imshow(mmcv.bgr2rgb(img))
5,973
33.732558
79
py
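A typical end-to-end call into the helpers above, following the usual mmdetection single-image workflow; the config, checkpoint, and image paths are placeholders, not files shipped with this repo.

from mmdet.apis.inference import init_detector, inference_detector, show_result

config_file = 'configs/some_config.py'           # placeholder
checkpoint_file = 'work_dirs/some_ckpt.pth'      # placeholder
model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo.jpg')   # bbox (and optionally mask) results
show_result('demo.jpg', result, model.CLASSES, score_thr=0.3,
            show=False, out_file='demo_out.jpg')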
AlignShift
AlignShift-master/mmdet/apis/train.py
from __future__ import division import re from collections import OrderedDict import torch from mmcv.parallel import MMDataParallel, MMDistributedDataParallel from mmcv.runner import DistSamplerSeedHook, Runner, obj_from_dict from mmdet import datasets from mmdet.core import (CocoDistEvalmAPHook, CocoDistEvalRecallHook, DistEvalmAPHook, DistOptimizerHook, Fp16OptimizerHook) from mmdet.datasets import DATASETS, build_dataloader from mmdet.models import RPN from .env import get_root_logger from deeplesion.evaluation.evaluation import MyDeepLesionEval def parse_losses(losses): log_vars = OrderedDict() for loss_name, loss_value in losses.items(): if isinstance(loss_value, torch.Tensor): try: log_vars[loss_name] = loss_value.mean() except: log_vars[loss_name] = loss_value elif isinstance(loss_value, list): log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value) else: log_vars[loss_name] = loss_value # else: # raise TypeError( # '{} is not a tensor or list of tensors'.format(loss_name)) loss = sum(_value for _key, _value in log_vars.items() if 'loss' in _key) log_vars['loss'] = loss for name in log_vars: try: log_vars[name] = log_vars[name].item() except: log_vars[name] = log_vars[name] return loss, log_vars def batch_processor(model, data, train_mode): losses = model(**data, return_loss=train_mode) loss, log_vars = parse_losses(losses) outputs = dict( loss=loss, log_vars=log_vars, num_samples=len(data['img'].data)) return outputs def train_detector(model, dataset, cfg, distributed=False, validate=False, logger=None): if logger is None: logger = get_root_logger(cfg.log_level) # start training if distributed: _dist_train(model, dataset, cfg, validate=validate) else: _non_dist_train(model, dataset, cfg, validate=validate) def build_optimizer(model, optimizer_cfg): """Build optimizer from configs. Args: model (:obj:`nn.Module`): The model with parameters to be optimized. optimizer_cfg (dict): The config dict of the optimizer. Positional fields are: - type: class name of the optimizer. - lr: base learning rate. Optional fields are: - any arguments of the corresponding optimizer type, e.g., weight_decay, momentum, etc. - paramwise_options: a dict with 3 accepted fileds (bias_lr_mult, bias_decay_mult, norm_decay_mult). `bias_lr_mult` and `bias_decay_mult` will be multiplied to the lr and weight decay respectively for all bias parameters (except for the normalization layers), and `norm_decay_mult` will be multiplied to the weight decay for all weight and bias parameters of normalization layers. Returns: torch.optim.Optimizer: The initialized optimizer. Example: >>> model = torch.nn.modules.Conv1d(1, 1, 1) >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9, >>> weight_decay=0.0001) >>> optimizer = build_optimizer(model, optimizer_cfg) """ if hasattr(model, 'module'): model = model.module optimizer_cfg = optimizer_cfg.copy() paramwise_options = optimizer_cfg.pop('paramwise_options', None) # if no paramwise option is specified, just use the global setting if paramwise_options is None: return obj_from_dict(optimizer_cfg, torch.optim, dict(params=model.parameters())) else: assert isinstance(paramwise_options, dict) # get base lr and weight decay base_lr = optimizer_cfg['lr'] base_wd = optimizer_cfg.get('weight_decay', None) # weight_decay must be explicitly specified if mult is specified if ('bias_decay_mult' in paramwise_options or 'norm_decay_mult' in paramwise_options): assert base_wd is not None # get param-wise options bias_lr_mult = paramwise_options.get('bias_lr_mult', 1.) 
bias_decay_mult = paramwise_options.get('bias_decay_mult', 1.) norm_decay_mult = paramwise_options.get('norm_decay_mult', 1.) # set param-wise lr and weight decay params = [] for name, param in model.named_parameters(): param_group = {'params': [param]} if not param.requires_grad: # FP16 training needs to copy gradient/weight between master # weight copy and model weight, it is convenient to keep all # parameters here to align with model.parameters() params.append(param_group) continue # for norm layers, overwrite the weight decay of weight and bias # TODO: obtain the norm layer prefixes dynamically if re.search(r'(bn|gn)(\d+)?.(weight|bias)', name): if base_wd is not None: param_group['weight_decay'] = base_wd * norm_decay_mult # for other layers, overwrite both lr and weight decay of bias elif name.endswith('.bias'): param_group['lr'] = base_lr * bias_lr_mult if base_wd is not None: param_group['weight_decay'] = base_wd * bias_decay_mult # otherwise use the global settings params.append(param_group) optimizer_cls = getattr(torch.optim, optimizer_cfg.pop('type')) return optimizer_cls(params, **optimizer_cfg) def _dist_train(model, dataset, cfg, validate=False): # prepare data loaders dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] data_loaders = [ build_dataloader( ds, cfg.data.imgs_per_gpu, cfg.data.workers_per_gpu, dist=True) for ds in dataset ] # put model on gpus model = MMDistributedDataParallel(model.cuda()) # build runner optimizer = build_optimizer(model, cfg.optimizer) runner = Runner(model, batch_processor, optimizer, cfg.work_dir, cfg.log_level) # fp16 setting fp16_cfg = cfg.get('fp16', None) if fp16_cfg is not None: optimizer_config = Fp16OptimizerHook(**cfg.optimizer_config, **fp16_cfg) else: optimizer_config = DistOptimizerHook(**cfg.optimizer_config) # register hooks runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config) runner.register_hook(DistSamplerSeedHook()) # register eval hooks if validate: val_dataset_cfg = cfg.data.val eval_cfg = cfg.get('evaluation', {}) print(f'validation{eval_cfg}') #hyadd # if isinstance(model.module, RPN): # # TODO: implement recall hooks for other datasets # runner.register_hook( # CocoDistEvalRecallHook(val_dataset_cfg, **eval_cfg)) # else: # dataset_type = DATASETS.get(val_dataset_cfg.type) # if issubclass(dataset_type, datasets.CocoDataset): # runner.register_hook( # CocoDistEvalmAPHook(val_dataset_cfg, **eval_cfg)) # else: # runner.register_hook( # DistEvalmAPHook(val_dataset_cfg, **eval_cfg)) runner.register_hook(MyDeepLesionEval(val_dataset_cfg, **eval_cfg)) if cfg.resume_from: runner.resume(cfg.resume_from) elif cfg.load_from: runner.load_checkpoint(cfg.load_from) runner.run(data_loaders, cfg.workflow, cfg.total_epochs) def _non_dist_train(model, dataset, cfg, validate=False): if validate: raise NotImplementedError('Built-in validation is not implemented ' 'yet in not-distributed training. 
Use ' 'distributed training or test.py and ' '*eval.py scripts instead.') # prepare data loaders dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] data_loaders = [ build_dataloader( ds, cfg.data.imgs_per_gpu, cfg.data.workers_per_gpu, dist=True) for ds in dataset ] # put model on gpus model = MMDataParallel(model, device_ids=range(cfg.gpus)).cuda() # build runner optimizer = build_optimizer(model, cfg.optimizer) runner = Runner(model, batch_processor, optimizer, cfg.work_dir, cfg.log_level, cfg=cfg) # fp16 setting fp16_cfg = cfg.get('fp16', None) if fp16_cfg is not None: optimizer_config = Fp16OptimizerHook( **cfg.optimizer_config, **fp16_cfg, distributed=False) else: optimizer_config = cfg.optimizer_config runner.register_training_hooks(cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config) if cfg.resume_from: runner.resume(cfg.resume_from) elif cfg.load_from: runner.load_checkpoint(cfg.load_from) runner.run(data_loaders, cfg.workflow, cfg.total_epochs)
9,499
38.256198
78
py
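The build_optimizer docstring above only shows the plain case; this sketch exercises the paramwise_options branch. torchvision is assumed to be available, and the ResNet is only there to provide realistically named BN and bias parameters.

import torchvision
from mmdet.apis.train import build_optimizer

model = torchvision.models.resnet18()
optimizer_cfg = dict(
    type='SGD', lr=0.02, momentum=0.9, weight_decay=1e-4,
    paramwise_options=dict(bias_lr_mult=2.0, bias_decay_mult=0.0, norm_decay_mult=0.0))
optimizer = build_optimizer(model, optimizer_cfg)
# non-norm biases get lr 0.04 and no weight decay; bn*.weight / bn*.bias get no weight decay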
AlignShift
AlignShift-master/mmdet/apis/env.py
import logging import os import random import subprocess import numpy as np import torch import torch.distributed as dist import torch.multiprocessing as mp from mmcv.runner import get_dist_info def init_dist(launcher, backend='nccl', **kwargs): if mp.get_start_method(allow_none=True) is None: mp.set_start_method('spawn') if launcher == 'pytorch': _init_dist_pytorch(backend, **kwargs) elif launcher == 'mpi': _init_dist_mpi(backend, **kwargs) elif launcher == 'slurm': _init_dist_slurm(backend, **kwargs) else: raise ValueError('Invalid launcher type: {}'.format(launcher)) def _init_dist_pytorch(backend, **kwargs): # TODO: use local_rank instead of rank % num_gpus rank = int(os.environ['RANK']) num_gpus = torch.cuda.device_count() torch.cuda.set_device(rank % num_gpus) dist.init_process_group(backend=backend, **kwargs) def _init_dist_mpi(backend, **kwargs): raise NotImplementedError def _init_dist_slurm(backend, port=29500, **kwargs): proc_id = int(os.environ['SLURM_PROCID']) ntasks = int(os.environ['SLURM_NTASKS']) node_list = os.environ['SLURM_NODELIST'] num_gpus = torch.cuda.device_count() torch.cuda.set_device(proc_id % num_gpus) addr = subprocess.getoutput( 'scontrol show hostname {} | head -n1'.format(node_list)) os.environ['MASTER_PORT'] = str(port) os.environ['MASTER_ADDR'] = addr os.environ['WORLD_SIZE'] = str(ntasks) os.environ['RANK'] = str(proc_id) dist.init_process_group(backend=backend) def set_random_seed(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) def get_root_logger(log_level=logging.INFO): logger = logging.getLogger() if not logger.hasHandlers(): logging.basicConfig( format='%(asctime)s - %(levelname)s - %(message)s', level=log_level) rank, _ = get_dist_info() if rank != 0: logger.setLevel('ERROR') return logger
2,041
28.171429
70
py
AlignShift
AlignShift-master/mmdet/core/evaluation/eval_hooks.py
import os import os.path as osp import mmcv import numpy as np import torch import torch.distributed as dist from mmcv.parallel import collate, scatter from mmcv.runner import Hook from pycocotools.cocoeval import COCOeval from torch.utils.data import Dataset from mmdet import datasets from .coco_utils import fast_eval_recall, results2json from .mean_ap import eval_map class DistEvalHook(Hook): def __init__(self, dataset, interval=1): if isinstance(dataset, Dataset): self.dataset = dataset elif isinstance(dataset, dict): self.dataset = datasets.build_dataset(dataset, {'test_mode': True}) else: raise TypeError( 'dataset must be a Dataset object or a dict, not {}'.format( type(dataset))) self.interval = interval def after_train_epoch(self, runner): if not self.every_n_epochs(runner, self.interval): return runner.model.eval() results = [None for _ in range(len(self.dataset))] if runner.rank == 0: prog_bar = mmcv.ProgressBar(len(self.dataset)) for idx in range(runner.rank, len(self.dataset), runner.world_size): data = self.dataset[idx] data_gpu = scatter( collate([data], samples_per_gpu=1), [torch.cuda.current_device()])[0] # compute output with torch.no_grad(): result = runner.model( return_loss=False, rescale=True, **data_gpu) results[idx] = result batch_size = runner.world_size if runner.rank == 0: for _ in range(batch_size): prog_bar.update() if runner.rank == 0: print('\n') dist.barrier() for i in range(1, runner.world_size): tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(i)) tmp_results = mmcv.load(tmp_file) for idx in range(i, len(results), runner.world_size): results[idx] = tmp_results[idx] os.remove(tmp_file) self.evaluate(runner, results) else: tmp_file = osp.join(runner.work_dir, 'temp_{}.pkl'.format(runner.rank)) mmcv.dump(results, tmp_file) dist.barrier() dist.barrier() def evaluate(self): raise NotImplementedError class DistEvalmAPHook(DistEvalHook): def evaluate(self, runner, results): gt_bboxes = [] gt_labels = [] gt_ignore = [] for i in range(len(self.dataset)): ann = self.dataset.get_ann_info(i) bboxes = ann['bboxes'] labels = ann['labels'] if 'bboxes_ignore' in ann: ignore = np.concatenate([ np.zeros(bboxes.shape[0], dtype=np.bool), np.ones(ann['bboxes_ignore'].shape[0], dtype=np.bool) ]) gt_ignore.append(ignore) bboxes = np.vstack([bboxes, ann['bboxes_ignore']]) labels = np.concatenate([labels, ann['labels_ignore']]) gt_bboxes.append(bboxes) gt_labels.append(labels) if not gt_ignore: gt_ignore = None # If the dataset is VOC2007, then use 11 points mAP evaluation. 
if hasattr(self.dataset, 'year') and self.dataset.year == 2007: ds_name = 'voc07' else: ds_name = self.dataset.CLASSES mean_ap, eval_results = eval_map( results, gt_bboxes, gt_labels, gt_ignore=gt_ignore, scale_ranges=None, iou_thr=0.5, dataset=ds_name, print_summary=True) runner.log_buffer.output['mAP'] = mean_ap runner.log_buffer.ready = True class CocoDistEvalRecallHook(DistEvalHook): def __init__(self, dataset, interval=1, proposal_nums=(100, 300, 1000), iou_thrs=np.arange(0.5, 0.96, 0.05)): super(CocoDistEvalRecallHook, self).__init__( dataset, interval=interval) self.proposal_nums = np.array(proposal_nums, dtype=np.int32) self.iou_thrs = np.array(iou_thrs, dtype=np.float32) def evaluate(self, runner, results): # the official coco evaluation is too slow, here we use our own # implementation instead, which may get slightly different results ar = fast_eval_recall(results, self.dataset.coco, self.proposal_nums, self.iou_thrs) for i, num in enumerate(self.proposal_nums): runner.log_buffer.output['AR@{}'.format(num)] = ar[i] runner.log_buffer.ready = True class CocoDistEvalmAPHook(DistEvalHook): def evaluate(self, runner, results): tmp_file = osp.join(runner.work_dir, 'temp_0') result_files = results2json(self.dataset, results, tmp_file) res_types = ['bbox', 'segm' ] if runner.model.module.with_mask else ['bbox'] cocoGt = self.dataset.coco imgIds = cocoGt.getImgIds() for res_type in res_types: try: cocoDt = cocoGt.loadRes(result_files[res_type]) except IndexError: print('No prediction found.') break iou_type = res_type cocoEval = COCOeval(cocoGt, cocoDt, iou_type) cocoEval.params.imgIds = imgIds cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() metrics = ['mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'] for i in range(len(metrics)): key = '{}_{}'.format(res_type, metrics[i]) val = float('{:.3f}'.format(cocoEval.stats[i])) runner.log_buffer.output[key] = val runner.log_buffer.output['{}_mAP_copypaste'.format(res_type)] = ( '{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} ' '{ap[4]:.3f} {ap[5]:.3f}').format(ap=cocoEval.stats[:6]) runner.log_buffer.ready = True for res_type in res_types: os.remove(result_files[res_type])
6,301
35.853801
79
py
AlignShift
AlignShift-master/mmdet/core/post_processing/merge_augs.py
import numpy as np import torch from mmdet.ops import nms from ..bbox import bbox_mapping_back def merge_aug_proposals(aug_proposals, img_metas, rpn_test_cfg): """Merge augmented proposals (multiscale, flip, etc.) Args: aug_proposals (list[Tensor]): proposals from different testing schemes, shape (n, 5). Note that they are not rescaled to the original image size. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and my also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. rpn_test_cfg (dict): rpn test config. Returns: Tensor: shape (n, 4), proposals corresponding to original image scale. """ recovered_proposals = [] for proposals, img_info in zip(aug_proposals, img_metas): img_shape = img_info['img_shape'] scale_factor = img_info['scale_factor'] flip = img_info['flip'] _proposals = proposals.clone() _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape, scale_factor, flip) recovered_proposals.append(_proposals) aug_proposals = torch.cat(recovered_proposals, dim=0) merged_proposals, _ = nms(aug_proposals, rpn_test_cfg.nms_thr) scores = merged_proposals[:, 4] _, order = scores.sort(0, descending=True) num = min(rpn_test_cfg.max_num, merged_proposals.shape[0]) order = order[:num] merged_proposals = merged_proposals[order, :] return merged_proposals def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg): """Merge augmented detection bboxes and scores. Args: aug_bboxes (list[Tensor]): shape (n, 4*#class) aug_scores (list[Tensor] or None): shape (n, #class) img_shapes (list[Tensor]): shape (3, ). rcnn_test_cfg (dict): rcnn test config. Returns: tuple: (bboxes, scores) """ recovered_bboxes = [] for bboxes, img_info in zip(aug_bboxes, img_metas): img_shape = img_info[0]['img_shape'] scale_factor = img_info[0]['scale_factor'] flip = img_info[0]['flip'] bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip) recovered_bboxes.append(bboxes) bboxes = torch.stack(recovered_bboxes).mean(dim=0) if aug_scores is None: return bboxes else: scores = torch.stack(aug_scores).mean(dim=0) return bboxes, scores def merge_aug_scores(aug_scores): """Merge augmented bbox scores.""" if isinstance(aug_scores[0], torch.Tensor): return torch.mean(torch.stack(aug_scores), dim=0) else: return np.mean(aug_scores, axis=0) def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None): """Merge augmented mask prediction. Args: aug_masks (list[ndarray]): shape (n, #class, h, w) img_shapes (list[ndarray]): shape (3, ). rcnn_test_cfg (dict): rcnn test config. Returns: tuple: (bboxes, scores) """ recovered_masks = [ mask if not img_info[0]['flip'] else mask[..., ::-1] for mask, img_info in zip(aug_masks, img_metas) ] if weights is None: merged_masks = np.mean(recovered_masks, axis=0) else: merged_masks = np.average( np.array(recovered_masks), axis=0, weights=np.array(weights)) return merged_masks
3,573
34.039216
78
py
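A small sketch of merge_aug_bboxes above for a two-view (original + horizontally flipped) test-time augmentation. img_metas mimics the nested list-of-dict layout the function indexes with img_info[0], and rcnn_test_cfg is not used inside this function, so None is passed.

import torch
from mmdet.core.post_processing.merge_augs import merge_aug_bboxes

aug_bboxes = [torch.tensor([[10., 10., 50., 60.]]),     # box from the original view
              torch.tensor([[205., 10., 245., 60.]])]   # same object in the flipped view
aug_scores = [torch.tensor([[0.9]]), torch.tensor([[0.8]])]
img_metas = [[dict(img_shape=(256, 256, 3), scale_factor=1.0, flip=False)],
             [dict(img_shape=(256, 256, 3), scale_factor=1.0, flip=True)]]
bboxes, scores = merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg=None)
print(bboxes)   # the flipped box maps back to [10, 10, 50, 60] and is averaged with the original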
AlignShift
AlignShift-master/mmdet/core/post_processing/bbox_nms.py
import torch from mmdet.ops.nms import nms_wrapper def multiclass_nms(multi_bboxes, multi_scores, score_thr, nms_cfg, max_num=-1, score_factors=None): """NMS for multi-class bboxes. Args: multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) multi_scores (Tensor): shape (n, #class), where the 0th column contains scores of the background class, but this will be ignored. score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. nms_thr (float): NMS IoU threshold max_num (int): if there are more than max_num bboxes after NMS, only top max_num will be kept. score_factors (Tensor): The factors multiplied to scores before applying NMS Returns: tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels are 0-based. """ num_classes = multi_scores.shape[1] bboxes, labels = [], [] nms_cfg_ = nms_cfg.copy() nms_type = nms_cfg_.pop('type', 'nms') nms_op = getattr(nms_wrapper, nms_type) for i in range(1, num_classes): cls_inds = multi_scores[:, i] > score_thr if not cls_inds.any(): continue # get bboxes and scores of this class if multi_bboxes.shape[1] == 4: _bboxes = multi_bboxes[cls_inds, :] else: _bboxes = multi_bboxes[cls_inds, i * 4:(i + 1) * 4] _scores = multi_scores[cls_inds, i] if score_factors is not None: _scores *= score_factors[cls_inds] cls_dets = torch.cat([_bboxes, _scores[:, None]], dim=1) cls_dets, _ = nms_op(cls_dets, **nms_cfg_) cls_labels = multi_bboxes.new_full((cls_dets.shape[0], ), i - 1, dtype=torch.long) bboxes.append(cls_dets) labels.append(cls_labels) if bboxes: bboxes = torch.cat(bboxes) labels = torch.cat(labels) if bboxes.shape[0] > max_num: _, inds = bboxes[:, -1].sort(descending=True) inds = inds[:max_num] bboxes = bboxes[inds] labels = labels[inds] else: bboxes = multi_bboxes.new_zeros((0, 5)) labels = multi_bboxes.new_zeros((0, ), dtype=torch.long) return bboxes, labels
2,451
35.597015
78
py
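An illustrative call of multiclass_nms above, with class-agnostic boxes of shape (n, 4) and a three-column score matrix whose column 0 is background; it assumes the repo's compiled nms op is built. All values are made up.

import torch
from mmdet.core.post_processing.bbox_nms import multiclass_nms

multi_bboxes = torch.tensor([[10., 10., 50., 50.],
                             [12., 12., 52., 52.],
                             [100., 100., 150., 150.]])
multi_scores = torch.tensor([[0.05, 0.90, 0.05],   # columns: background, class 0, class 1
                             [0.10, 0.85, 0.05],
                             [0.10, 0.10, 0.80]])
dets, labels = multiclass_nms(multi_bboxes, multi_scores, score_thr=0.3,
                              nms_cfg=dict(type='nms', iou_thr=0.5), max_num=100)
# dets: (k, 5) boxes with scores appended; labels: (k,) 0-based class indices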
AlignShift
AlignShift-master/mmdet/core/mask/mask_target.py
import mmcv import numpy as np import torch from torch.nn.modules.utils import _pair def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list, cfg): cfg_list = [cfg for _ in range(len(pos_proposals_list))] mask_targets = map(mask_target_single, pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list, cfg_list) mask_targets = torch.cat(list(mask_targets)) return mask_targets def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg): mask_size = _pair(cfg.mask_size) num_pos = pos_proposals.size(0) mask_targets = [] if num_pos > 0: proposals_np = pos_proposals.cpu().numpy() _, maxh, maxw = gt_masks.shape proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw - 1) proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh - 1) pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy() for i in range(num_pos): gt_mask = gt_masks[pos_assigned_gt_inds[i]] bbox = proposals_np[i, :].astype(np.int32) x1, y1, x2, y2 = bbox w = np.maximum(x2 - x1 + 1, 1) h = np.maximum(y2 - y1 + 1, 1) # mask is uint8 both before and after resizing # mask_size (h, w) to (w, h) target = mmcv.imresize(gt_mask[y1:y1 + h, x1:x1 + w], mask_size[::-1]) mask_targets.append(target) mask_targets = torch.from_numpy(np.stack(mask_targets)).float().to( pos_proposals.device) else: mask_targets = pos_proposals.new_zeros((0, ) + mask_size) return mask_targets
1,700
39.5
79
py
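A toy call of mask_target_single above. cfg only needs a mask_size attribute, so a SimpleNamespace stands in for the real training config, and the single ground-truth mask is a dummy uint8 array.

import numpy as np
import torch
from types import SimpleNamespace
from mmdet.core.mask.mask_target import mask_target_single

gt_masks = np.zeros((1, 64, 64), dtype=np.uint8)
gt_masks[0, 10:40, 10:40] = 1                        # one square object
pos_proposals = torch.tensor([[8., 8., 44., 44.]])   # one positive RoI
pos_assigned_gt_inds = torch.tensor([0])             # matched to gt mask 0
cfg = SimpleNamespace(mask_size=28)                  # stand-in for cfg.mask_size
targets = mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg)
print(targets.shape)                                 # (1, 28, 28): RoI-cropped mask resized to 28x28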
AlignShift
AlignShift-master/mmdet/core/fp16/hooks.py
import copy import torch import torch.nn as nn from mmcv.runner import OptimizerHook from ..utils.dist_utils import allreduce_grads from .utils import cast_tensor_type class Fp16OptimizerHook(OptimizerHook): """FP16 optimizer hook. The steps of fp16 optimizer is as follows. 1. Scale the loss value. 2. BP in the fp16 model. 2. Copy gradients from fp16 model to fp32 weights. 3. Update fp32 weights. 4. Copy updated parameters from fp32 weights to fp16 model. Refer to https://arxiv.org/abs/1710.03740 for more details. Args: loss_scale (float): Scale factor multiplied with loss. """ def __init__(self, grad_clip=None, coalesce=True, step_inteval=1, bucket_size_mb=-1, loss_scale=512., distributed=True): self.grad_clip = grad_clip self.coalesce = coalesce self.bucket_size_mb = bucket_size_mb self.loss_scale = loss_scale self.distributed = distributed self.step_inteval = step_inteval def before_run(self, runner): # keep a copy of fp32 weights runner.optimizer.param_groups = copy.deepcopy( runner.optimizer.param_groups) # convert model to fp16 wrap_fp16_model(runner.model) def copy_grads_to_fp32(self, fp16_net, fp32_weights): """Copy gradients from fp16 model to fp32 weight copy.""" for fp32_param, fp16_param in zip(fp32_weights, fp16_net.parameters()): if fp16_param.grad is not None: if fp32_param.grad is None: fp32_param.grad = fp32_param.data.new(fp32_param.size()) fp32_param.grad.copy_(fp16_param.grad) def copy_params_to_fp16(self, fp16_net, fp32_weights): """Copy updated params from fp32 weight copy to fp16 model.""" for fp16_param, fp32_param in zip(fp16_net.parameters(), fp32_weights): fp16_param.data.copy_(fp32_param.data) def after_train_iter(self, runner): # clear grads of last iteration runner.model.zero_grad() runner.optimizer.zero_grad() # scale the loss value scaled_loss = runner.outputs['loss'] * self.loss_scale scaled_loss.backward() # copy fp16 grads in the model to fp32 params in the optimizer fp32_weights = [] for param_group in runner.optimizer.param_groups: fp32_weights += param_group['params'] self.copy_grads_to_fp32(runner.model, fp32_weights) # allreduce grads if self.distributed: allreduce_grads(fp32_weights, self.coalesce, self.bucket_size_mb) # scale the gradients back for param in fp32_weights: if param.grad is not None: param.grad.div_(self.loss_scale) if self.grad_clip is not None: self.clip_grads(fp32_weights) # update fp32 params runner.optimizer.step() # copy fp32 params to the fp16 model self.copy_params_to_fp16(runner.model, fp32_weights) # def after_train_iter(self, runner):##hyadd # # scale the loss value # scaled_loss = runner.outputs['loss'] * self.loss_scale # scaled_loss.backward() # # copy fp16 grads in the model to fp32 params in the optimizer # if (runner.iter + 1) % self.step_inteval == 0: # fp32_weights = [] # for param_group in runner.optimizer.param_groups: # fp32_weights += param_group['params'] # self.copy_grads_to_fp32(runner.model, fp32_weights) # # allreduce grads # if self.distributed: # allreduce_grads(fp32_weights, self.coalesce, self.bucket_size_mb) # # scale the gradients back # for param in fp32_weights: # if param.grad is not None: # param.grad.div_(self.loss_scale) # if self.grad_clip is not None: # self.clip_grads(fp32_weights) # # update fp32 params # runner.optimizer.step() # # clear grads of last iteration # runner.model.zero_grad() # runner.optimizer.zero_grad() # # copy fp32 params to the fp16 model # self.copy_params_to_fp16(runner.model, fp32_weights) def wrap_fp16_model(model): # convert model to fp16 model.half() # patch the 
normalization layers to make it work in fp32 mode patch_norm_fp32(model) # set `fp16_enabled` flag for m in model.modules(): if hasattr(m, 'fp16_enabled'): m.fp16_enabled = True def patch_norm_fp32(module): if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)): module.float() module.forward = patch_forward_method(module.forward, torch.half, torch.float) for child in module.children(): patch_norm_fp32(child) return module def patch_forward_method(func, src_type, dst_type, convert_output=True): """Patch the forward method of a module. Args: func (callable): The original forward method. src_type (torch.dtype): Type of input arguments to be converted from. dst_type (torch.dtype): Type of input arguments to be converted to. convert_output (bool): Whether to convert the output back to src_type. Returns: callable: The patched forward method. """ def new_forward(*args, **kwargs): output = func(*cast_tensor_type(args, src_type, dst_type), **cast_tensor_type(kwargs, src_type, dst_type)) if convert_output: output = cast_tensor_type(output, dst_type, src_type) return output return new_forward
5,828
36.606452
83
py
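A minimal sketch of what patch_norm_fp32 above does to a half-precision model: BN layers are converted back to fp32 and their forward is wrapped so half inputs are cast up and the outputs cast back down. A CUDA device is assumed, since half-precision convolutions are generally unsupported on CPU.

import torch
import torch.nn as nn
from mmdet.core.fp16.hooks import patch_norm_fp32

model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.BatchNorm2d(8)).cuda().half()
patch_norm_fp32(model)                           # BN weights/stats back to fp32
x = torch.randn(1, 3, 32, 32, device='cuda', dtype=torch.half)
out = model(x)
print(out.dtype)                                 # torch.float16, but the BN math ran in fp32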
AlignShift
AlignShift-master/mmdet/core/fp16/utils.py
from collections import abc import numpy as np import torch def cast_tensor_type(inputs, src_type, dst_type): if isinstance(inputs, torch.Tensor): return inputs.to(dst_type) elif isinstance(inputs, str): return inputs elif isinstance(inputs, np.ndarray): return inputs elif isinstance(inputs, abc.Mapping): return type(inputs)({ k: cast_tensor_type(v, src_type, dst_type) for k, v in inputs.items() }) elif isinstance(inputs, abc.Iterable): return type(inputs)( cast_tensor_type(item, src_type, dst_type) for item in inputs) else: return inputs
664
26.708333
74
py
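cast_tensor_type above recurses through containers and only converts tensors; a quick illustration with a mixed batch dict.

import numpy as np
import torch
from mmdet.core.fp16.utils import cast_tensor_type

batch = dict(img=torch.zeros(1, 3, 8, 8), filename='slice_000.png', ids=np.arange(4))
half_batch = cast_tensor_type(batch, torch.float, torch.half)
print(half_batch['img'].dtype)    # torch.float16
print(type(half_batch['ids']))    # numpy arrays and strings pass through untouched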
AlignShift
AlignShift-master/mmdet/core/fp16/decorators.py
import functools from inspect import getfullargspec import torch from .utils import cast_tensor_type def auto_fp16(apply_to=None, out_fp32=False): """Decorator to enable fp16 training automatically. This decorator is useful when you write custom modules and want to support mixed precision training. If inputs arguments are fp32 tensors, they will be converted to fp16 automatically. Arguments other than fp32 tensors are ignored. Args: apply_to (Iterable, optional): The argument names to be converted. `None` indicates all arguments. out_fp32 (bool): Whether to convert the output back to fp32. :Example: class MyModule1(nn.Module) # Convert x and y to fp16 @auto_fp16() def forward(self, x, y): pass class MyModule2(nn.Module): # convert pred to fp16 @auto_fp16(apply_to=('pred', )) def do_something(self, pred, others): pass """ def auto_fp16_wrapper(old_func): @functools.wraps(old_func) def new_func(*args, **kwargs): # check if the module has set the attribute `fp16_enabled`, if not, # just fallback to the original method. if not isinstance(args[0], torch.nn.Module): raise TypeError('@auto_fp16 can only be used to decorate the ' 'method of nn.Module') if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled): return old_func(*args, **kwargs) # get the arg spec of the decorated method args_info = getfullargspec(old_func) # get the argument names to be casted args_to_cast = args_info.args if apply_to is None else apply_to # convert the args that need to be processed new_args = [] # NOTE: default args are not taken into consideration if args: arg_names = args_info.args[:len(args)] for i, arg_name in enumerate(arg_names): if arg_name in args_to_cast: new_args.append( cast_tensor_type(args[i], torch.float, torch.half)) else: new_args.append(args[i]) # convert the kwargs that need to be processed new_kwargs = {} if kwargs: for arg_name, arg_value in kwargs.items(): if arg_name in args_to_cast: new_kwargs[arg_name] = cast_tensor_type( arg_value, torch.float, torch.half) else: new_kwargs[arg_name] = arg_value # apply converted arguments to the decorated method output = old_func(*new_args, **new_kwargs) # cast the results back to fp32 if necessary if out_fp32: output = cast_tensor_type(output, torch.half, torch.float) return output return new_func return auto_fp16_wrapper def force_fp32(apply_to=None, out_fp16=False): """Decorator to convert input arguments to fp32 in force. This decorator is useful when you write custom modules and want to support mixed precision training. If there are some inputs that must be processed in fp32 mode, then this decorator can handle it. If inputs arguments are fp16 tensors, they will be converted to fp32 automatically. Arguments other than fp16 tensors are ignored. Args: apply_to (Iterable, optional): The argument names to be converted. `None` indicates all arguments. out_fp16 (bool): Whether to convert the output back to fp16. :Example: class MyModule1(nn.Module) # Convert x and y to fp32 @force_fp32() def loss(self, x, y): pass class MyModule2(nn.Module): # convert pred to fp32 @force_fp32(apply_to=('pred', )) def post_process(self, pred, others): pass """ def force_fp32_wrapper(old_func): @functools.wraps(old_func) def new_func(*args, **kwargs): # check if the module has set the attribute `fp16_enabled`, if not, # just fallback to the original method. 
if not isinstance(args[0], torch.nn.Module): raise TypeError('@force_fp32 can only be used to decorate the ' 'method of nn.Module') if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled): return old_func(*args, **kwargs) # get the arg spec of the decorated method args_info = getfullargspec(old_func) # get the argument names to be casted args_to_cast = args_info.args if apply_to is None else apply_to # convert the args that need to be processed new_args = [] if args: arg_names = args_info.args[:len(args)] for i, arg_name in enumerate(arg_names): if arg_name in args_to_cast: new_args.append( cast_tensor_type(args[i], torch.half, torch.float)) else: new_args.append(args[i]) # convert the kwargs that need to be processed new_kwargs = dict() if kwargs: for arg_name, arg_value in kwargs.items(): if arg_name in args_to_cast: new_kwargs[arg_name] = cast_tensor_type( arg_value, torch.half, torch.float) else: new_kwargs[arg_name] = arg_value # apply converted arguments to the decorated method output = old_func(*new_args, **new_kwargs) # cast the results back to fp32 if necessary if out_fp16: output = cast_tensor_type(output, torch.float, torch.half) return output return new_func return force_fp32_wrapper
6,211
37.583851
79
py
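The docstrings above show where to place the decorators, but the cast only happens when the module also carries an fp16_enabled flag (normally set by wrap_fp16_model). A tiny sketch of that interaction, with a passthrough forward so the dtype change is visible.

import torch
import torch.nn as nn
from mmdet.core.fp16.decorators import auto_fp16

class ToyHead(nn.Module):
    def __init__(self):
        super().__init__()
        self.fp16_enabled = True          # normally set by wrap_fp16_model

    @auto_fp16(apply_to=('x', ))
    def forward(self, x):
        return x                          # passthrough, just to expose the cast

head = ToyHead()
print(head(torch.zeros(2, 3)).dtype)      # torch.float16
head.fp16_enabled = False
print(head(torch.zeros(2, 3)).dtype)      # torch.float32 -- decorator falls back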
AlignShift
AlignShift-master/mmdet/core/bbox/bbox_target.py
import torch from ..utils import multi_apply from .transforms import bbox2delta def bbox_target(pos_bboxes_list, neg_bboxes_list, pos_gt_bboxes_list, pos_gt_labels_list, cfg, reg_classes=1, target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0], concat=True): labels, label_weights, bbox_targets, bbox_weights = multi_apply( bbox_target_single, pos_bboxes_list, neg_bboxes_list, pos_gt_bboxes_list, pos_gt_labels_list, cfg=cfg, reg_classes=reg_classes, target_means=target_means, target_stds=target_stds) if concat: labels = torch.cat(labels, 0) label_weights = torch.cat(label_weights, 0) bbox_targets = torch.cat(bbox_targets, 0) bbox_weights = torch.cat(bbox_weights, 0) return labels, label_weights, bbox_targets, bbox_weights def bbox_target_single(pos_bboxes, neg_bboxes, pos_gt_bboxes, pos_gt_labels, cfg, reg_classes=1, target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]): num_pos = pos_bboxes.size(0) num_neg = neg_bboxes.size(0) num_samples = num_pos + num_neg labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long) label_weights = pos_bboxes.new_zeros(num_samples) bbox_targets = pos_bboxes.new_zeros(num_samples, 4) bbox_weights = pos_bboxes.new_zeros(num_samples, 4) if num_pos > 0: labels[:num_pos] = pos_gt_labels pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight label_weights[:num_pos] = pos_weight pos_bbox_targets = bbox2delta(pos_bboxes, pos_gt_bboxes, target_means, target_stds) bbox_targets[:num_pos, :] = pos_bbox_targets bbox_weights[:num_pos, :] = 1 if num_neg > 0: label_weights[-num_neg:] = 1.0 return labels, label_weights, bbox_targets, bbox_weights def expand_target(bbox_targets, bbox_weights, labels, num_classes): bbox_targets_expand = bbox_targets.new_zeros( (bbox_targets.size(0), 4 * num_classes)) bbox_weights_expand = bbox_weights.new_zeros( (bbox_weights.size(0), 4 * num_classes)) for i in torch.nonzero(labels > 0).squeeze(-1): start, end = labels[i] * 4, (labels[i] + 1) * 4 bbox_targets_expand[i, start:end] = bbox_targets[i, :] bbox_weights_expand[i, start:end] = bbox_weights[i, :] return bbox_targets_expand, bbox_weights_expand
2,717
35.72973
78
py
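expand_target above scatters per-sample regression deltas into per-class columns; a worked toy case makes the label-to-column mapping explicit (labels are 1-based, 0 means background).

import torch
from mmdet.core.bbox.bbox_target import expand_target

bbox_targets = torch.tensor([[0.1, 0.2, 0.3, 0.4],
                             [0.0, 0.0, 0.0, 0.0]])
bbox_weights = torch.tensor([[1., 1., 1., 1.],
                             [0., 0., 0., 0.]])
labels = torch.tensor([2, 0])             # sample 0 is class 2, sample 1 is background
t, w = expand_target(bbox_targets, bbox_weights, labels, num_classes=3)
print(t.shape)                            # (2, 12): 4 columns per class
print(t[0, 8:12])                         # sample 0's deltas land in columns 8..11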
AlignShift
AlignShift-master/mmdet/core/bbox/geometry.py
import torch def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False): """Calculate overlap between two set of bboxes. If ``is_aligned`` is ``False``, then calculate the ious between each bbox of bboxes1 and bboxes2, otherwise the ious between each aligned pair of bboxes1 and bboxes2. Args: bboxes1 (Tensor): shape (m, 4) bboxes2 (Tensor): shape (n, 4), if is_aligned is ``True``, then m and n must be equal. mode (str): "iou" (intersection over union) or iof (intersection over foreground). Returns: ious(Tensor): shape (m, n) if is_aligned == False else shape (m, 1) """ assert mode in ['iou', 'iof'] rows = bboxes1.size(0) cols = bboxes2.size(0) if is_aligned: assert rows == cols if rows * cols == 0: return bboxes1.new(rows, 1) if is_aligned else bboxes1.new(rows, cols) if is_aligned: lt = torch.max(bboxes1[:, :2], bboxes2[:, :2]) # [rows, 2] rb = torch.min(bboxes1[:, 2:], bboxes2[:, 2:]) # [rows, 2] wh = (rb - lt + 1).clamp(min=0) # [rows, 2] overlap = wh[:, 0] * wh[:, 1] area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * ( bboxes1[:, 3] - bboxes1[:, 1] + 1) if mode == 'iou': area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * ( bboxes2[:, 3] - bboxes2[:, 1] + 1) ious = overlap / (area1 + area2 - overlap) else: ious = overlap / area1 else: lt = torch.max(bboxes1[:, None, :2], bboxes2[:, :2]) # [rows, cols, 2] rb = torch.min(bboxes1[:, None, 2:], bboxes2[:, 2:]) # [rows, cols, 2] wh = (rb - lt + 1).clamp(min=0) # [rows, cols, 2] overlap = wh[:, :, 0] * wh[:, :, 1] area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * ( bboxes1[:, 3] - bboxes1[:, 1] + 1) if mode == 'iou': area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * ( bboxes2[:, 3] - bboxes2[:, 1] + 1) ious = overlap / (area1[:, None] + area2 - overlap) else: ious = overlap / (area1[:, None]) return ious
2,163
32.8125
79
py
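A hand-checkable case for bbox_overlaps above; note the +1 pixel convention, so [0, 0, 9, 9] is a 10x10 box.

import torch
from mmdet.core.bbox.geometry import bbox_overlaps

b1 = torch.tensor([[0., 0., 9., 9.]])       # area 10 * 10 = 100
b2 = torch.tensor([[5., 5., 14., 14.]])     # area 100, shifted by 5 pixels
print(bbox_overlaps(b1, b2))                # intersection 25, union 175 -> ~0.1429
print(bbox_overlaps(b1, b2, mode='iof'))    # 25 / 100 = 0.25 (intersection over foreground)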
AlignShift
AlignShift-master/mmdet/core/bbox/transforms.py
import mmcv import numpy as np import torch def bbox2delta(proposals, gt, means=[0, 0, 0, 0], stds=[1, 1, 1, 1]): assert proposals.size() == gt.size() proposals = proposals.float() gt = gt.float() px = (proposals[..., 0] + proposals[..., 2]) * 0.5 py = (proposals[..., 1] + proposals[..., 3]) * 0.5 pw = proposals[..., 2] - proposals[..., 0] + 1.0 ph = proposals[..., 3] - proposals[..., 1] + 1.0 gx = (gt[..., 0] + gt[..., 2]) * 0.5 gy = (gt[..., 1] + gt[..., 3]) * 0.5 gw = gt[..., 2] - gt[..., 0] + 1.0 gh = gt[..., 3] - gt[..., 1] + 1.0 dx = (gx - px) / pw dy = (gy - py) / ph dw = torch.log(gw / pw) dh = torch.log(gh / ph) deltas = torch.stack([dx, dy, dw, dh], dim=-1) means = deltas.new_tensor(means).unsqueeze(0) stds = deltas.new_tensor(stds).unsqueeze(0) deltas = deltas.sub_(means).div_(stds) return deltas def delta2bbox(rois, deltas, means=[0, 0, 0, 0], stds=[1, 1, 1, 1], max_shape=None, wh_ratio_clip=16 / 1000): """ Apply deltas to shift/scale base boxes. Typically the rois are anchor or proposed bounding boxes and the deltas are network outputs used to shift/scale those boxes. Args: rois (Tensor): boxes to be transformed. Has shape (N, 4) deltas (Tensor): encoded offsets with respect to each roi. Has shape (N, 4). Note N = num_anchors * W * H when rois is a grid of anchors. Offset encoding follows [1]_. means (list): denormalizing means for delta coordinates stds (list): denormalizing standard deviation for delta coordinates max_shape (tuple[int, int]): maximum bounds for boxes. specifies (H, W) wh_ratio_clip (float): maximum aspect ratio for boxes. Returns: Tensor: boxes with shape (N, 4), where columns represent tl_x, tl_y, br_x, br_y. References: .. [1] https://arxiv.org/abs/1311.2524 Example: >>> rois = torch.Tensor([[ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 5., 5., 5., 5.]]) >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], >>> [ 1., 1., 1., 1.], >>> [ 0., 0., 2., -1.], >>> [ 0.7, -1.9, -0.5, 0.3]]) >>> delta2bbox(rois, deltas, max_shape=(32, 32)) tensor([[0.0000, 0.0000, 1.0000, 1.0000], [0.2817, 0.2817, 4.7183, 4.7183], [0.0000, 0.6321, 7.3891, 0.3679], [5.8967, 2.9251, 5.5033, 3.2749]]) """ # with open('./tests/debug.txt', 'a') as f: # f.write(f'{deltas.shape}\t{deltas.max()}\t{deltas.min()}\n') means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4) stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4) denorm_deltas = deltas * stds + means dx = denorm_deltas[:, 0::4] dy = denorm_deltas[:, 1::4] dw = denorm_deltas[:, 2::4] dh = denorm_deltas[:, 3::4] max_ratio = np.abs(np.log(wh_ratio_clip)) dw = dw.clamp(min=-max_ratio, max=max_ratio) dh = dh.clamp(min=-max_ratio, max=max_ratio) # Compute center of each roi px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx) py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy) # Compute width/height of each roi pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw) ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh) # Use exp(network energy) to enlarge/shrink each roi gw = pw * dw.exp() gh = ph * dh.exp() # Use network energy to shift the center of each roi gx = torch.addcmul(px, 1, pw, dx) # gx = px + pw * dx gy = torch.addcmul(py, 1, ph, dy) # gy = py + ph * dy # Convert center-xy/width/height to top-left, bottom-right x1 = gx - gw * 0.5 + 0.5 y1 = gy - gh * 0.5 + 0.5 x2 = gx + gw * 0.5 - 0.5 y2 = gy + gh * 0.5 - 0.5 if max_shape is not None: x1 = x1.clamp(min=0, max=max_shape[1] - 1) y1 = y1.clamp(min=0, max=max_shape[0] - 1) x2 = x2.clamp(min=0, 
max=max_shape[1] - 1) y2 = y2.clamp(min=0, max=max_shape[0] - 1) bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas) return bboxes def bbox_flip(bboxes, img_shape): """Flip bboxes horizontally. Args: bboxes(Tensor or ndarray): Shape (..., 4*k) img_shape(tuple): Image shape. Returns: Same type as `bboxes`: Flipped bboxes. """ if isinstance(bboxes, torch.Tensor): assert bboxes.shape[-1] % 4 == 0 flipped = bboxes.clone() flipped[:, 0::4] = img_shape[1] - bboxes[:, 2::4] - 1 flipped[:, 2::4] = img_shape[1] - bboxes[:, 0::4] - 1 return flipped elif isinstance(bboxes, np.ndarray): return mmcv.bbox_flip(bboxes, img_shape) def bbox_mapping(bboxes, img_shape, scale_factor, flip): """Map bboxes from the original image scale to testing scale""" new_bboxes = bboxes * scale_factor if flip: new_bboxes = bbox_flip(new_bboxes, img_shape) return new_bboxes def bbox_mapping_back(bboxes, img_shape, scale_factor, flip): """Map bboxes from testing scale to original image scale""" new_bboxes = bbox_flip(bboxes, img_shape) if flip else bboxes new_bboxes = new_bboxes / scale_factor return new_bboxes def bbox2roi(bbox_list): """Convert a list of bboxes to roi format. Args: bbox_list (list[Tensor]): a list of bboxes corresponding to a batch of images. Returns: Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2] """ rois_list = [] for img_id, bboxes in enumerate(bbox_list): if bboxes.size(0) > 0: img_inds = bboxes.new_full((bboxes.size(0), 1), img_id) rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1) else: rois = bboxes.new_zeros((0, 5)) rois_list.append(rois) rois = torch.cat(rois_list, 0) return rois def roi2bbox(rois): bbox_list = [] img_ids = torch.unique(rois[:, 0].cpu(), sorted=True) for img_id in img_ids: inds = (rois[:, 0] == img_id.item()) bbox = rois[inds, 1:] bbox_list.append(bbox) return bbox_list def bbox2result(bboxes, labels, num_classes): """Convert detection results to a list of numpy arrays. Args: bboxes (Tensor): shape (n, 5) labels (Tensor): shape (n, ) num_classes (int): class number, including background class Returns: list(ndarray): bbox results of each class """ if bboxes.shape[0] == 0: return [ np.zeros((0, 5), dtype=np.float32) for i in range(num_classes - 1) ] else: bboxes = bboxes.cpu().numpy() labels = labels.cpu().numpy() return [bboxes[labels == i, :] for i in range(num_classes - 1)] def distance2bbox(points, distance, max_shape=None): """Decode distance prediction to bounding box. Args: points (Tensor): Shape (n, 2), [x, y]. distance (Tensor): Distance from the given point to 4 boundaries (left, top, right, bottom). max_shape (tuple): Shape of the image. Returns: Tensor: Decoded bboxes. """ x1 = points[:, 0] - distance[:, 0] y1 = points[:, 1] - distance[:, 1] x2 = points[:, 0] + distance[:, 2] y2 = points[:, 1] + distance[:, 3] if max_shape is not None: x1 = x1.clamp(min=0, max=max_shape[1] - 1) y1 = y1.clamp(min=0, max=max_shape[0] - 1) x2 = x2.clamp(min=0, max=max_shape[1] - 1) y2 = y2.clamp(min=0, max=max_shape[0] - 1) return torch.stack([x1, y1, x2, y2], -1)
7,887
33.902655
79
py
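bbox2delta and delta2bbox above are inverses of each other; a quick round-trip check on one proposal/ground-truth pair.

import torch
from mmdet.core.bbox.transforms import bbox2delta, delta2bbox

proposals = torch.tensor([[10., 10., 49., 69.]])
gt = torch.tensor([[12., 8., 51., 75.]])
deltas = bbox2delta(proposals, gt)                 # normalized (dx, dy, dw, dh)
recovered = delta2bbox(proposals, deltas)
print(torch.allclose(recovered, gt, atol=1e-4))    # True: decoding undoes the encoding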
AlignShift
AlignShift-master/mmdet/core/bbox/assigners/assign_result.py
import torch class AssignResult(object): def __init__(self, num_gts, gt_inds, max_overlaps, labels=None): self.num_gts = num_gts self.gt_inds = gt_inds self.max_overlaps = max_overlaps self.labels = labels def add_gt_(self, gt_labels): self_inds = torch.arange( 1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device) self.gt_inds = torch.cat([self_inds, self.gt_inds]) self.max_overlaps = torch.cat( [self.max_overlaps.new_ones(self.num_gts), self.max_overlaps]) if self.labels is not None: self.labels = torch.cat([gt_labels, self.labels])
664
32.25
77
py
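AssignResult above stores 1-based gt indices (0 means negative); add_gt_ prepends the ground-truth boxes themselves as positive samples, which the samplers rely on. A tiny demonstration:

import torch
from mmdet.core.bbox.assigners.assign_result import AssignResult

res = AssignResult(num_gts=1,
                   gt_inds=torch.tensor([0, 1, 0]),            # only anchor 1 matched gt #1
                   max_overlaps=torch.tensor([0.2, 0.8, 0.1]),
                   labels=torch.tensor([0, 3, 0]))
res.add_gt_(gt_labels=torch.tensor([3]))
print(res.gt_inds)    # tensor([1, 0, 1, 0]): the gt box itself is prepended as a positive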
AlignShift
AlignShift-master/mmdet/core/bbox/assigners/point_assigner.py
import torch from .assign_result import AssignResult from .base_assigner import BaseAssigner class PointAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each point. Each proposals will be assigned with `0`, or a positive integer indicating the ground truth index. - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt """ def __init__(self, scale=4, pos_num=3): self.scale = scale self.pos_num = pos_num def assign(self, points, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): """Assign gt to points. This method assign a gt bbox to every points set, each points set will be assigned with 0, or a positive number. 0 means negative sample, positive number is the index (1-based) of assigned gt. The assignment is done in following steps, the order matters. 1. assign every points to 0 2. A point is assigned to some gt bbox if (i) the point is within the k closest points to the gt bbox (ii) the distance between this point and the gt is smaller than other gt bboxes Args: points (Tensor): points to be assigned, shape(n, 3) while last dimension stands for (x, y, stride). gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). Returns: :obj:`AssignResult`: The assign result. """ if points.shape[0] == 0 or gt_bboxes.shape[0] == 0: raise ValueError('No gt or bboxes') points_xy = points[:, :2] points_stride = points[:, 2] points_lvl = torch.log2( points_stride).int() # [3...,4...,5...,6...,7...] lvl_min, lvl_max = points_lvl.min(), points_lvl.max() num_gts, num_points = gt_bboxes.shape[0], points.shape[0] # assign gt box gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2 gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6) scale = self.scale gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) + torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int() gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max) # stores the assigned gt index of each point assigned_gt_inds = points.new_zeros((num_points, ), dtype=torch.long) # stores the assigned gt dist (to this point) of each point assigned_gt_dist = points.new_full((num_points, ), float('inf')) points_range = torch.arange(points.shape[0]) for idx in range(num_gts): gt_lvl = gt_bboxes_lvl[idx] # get the index of points in this level lvl_idx = gt_lvl == points_lvl points_index = points_range[lvl_idx] # get the points in this level lvl_points = points_xy[lvl_idx, :] # get the center point of gt gt_point = gt_bboxes_xy[[idx], :] # get width and height of gt gt_wh = gt_bboxes_wh[[idx], :] # compute the distance between gt center and # all points in this level points_gt_dist = ((lvl_points - gt_point) / gt_wh).norm(dim=1) # find the nearest k points to gt center in this level min_dist, min_dist_index = torch.topk( points_gt_dist, self.pos_num, largest=False) # the index of nearest k points to gt center in this level min_dist_points_index = points_index[min_dist_index] # The less_than_recorded_index stores the index # of min_dist that is less then the assigned_gt_dist. Where # assigned_gt_dist stores the dist from previous assigned gt # (if exist) to each point. less_than_recorded_index = min_dist < assigned_gt_dist[ min_dist_points_index] # The min_dist_points_index stores the index of points satisfy: # (1) it is k nearest to current gt center in this level. 
# (2) it is closer to current gt center than other gt center. min_dist_points_index = min_dist_points_index[ less_than_recorded_index] # assign the result assigned_gt_inds[min_dist_points_index] = idx + 1 assigned_gt_dist[min_dist_points_index] = min_dist[ less_than_recorded_index] if gt_labels is not None: assigned_labels = assigned_gt_inds.new_zeros((num_points, )) pos_inds = torch.nonzero(assigned_gt_inds > 0).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[ assigned_gt_inds[pos_inds] - 1] else: assigned_labels = None return AssignResult( num_gts, assigned_gt_inds, None, labels=assigned_labels)
5,183
43.307692
79
py
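A self-contained run of PointAssigner above: four stride-8 points and one 32x32 ground-truth box, which lands on the same pyramid level (log2(32 / scale) = 3), so the three closest points become positives.

import torch
from mmdet.core.bbox.assigners.point_assigner import PointAssigner

assigner = PointAssigner(scale=4, pos_num=3)
points = torch.tensor([[ 8.,  8., 8.],
                       [24.,  8., 8.],
                       [ 8., 24., 8.],
                       [40., 40., 8.]])            # (x, y, stride)
gt_bboxes = torch.tensor([[0., 0., 32., 32.]])
gt_labels = torch.tensor([1])
result = assigner.assign(points, gt_bboxes, gt_labels=gt_labels)
print(result.gt_inds)    # tensor([1, 1, 1, 0]): the three nearest points assigned to gt #1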
AlignShift
AlignShift-master/mmdet/core/bbox/assigners/approx_max_iou_assigner.py
import torch from ..geometry import bbox_overlaps from .max_iou_assigner import MaxIoUAssigner class ApproxMaxIoUAssigner(MaxIoUAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposals will be assigned with `-1`, `0`, or a positive integer indicating the ground truth index. - -1: don't care - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt Args: pos_iou_thr (float): IoU threshold for positive bboxes. neg_iou_thr (float or tuple): IoU threshold for negative bboxes. min_pos_iou (float): Minimum iou for a bbox to be considered as a positive bbox. Positive samples can have smaller IoU than pos_iou_thr due to the 4th step (assign max IoU sample to each gt). gt_max_assign_all (bool): Whether to assign all bboxes with the same highest overlap with some gt to that gt. ignore_iof_thr (float): IoF threshold for ignoring bboxes (if `gt_bboxes_ignore` is specified). Negative values mean not ignoring any bboxes. ignore_wrt_candidates (bool): Whether to compute the iof between `bboxes` and `gt_bboxes_ignore`, or the contrary. gpu_assign_thr (int): The upper bound of the number of GT for GPU assign. When the number of gt is above this threshold, will assign on CPU device. Negative values mean not assign on CPU. """ def __init__(self, pos_iou_thr, neg_iou_thr, min_pos_iou=.0, gt_max_assign_all=True, ignore_iof_thr=-1, ignore_wrt_candidates=True, gpu_assign_thr=-1): self.pos_iou_thr = pos_iou_thr self.neg_iou_thr = neg_iou_thr self.min_pos_iou = min_pos_iou self.gt_max_assign_all = gt_max_assign_all self.ignore_iof_thr = ignore_iof_thr self.ignore_wrt_candidates = ignore_wrt_candidates self.gpu_assign_thr = gpu_assign_thr def assign(self, approxs, squares, approxs_per_octave, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): """Assign gt to approxs. This method assign a gt bbox to each group of approxs (bboxes), each group of approxs is represent by a base approx (bbox) and will be assigned with -1, 0, or a positive number. -1 means don't care, 0 means negative sample, positive number is the index (1-based) of assigned gt. The assignment is done in following steps, the order matters. 1. assign every bbox to -1 2. use the max IoU of each group of approxs to assign 2. assign proposals whose iou with all gts < neg_iou_thr to 0 3. for each bbox, if the iou with its nearest gt >= pos_iou_thr, assign it to that bbox 4. for each gt bbox, assign its nearest proposals (may be more than one) to itself Args: approxs (Tensor): Bounding boxes to be assigned, shape(approxs_per_octave*n, 4). squares (Tensor): Base Bounding boxes to be assigned, shape(n, 4). approxs_per_octave (int): number of approxs per octave gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). Returns: :obj:`AssignResult`: The assign result. 
""" if squares.shape[0] == 0 or gt_bboxes.shape[0] == 0: raise ValueError('No gt or approxs') num_squares = squares.size(0) num_gts = gt_bboxes.size(0) # re-organize anchors by approxs_per_octave x num_squares approxs = torch.transpose( approxs.view(num_squares, approxs_per_octave, 4), 0, 1).contiguous().view(-1, 4) assign_on_cpu = True if (self.gpu_assign_thr > 0) and ( num_gts > self.gpu_assign_thr) else False # compute overlap and assign gt on CPU when number of GT is large if assign_on_cpu: device = approxs.device approxs = approxs.cpu() gt_bboxes = gt_bboxes.cpu() if gt_bboxes_ignore is not None: gt_bboxes_ignore = gt_bboxes_ignore.cpu() if gt_labels is not None: gt_labels = gt_labels.cpu() all_overlaps = bbox_overlaps(approxs, gt_bboxes) overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares, num_gts).max(dim=0) overlaps = torch.transpose(overlaps, 0, 1) bboxes = squares[:, :4] if (self.ignore_iof_thr > 0) and (gt_bboxes_ignore is not None) and ( gt_bboxes_ignore.numel() > 0): if self.ignore_wrt_candidates: ignore_overlaps = bbox_overlaps( bboxes, gt_bboxes_ignore, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) else: ignore_overlaps = bbox_overlaps( gt_bboxes_ignore, bboxes, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=0) overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1 assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) if assign_on_cpu: assign_result.gt_inds = assign_result.gt_inds.to(device) assign_result.max_overlaps = assign_result.max_overlaps.to(device) if assign_result.labels is not None: assign_result.labels = assign_result.labels.to(device) return assign_result
5,910
42.463235
79
py
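A minimal usage sketch for the file above, not part of the repository: it shows the group-major layout ApproxMaxIoUAssigner expects for `approxs` and the per-square result it returns. The module path is presumed from mmdet's layout, all box values are invented, and the package from this repo is assumed to be importable.

import torch
from mmdet.core.bbox.assigners.approx_max_iou_assigner import ApproxMaxIoUAssigner

approxs_per_octave = 2
# Two base (square) anchors, each followed by its two approx anchors back to back.
squares = torch.tensor([[0., 0., 10., 10.],
                        [20., 20., 30., 30.]])
approxs = torch.tensor([[0., 0., 10., 10.],
                        [0., 0., 12., 8.],
                        [20., 20., 30., 30.],
                        [18., 22., 32., 28.]])
gt_bboxes = torch.tensor([[0., 0., 10., 10.]])

assigner = ApproxMaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)
result = assigner.assign(approxs, squares, approxs_per_octave, gt_bboxes)
print(result.gt_inds)  # tensor([1, 0]): the first square matches the gt, the second is negative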
AlignShift
AlignShift-master/mmdet/core/bbox/assigners/max_iou_assigner.py
import torch from ..geometry import bbox_overlaps from .assign_result import AssignResult from .base_assigner import BaseAssigner class MaxIoUAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposals will be assigned with `-1`, `0`, or a positive integer indicating the ground truth index. - -1: don't care - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt Args: pos_iou_thr (float): IoU threshold for positive bboxes. neg_iou_thr (float or tuple): IoU threshold for negative bboxes. min_pos_iou (float): Minimum iou for a bbox to be considered as a positive bbox. Positive samples can have smaller IoU than pos_iou_thr due to the 4th step (assign max IoU sample to each gt). gt_max_assign_all (bool): Whether to assign all bboxes with the same highest overlap with some gt to that gt. ignore_iof_thr (float): IoF threshold for ignoring bboxes (if `gt_bboxes_ignore` is specified). Negative values mean not ignoring any bboxes. ignore_wrt_candidates (bool): Whether to compute the iof between `bboxes` and `gt_bboxes_ignore`, or the contrary. gpu_assign_thr (int): The upper bound of the number of GT for GPU assign. When the number of gt is above this threshold, will assign on CPU device. Negative values mean not assign on CPU. """ def __init__(self, pos_iou_thr, neg_iou_thr, min_pos_iou=.0, gt_max_assign_all=True, ignore_iof_thr=-1, ignore_wrt_candidates=True, gpu_assign_thr=-1): self.pos_iou_thr = pos_iou_thr self.neg_iou_thr = neg_iou_thr self.min_pos_iou = min_pos_iou self.gt_max_assign_all = gt_max_assign_all self.ignore_iof_thr = ignore_iof_thr self.ignore_wrt_candidates = ignore_wrt_candidates self.gpu_assign_thr = gpu_assign_thr def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): """Assign gt to bboxes. This method assign a gt bbox to every bbox (proposal/anchor), each bbox will be assigned with -1, 0, or a positive number. -1 means don't care, 0 means negative sample, positive number is the index (1-based) of assigned gt. The assignment is done in following steps, the order matters. 1. assign every bbox to -1 2. assign proposals whose iou with all gts < neg_iou_thr to 0 3. for each bbox, if the iou with its nearest gt >= pos_iou_thr, assign it to that bbox 4. for each gt bbox, assign its nearest proposals (may be more than one) to itself Args: bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). Returns: :obj:`AssignResult`: The assign result. 
""" if bboxes.shape[0] == 0 or gt_bboxes.shape[0] == 0: raise ValueError('No gt or bboxes') assign_on_cpu = True if (self.gpu_assign_thr > 0) and ( gt_bboxes.shape[0] > self.gpu_assign_thr) else False # compute overlap and assign gt on CPU when number of GT is large if assign_on_cpu: device = bboxes.device bboxes = bboxes.cpu() gt_bboxes = gt_bboxes.cpu() if gt_bboxes_ignore is not None: gt_bboxes_ignore = gt_bboxes_ignore.cpu() if gt_labels is not None: gt_labels = gt_labels.cpu() bboxes = bboxes[:, :4] overlaps = bbox_overlaps(gt_bboxes, bboxes) if (self.ignore_iof_thr > 0) and (gt_bboxes_ignore is not None) and ( gt_bboxes_ignore.numel() > 0): if self.ignore_wrt_candidates: ignore_overlaps = bbox_overlaps( bboxes, gt_bboxes_ignore, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) else: ignore_overlaps = bbox_overlaps( gt_bboxes_ignore, bboxes, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=0) overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1 assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) if assign_on_cpu: assign_result.gt_inds = assign_result.gt_inds.to(device) assign_result.max_overlaps = assign_result.max_overlaps.to(device) if assign_result.labels is not None: assign_result.labels = assign_result.labels.to(device) return assign_result def assign_wrt_overlaps(self, overlaps, gt_labels=None): """Assign w.r.t. the overlaps of bboxes with gts. Args: overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes, shape(k, n). gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ). Returns: :obj:`AssignResult`: The assign result. """ if overlaps.numel() == 0: raise ValueError('No gt or proposals') num_gts, num_bboxes = overlaps.size(0), overlaps.size(1) # 1. assign -1 by default assigned_gt_inds = overlaps.new_full((num_bboxes, ), -1, dtype=torch.long) # for each anchor, which gt best overlaps with it # for each anchor, the max iou of all gts max_overlaps, argmax_overlaps = overlaps.max(dim=0) # for each gt, which anchor best overlaps with it # for each gt, the max iou of all proposals gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1) # 2. assign negative: below if isinstance(self.neg_iou_thr, float): assigned_gt_inds[(max_overlaps >= 0) & (max_overlaps < self.neg_iou_thr)] = 0 elif isinstance(self.neg_iou_thr, tuple): assert len(self.neg_iou_thr) == 2 assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0]) & (max_overlaps < self.neg_iou_thr[1])] = 0 # 3. assign positive: above positive IoU threshold pos_inds = max_overlaps >= self.pos_iou_thr assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1 # 4. assign fg: for each gt, proposals with highest IoU for i in range(num_gts): if gt_max_overlaps[i] >= self.min_pos_iou: if self.gt_max_assign_all: max_iou_inds = overlaps[i, :] == gt_max_overlaps[i] assigned_gt_inds[max_iou_inds] = i + 1 else: assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1 if gt_labels is not None: assigned_labels = assigned_gt_inds.new_zeros((num_bboxes, )) pos_inds = torch.nonzero(assigned_gt_inds > 0).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[ assigned_gt_inds[pos_inds] - 1] else: assigned_labels = None return AssignResult( num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
7,657
42.76
79
py
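A small worked example of the four assignment steps documented above, not part of the repository: a hand-made 2 gt x 3 box IoU matrix is fed straight into assign_wrt_overlaps. The import path follows the file listing above; the thresholds and IoU values are invented.

import torch
from mmdet.core.bbox.assigners.max_iou_assigner import MaxIoUAssigner

assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3, min_pos_iou=0.2)
# Rows are gts, columns are candidate boxes.
overlaps = torch.tensor([[0.80, 0.20, 0.05],
                         [0.10, 0.60, 0.25]])
result = assigner.assign_wrt_overlaps(overlaps)
print(result.gt_inds)       # tensor([1, 2, 0]): box0 -> gt1, box1 -> gt2, box2 -> negative
print(result.max_overlaps)  # tensor([0.8000, 0.6000, 0.2500])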
AlignShift
AlignShift-master/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py
import numpy as np import torch from .random_sampler import RandomSampler class InstanceBalancedPosSampler(RandomSampler): def _sample_pos(self, assign_result, num_expected, **kwargs): pos_inds = torch.nonzero(assign_result.gt_inds > 0) if pos_inds.numel() != 0: pos_inds = pos_inds.squeeze(1) if pos_inds.numel() <= num_expected: return pos_inds else: unique_gt_inds = assign_result.gt_inds[pos_inds].unique() num_gts = len(unique_gt_inds) num_per_gt = int(round(num_expected / float(num_gts)) + 1) sampled_inds = [] for i in unique_gt_inds: inds = torch.nonzero(assign_result.gt_inds == i.item()) if inds.numel() != 0: inds = inds.squeeze(1) else: continue if len(inds) > num_per_gt: inds = self.random_choice(inds, num_per_gt) sampled_inds.append(inds) sampled_inds = torch.cat(sampled_inds) if len(sampled_inds) < num_expected: num_extra = num_expected - len(sampled_inds) extra_inds = np.array( list(set(pos_inds.cpu()) - set(sampled_inds.cpu()))) if len(extra_inds) > num_extra: extra_inds = self.random_choice(extra_inds, num_extra) extra_inds = torch.from_numpy(extra_inds).to( assign_result.gt_inds.device).long() sampled_inds = torch.cat([sampled_inds, extra_inds]) elif len(sampled_inds) > num_expected: sampled_inds = self.random_choice(sampled_inds, num_expected) return sampled_inds
1,765
41.047619
77
py
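A quick numeric illustration of the per-instance quota used by _sample_pos above, not part of the repository; the counts are invented.

num_expected = 8   # positive RoIs wanted in the batch
num_gts = 3        # distinct gt instances among the positive candidates
num_per_gt = int(round(num_expected / float(num_gts)) + 1)
print(num_per_gt)  # 4: each gt contributes at most 4 positives; any shortfall is
                   # refilled from the remaining positives, any surplus re-sampled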
AlignShift
AlignShift-master/mmdet/core/bbox/samplers/base_sampler.py
from abc import ABCMeta, abstractmethod import torch from .sampling_result import SamplingResult class BaseSampler(metaclass=ABCMeta): def __init__(self, num, pos_fraction, neg_pos_ub=-1, add_gt_as_proposals=True, **kwargs): self.num = num self.pos_fraction = pos_fraction self.neg_pos_ub = neg_pos_ub self.add_gt_as_proposals = add_gt_as_proposals self.pos_sampler = self self.neg_sampler = self @abstractmethod def _sample_pos(self, assign_result, num_expected, **kwargs): pass @abstractmethod def _sample_neg(self, assign_result, num_expected, **kwargs): pass def sample(self, assign_result, bboxes, gt_bboxes, gt_labels=None, **kwargs): """Sample positive and negative bboxes. This is a simple implementation of bbox sampling given candidates, assigning results and ground truth bboxes. Args: assign_result (:obj:`AssignResult`): Bbox assigning results. bboxes (Tensor): Boxes to be sampled from. gt_bboxes (Tensor): Ground truth bboxes. gt_labels (Tensor, optional): Class labels of ground truth bboxes. Returns: :obj:`SamplingResult`: Sampling result. """ bboxes = bboxes[:, :4] gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8) if self.add_gt_as_proposals: bboxes = torch.cat([gt_bboxes, bboxes], dim=0) gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8) assign_result.add_gt_(bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.long)) gt_flags = torch.cat([gt_ones, gt_flags]) num_expected_pos = int(self.num * self.pos_fraction) pos_inds = self.pos_sampler._sample_pos( assign_result, num_expected_pos, bboxes=bboxes, **kwargs) # We found that sampled indices have duplicated items occasionally. # (may be a bug of PyTorch) pos_inds = pos_inds.unique() num_sampled_pos = pos_inds.numel() num_expected_neg = self.num - num_sampled_pos if self.neg_pos_ub >= 0: _pos = max(1, num_sampled_pos) neg_upper_bound = int(self.neg_pos_ub * _pos) if num_expected_neg > neg_upper_bound: num_expected_neg = neg_upper_bound neg_inds = self.neg_sampler._sample_neg( assign_result, num_expected_neg, bboxes=bboxes, **kwargs) neg_inds = neg_inds.unique() return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags)
2,806
34.531646
88
py
AlignShift
AlignShift-master/mmdet/core/bbox/samplers/random_sampler.py
import numpy as np import torch from .base_sampler import BaseSampler class RandomSampler(BaseSampler): def __init__(self, num, pos_fraction, neg_pos_ub=-1, add_gt_as_proposals=True, **kwargs): super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals) @staticmethod def random_choice(gallery, num): """Random select some elements from the gallery. It seems that Pytorch's implementation is slower than numpy so we use numpy to randperm the indices. """ assert len(gallery) >= num if isinstance(gallery, list): gallery = np.array(gallery) cands = np.arange(len(gallery)) np.random.shuffle(cands) rand_inds = cands[:num] if not isinstance(gallery, np.ndarray): rand_inds = torch.from_numpy(rand_inds).long().to(gallery.device) return gallery[rand_inds] def _sample_pos(self, assign_result, num_expected, **kwargs): """Randomly sample some positive samples.""" pos_inds = torch.nonzero(assign_result.gt_inds > 0) if pos_inds.numel() != 0: pos_inds = pos_inds.squeeze(1) if pos_inds.numel() <= num_expected: return pos_inds else: return self.random_choice(pos_inds, num_expected) def _sample_neg(self, assign_result, num_expected, **kwargs): """Randomly sample some negative samples.""" neg_inds = torch.nonzero(assign_result.gt_inds == 0) if neg_inds.numel() != 0: neg_inds = neg_inds.squeeze(1) if len(neg_inds) <= num_expected: return neg_inds else: return self.random_choice(neg_inds, num_expected)
1,858
33.425926
77
py
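A minimal sketch of the random_choice helper above, not part of the repository: it is a staticmethod, so it can be called without building a sampler. The import path follows the file listing; the gallery values are invented.

import torch
from mmdet.core.bbox.samplers.random_sampler import RandomSampler

gallery = torch.arange(10)
picked = RandomSampler.random_choice(gallery, 4)
print(picked)  # four distinct elements of `gallery`, e.g. tensor([7, 2, 9, 0])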
AlignShift
AlignShift-master/mmdet/core/bbox/samplers/ohem_sampler.py
import torch from ..transforms import bbox2roi from .base_sampler import BaseSampler class OHEMSampler(BaseSampler): """ Online Hard Example Mining Sampler described in [1]_. References: .. [1] https://arxiv.org/pdf/1604.03540.pdf """ def __init__(self, num, pos_fraction, context, neg_pos_ub=-1, add_gt_as_proposals=True, **kwargs): super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals) if not hasattr(context, 'num_stages'): self.bbox_roi_extractor = context.bbox_roi_extractor self.bbox_head = context.bbox_head else: self.bbox_roi_extractor = context.bbox_roi_extractor[ context.current_stage] self.bbox_head = context.bbox_head[context.current_stage] def hard_mining(self, inds, num_expected, bboxes, labels, feats): with torch.no_grad(): rois = bbox2roi([bboxes]) bbox_feats = self.bbox_roi_extractor( feats[:self.bbox_roi_extractor.num_inputs], rois) cls_score, _ = self.bbox_head(bbox_feats) loss = self.bbox_head.loss( cls_score=cls_score, bbox_pred=None, labels=labels, label_weights=cls_score.new_ones(cls_score.size(0)), bbox_targets=None, bbox_weights=None, reduction_override='none')['loss_cls'] _, topk_loss_inds = loss.topk(num_expected) return inds[topk_loss_inds] def _sample_pos(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs): # Sample some hard positive samples pos_inds = torch.nonzero(assign_result.gt_inds > 0) if pos_inds.numel() != 0: pos_inds = pos_inds.squeeze(1) if pos_inds.numel() <= num_expected: return pos_inds else: return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds], assign_result.labels[pos_inds], feats) def _sample_neg(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs): # Sample some hard negative samples neg_inds = torch.nonzero(assign_result.gt_inds == 0) if neg_inds.numel() != 0: neg_inds = neg_inds.squeeze(1) if len(neg_inds) <= num_expected: return neg_inds else: return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds], assign_result.labels[neg_inds], feats)
2,912
35.4125
77
py
AlignShift
AlignShift-master/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py
import numpy as np import torch from .random_sampler import RandomSampler class IoUBalancedNegSampler(RandomSampler): """IoU Balanced Sampling arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019) Sampling proposals according to their IoU. `floor_fraction` of needed RoIs are sampled from proposals whose IoU are lower than `floor_thr` randomly. The others are sampled from proposals whose IoU are higher than `floor_thr`. These proposals are sampled from some bins evenly, which are split by `num_bins` via IoU evenly. Args: num (int): number of proposals. pos_fraction (float): fraction of positive proposals. floor_thr (float): threshold (minimum) IoU for IoU balanced sampling, set to -1 if all using IoU balanced sampling. floor_fraction (float): sampling fraction of proposals under floor_thr. num_bins (int): number of bins in IoU balanced sampling. """ def __init__(self, num, pos_fraction, floor_thr=-1, floor_fraction=0, num_bins=3, **kwargs): super(IoUBalancedNegSampler, self).__init__(num, pos_fraction, **kwargs) assert floor_thr >= 0 or floor_thr == -1 assert 0 <= floor_fraction <= 1 assert num_bins >= 1 self.floor_thr = floor_thr self.floor_fraction = floor_fraction self.num_bins = num_bins def sample_via_interval(self, max_overlaps, full_set, num_expected): max_iou = max_overlaps.max() iou_interval = (max_iou - self.floor_thr) / self.num_bins per_num_expected = int(num_expected / self.num_bins) sampled_inds = [] for i in range(self.num_bins): start_iou = self.floor_thr + i * iou_interval end_iou = self.floor_thr + (i + 1) * iou_interval tmp_set = set( np.where( np.logical_and(max_overlaps >= start_iou, max_overlaps < end_iou))[0]) tmp_inds = list(tmp_set & full_set) if len(tmp_inds) > per_num_expected: tmp_sampled_set = self.random_choice(tmp_inds, per_num_expected) else: tmp_sampled_set = np.array(tmp_inds, dtype=np.int) sampled_inds.append(tmp_sampled_set) sampled_inds = np.concatenate(sampled_inds) if len(sampled_inds) < num_expected: num_extra = num_expected - len(sampled_inds) extra_inds = np.array(list(full_set - set(sampled_inds))) if len(extra_inds) > num_extra: extra_inds = self.random_choice(extra_inds, num_extra) sampled_inds = np.concatenate([sampled_inds, extra_inds]) return sampled_inds def _sample_neg(self, assign_result, num_expected, **kwargs): neg_inds = torch.nonzero(assign_result.gt_inds == 0) if neg_inds.numel() != 0: neg_inds = neg_inds.squeeze(1) if len(neg_inds) <= num_expected: return neg_inds else: max_overlaps = assign_result.max_overlaps.cpu().numpy() # balance sampling for negative samples neg_set = set(neg_inds.cpu().numpy()) if self.floor_thr > 0: floor_set = set( np.where( np.logical_and(max_overlaps >= 0, max_overlaps < self.floor_thr))[0]) iou_sampling_set = set( np.where(max_overlaps >= self.floor_thr)[0]) elif self.floor_thr == 0: floor_set = set(np.where(max_overlaps == 0)[0]) iou_sampling_set = set( np.where(max_overlaps > self.floor_thr)[0]) else: floor_set = set() iou_sampling_set = set( np.where(max_overlaps > self.floor_thr)[0]) # for sampling interval calculation self.floor_thr = 0 floor_neg_inds = list(floor_set & neg_set) iou_sampling_neg_inds = list(iou_sampling_set & neg_set) num_expected_iou_sampling = int(num_expected * (1 - self.floor_fraction)) if len(iou_sampling_neg_inds) > num_expected_iou_sampling: if self.num_bins >= 2: iou_sampled_inds = self.sample_via_interval( max_overlaps, set(iou_sampling_neg_inds), num_expected_iou_sampling) else: iou_sampled_inds = self.random_choice( iou_sampling_neg_inds, num_expected_iou_sampling) else: 
iou_sampled_inds = np.array( iou_sampling_neg_inds, dtype=np.int) num_expected_floor = num_expected - len(iou_sampled_inds) if len(floor_neg_inds) > num_expected_floor: sampled_floor_inds = self.random_choice( floor_neg_inds, num_expected_floor) else: sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int) sampled_inds = np.concatenate( (sampled_floor_inds, iou_sampled_inds)) if len(sampled_inds) < num_expected: num_extra = num_expected - len(sampled_inds) extra_inds = np.array(list(neg_set - set(sampled_inds))) if len(extra_inds) > num_extra: extra_inds = self.random_choice(extra_inds, num_extra) sampled_inds = np.concatenate((sampled_inds, extra_inds)) sampled_inds = torch.from_numpy(sampled_inds).long().to( assign_result.gt_inds.device) return sampled_inds
5,956
42.801471
79
py
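A short numeric illustration of the IoU-balanced binning used by sample_via_interval above, not part of the repository; all values are invented.

floor_thr, num_bins = 0.1, 3
max_iou = 0.28                     # largest IoU among the negative candidates
iou_interval = (max_iou - floor_thr) / num_bins
edges = [floor_thr + i * iou_interval for i in range(num_bins + 1)]
print([round(e, 3) for e in edges])  # [0.1, 0.16, 0.22, 0.28]
# `floor_fraction` of the negative budget is drawn at random from IoU < floor_thr;
# the remainder is drawn evenly from these three IoU bins.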
AlignShift
AlignShift-master/mmdet/core/bbox/samplers/sampling_result.py
import torch class SamplingResult(object): def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags): self.pos_inds = pos_inds self.neg_inds = neg_inds self.pos_bboxes = bboxes[pos_inds] self.neg_bboxes = bboxes[neg_inds] self.pos_is_gt = gt_flags[pos_inds] self.num_gts = gt_bboxes.shape[0] self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :] if assign_result.labels is not None: self.pos_gt_labels = assign_result.labels[pos_inds] else: self.pos_gt_labels = None @property def bboxes(self): return torch.cat([self.pos_bboxes, self.neg_bboxes])
790
30.64
76
py
AlignShift
AlignShift-master/mmdet/core/bbox/samplers/pseudo_sampler.py
import torch from .base_sampler import BaseSampler from .sampling_result import SamplingResult class PseudoSampler(BaseSampler): def __init__(self, **kwargs): pass def _sample_pos(self, **kwargs): raise NotImplementedError def _sample_neg(self, **kwargs): raise NotImplementedError def sample(self, assign_result, bboxes, gt_bboxes, **kwargs): pos_inds = torch.nonzero( assign_result.gt_inds > 0).squeeze(-1).unique() neg_inds = torch.nonzero( assign_result.gt_inds == 0).squeeze(-1).unique() gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8) sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags) return sampling_result
829
29.740741
79
py
AlignShift
AlignShift-master/mmdet/core/utils/dist_utils.py
from collections import OrderedDict import torch.distributed as dist from mmcv.runner import OptimizerHook from torch._utils import (_flatten_dense_tensors, _take_tensors, _unflatten_dense_tensors) def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1): if bucket_size_mb > 0: bucket_size_bytes = bucket_size_mb * 1024 * 1024 buckets = _take_tensors(tensors, bucket_size_bytes) else: buckets = OrderedDict() for tensor in tensors: tp = tensor.type() if tp not in buckets: buckets[tp] = [] buckets[tp].append(tensor) buckets = buckets.values() for bucket in buckets: flat_tensors = _flatten_dense_tensors(bucket) dist.all_reduce(flat_tensors) flat_tensors.div_(world_size) for tensor, synced in zip( bucket, _unflatten_dense_tensors(flat_tensors, bucket)): tensor.copy_(synced) def allreduce_grads(params, coalesce=True, bucket_size_mb=-1): grads = [ param.grad.data for param in params if param.requires_grad and param.grad is not None ] world_size = dist.get_world_size() if coalesce: _allreduce_coalesced(grads, world_size, bucket_size_mb) else: for tensor in grads: dist.all_reduce(tensor.div_(world_size)) class DistOptimizerHook(OptimizerHook): def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1): self.grad_clip = grad_clip self.coalesce = coalesce self.bucket_size_mb = bucket_size_mb def after_train_iter(self, runner): runner.optimizer.zero_grad() runner.outputs['loss'].backward() allreduce_grads(runner.model.parameters(), self.coalesce, self.bucket_size_mb) if self.grad_clip is not None: self.clip_grads(runner.model.parameters()) runner.optimizer.step()
1,967
32.355932
73
py
AlignShift
AlignShift-master/mmdet/core/anchor/anchor_target.py
import torch from ..bbox import PseudoSampler, assign_and_sample, bbox2delta, build_assigner from ..utils import multi_apply def anchor_target(anchor_list, valid_flag_list, gt_bboxes_list, img_metas, target_means, target_stds, cfg, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, sampling=True, unmap_outputs=True): """Compute regression and classification targets for anchors. Args: anchor_list (list[list]): Multi level anchors of each image. valid_flag_list (list[list]): Multi level valid flags of each image. gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. target_means (Iterable): Mean value of regression targets. target_stds (Iterable): Std value of regression targets. cfg (dict): RPN train configs. Returns: tuple """ num_imgs = len(img_metas) assert len(anchor_list) == len(valid_flag_list) == num_imgs # anchor number of multi levels num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] # concat all level anchors and flags to a single tensor for i in range(num_imgs): assert len(anchor_list[i]) == len(valid_flag_list[i]) anchor_list[i] = torch.cat(anchor_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( anchor_target_single, anchor_list, valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, img_metas, target_means=target_means, target_stds=target_stds, cfg=cfg, label_channels=label_channels, sampling=sampling, unmap_outputs=unmap_outputs) # no valid anchors if any([labels is None for labels in all_labels]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. multiple levels labels_list = images_to_levels(all_labels, num_level_anchors) label_weights_list = images_to_levels(all_label_weights, num_level_anchors) bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors) return (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) def images_to_levels(target, num_level_anchors): """Convert targets by image to targets by feature level. [target_img0, target_img1] -> [target_level0, target_level1, ...] 
""" target = torch.stack(target, 0) level_targets = [] start = 0 for n in num_level_anchors: end = start + n level_targets.append(target[:, start:end].squeeze(0)) start = end return level_targets def anchor_target_single(flat_anchors, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, img_meta, target_means, target_stds, cfg, label_channels=1, sampling=True, unmap_outputs=True): inside_flags = anchor_inside_flags(flat_anchors, valid_flags, img_meta['img_shape'][:2], cfg.allowed_border) if not inside_flags.any(): return (None, ) * 6 # assign gt and sample anchors anchors = flat_anchors[inside_flags, :] if sampling: assign_result, sampling_result = assign_and_sample( anchors, gt_bboxes, gt_bboxes_ignore, None, cfg) else: bbox_assigner = build_assigner(cfg.assigner) assign_result = bbox_assigner.assign(anchors, gt_bboxes, gt_bboxes_ignore, gt_labels) bbox_sampler = PseudoSampler() sampling_result = bbox_sampler.sample(assign_result, anchors, gt_bboxes) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) labels = anchors.new_zeros(num_valid_anchors, dtype=torch.long) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: pos_bbox_targets = bbox2delta(sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes, target_means, target_stds) bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 if gt_labels is None: labels[pos_inds] = 1 else: labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] if cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_anchors.size(0) labels = unmap(labels, num_total_anchors, inside_flags) label_weights = unmap(label_weights, num_total_anchors, inside_flags) bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds) def anchor_inside_flags(flat_anchors, valid_flags, img_shape, allowed_border=0): img_h, img_w = img_shape[:2] if allowed_border >= 0: inside_flags = valid_flags & \ (flat_anchors[:, 0] >= -allowed_border).type(torch.uint8) & \ (flat_anchors[:, 1] >= -allowed_border).type(torch.uint8) & \ (flat_anchors[:, 2] < img_w + allowed_border).type(torch.uint8) & \ (flat_anchors[:, 3] < img_h + allowed_border).type(torch.uint8) else: inside_flags = valid_flags return inside_flags def unmap(data, count, inds, fill=0): """ Unmap a subset of item (data) back to the original set of items (of size count) """ if data.dim() == 1: ret = data.new_full((count, ), fill) ret[inds] = data else: new_size = (count, ) + data.size()[1:] ret = data.new_full(new_size, fill) ret[inds, :] = data return ret
7,318
37.724868
79
py
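A minimal sketch of the images_to_levels helper above, not part of the repository: two images with five anchors each are regrouped into two feature levels of 3 and 2 anchors. The import path follows the file listing; the labels are invented.

import torch
from mmdet.core.anchor.anchor_target import images_to_levels

all_labels = [torch.tensor([1, 0, 0, 2, 0]), torch.tensor([0, 0, 1, 0, 0])]
per_level = images_to_levels(all_labels, [3, 2])
print([t.shape for t in per_level])  # [torch.Size([2, 3]), torch.Size([2, 2])]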
AlignShift
AlignShift-master/mmdet/core/anchor/guided_anchor_target.py
import torch from ..bbox import PseudoSampler, build_assigner, build_sampler from ..utils import multi_apply, unmap def calc_region(bbox, ratio, featmap_size=None): """Calculate a proportional bbox region. The bbox center are fixed and the new h' and w' is h * ratio and w * ratio. Args: bbox (Tensor): Bboxes to calculate regions, shape (n, 4) ratio (float): Ratio of the output region. featmap_size (tuple): Feature map size used for clipping the boundary. Returns: tuple: x1, y1, x2, y2 """ x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long() y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long() x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long() y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long() if featmap_size is not None: x1 = x1.clamp(min=0, max=featmap_size[1] - 1) y1 = y1.clamp(min=0, max=featmap_size[0] - 1) x2 = x2.clamp(min=0, max=featmap_size[1] - 1) y2 = y2.clamp(min=0, max=featmap_size[0] - 1) return (x1, y1, x2, y2) def ga_loc_target(gt_bboxes_list, featmap_sizes, anchor_scale, anchor_strides, center_ratio=0.2, ignore_ratio=0.5): """Compute location targets for guided anchoring. Each feature map is divided into positive, negative and ignore regions. - positive regions: target 1, weight 1 - ignore regions: target 0, weight 0 - negative regions: target 0, weight 0.1 Args: gt_bboxes_list (list[Tensor]): Gt bboxes of each image. featmap_sizes (list[tuple]): Multi level sizes of each feature maps. anchor_scale (int): Anchor scale. anchor_strides ([list[int]]): Multi level anchor strides. center_ratio (float): Ratio of center region. ignore_ratio (float): Ratio of ignore region. Returns: tuple """ img_per_gpu = len(gt_bboxes_list) num_lvls = len(featmap_sizes) r1 = (1 - center_ratio) / 2 r2 = (1 - ignore_ratio) / 2 all_loc_targets = [] all_loc_weights = [] all_ignore_map = [] for lvl_id in range(num_lvls): h, w = featmap_sizes[lvl_id] loc_targets = torch.zeros( img_per_gpu, 1, h, w, device=gt_bboxes_list[0].device, dtype=torch.float32) loc_weights = torch.full_like(loc_targets, -1) ignore_map = torch.zeros_like(loc_targets) all_loc_targets.append(loc_targets) all_loc_weights.append(loc_weights) all_ignore_map.append(ignore_map) for img_id in range(img_per_gpu): gt_bboxes = gt_bboxes_list[img_id] scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)) min_anchor_size = scale.new_full( (1, ), float(anchor_scale * anchor_strides[0])) # assign gt bboxes to different feature levels w.r.t. 
their scales target_lvls = torch.floor( torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() for gt_id in range(gt_bboxes.size(0)): lvl = target_lvls[gt_id].item() # rescaled to corresponding feature map gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl] # calculate ignore regions ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[lvl]) # calculate positive (center) regions ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region( gt_, r1, featmap_sizes[lvl]) all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1 all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 0 all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 + 1] = 1 # calculate ignore map on nearby low level feature if lvl > 0: d_lvl = lvl - 1 # rescaled to corresponding feature map gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl] ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[d_lvl]) all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 1 # calculate ignore map on nearby high level feature if lvl < num_lvls - 1: u_lvl = lvl + 1 # rescaled to corresponding feature map gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl] ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region( gt_, r2, featmap_sizes[u_lvl]) all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1, ignore_x1:ignore_x2 + 1] = 1 for lvl_id in range(num_lvls): # ignore negative regions w.r.t. ignore map all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0) & (all_ignore_map[lvl_id] > 0)] = 0 # set negative regions with weight 0.1 all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1 # loc average factor to balance loss loc_avg_factor = sum( [t.size(0) * t.size(-1) * t.size(-2) for t in all_loc_targets]) / 200 return all_loc_targets, all_loc_weights, loc_avg_factor def ga_shape_target(approx_list, inside_flag_list, square_list, gt_bboxes_list, img_metas, approxs_per_octave, cfg, gt_bboxes_ignore_list=None, sampling=True, unmap_outputs=True): """Compute guided anchoring targets. Args: approx_list (list[list]): Multi level approxs of each image. inside_flag_list (list[list]): Multi level inside flags of each image. square_list (list[list]): Multi level squares of each image. gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. approxs_per_octave (int): number of approxs per octave cfg (dict): RPN train configs. gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes. sampling (bool): sampling or not. unmap_outputs (bool): unmap outputs or not. 
Returns: tuple """ num_imgs = len(img_metas) assert len(approx_list) == len(inside_flag_list) == len( square_list) == num_imgs # anchor number of multi levels num_level_squares = [squares.size(0) for squares in square_list[0]] # concat all level anchors and flags to a single tensor inside_flag_flat_list = [] approx_flat_list = [] square_flat_list = [] for i in range(num_imgs): assert len(square_list[i]) == len(inside_flag_list[i]) inside_flag_flat_list.append(torch.cat(inside_flag_list[i])) approx_flat_list.append(torch.cat(approx_list[i])) square_flat_list.append(torch.cat(square_list[i])) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] (all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply( ga_shape_target_single, approx_flat_list, inside_flag_flat_list, square_flat_list, gt_bboxes_list, gt_bboxes_ignore_list, img_metas, approxs_per_octave=approxs_per_octave, cfg=cfg, sampling=sampling, unmap_outputs=unmap_outputs) # no valid anchors if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]): return None # sampled anchors of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) # split targets to a list w.r.t. multiple levels bbox_anchors_list = images_to_levels(all_bbox_anchors, num_level_squares) bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares) bbox_weights_list = images_to_levels(all_bbox_weights, num_level_squares) return (bbox_anchors_list, bbox_gts_list, bbox_weights_list, num_total_pos, num_total_neg) def images_to_levels(target, num_level_anchors): """Convert targets by image to targets by feature level. [target_img0, target_img1] -> [target_level0, target_level1, ...] """ target = torch.stack(target, 0) level_targets = [] start = 0 for n in num_level_anchors: end = start + n level_targets.append(target[:, start:end].squeeze(0)) start = end return level_targets def ga_shape_target_single(flat_approxs, inside_flags, flat_squares, gt_bboxes, gt_bboxes_ignore, img_meta, approxs_per_octave, cfg, sampling=True, unmap_outputs=True): """Compute guided anchoring targets. This function returns sampled anchors and gt bboxes directly rather than calculates regression targets. Args: flat_approxs (Tensor): flat approxs of a single image, shape (n, 4) inside_flags (Tensor): inside flags of a single image, shape (n, ). flat_squares (Tensor): flat squares of a single image, shape (approxs_per_octave * n, 4) gt_bboxes (Tensor): Ground truth bboxes of a single image. img_meta (dict): Meta info of a single image. approxs_per_octave (int): number of approxs per octave cfg (dict): RPN train configs. sampling (bool): sampling or not. unmap_outputs (bool): unmap outputs or not. 
Returns: tuple """ if not inside_flags.any(): return (None, ) * 6 # assign gt and sample anchors expand_inside_flags = inside_flags[:, None].expand( -1, approxs_per_octave).reshape(-1) approxs = flat_approxs[expand_inside_flags, :] squares = flat_squares[inside_flags, :] bbox_assigner = build_assigner(cfg.ga_assigner) assign_result = bbox_assigner.assign(approxs, squares, approxs_per_octave, gt_bboxes, gt_bboxes_ignore) if sampling: bbox_sampler = build_sampler(cfg.ga_sampler) else: bbox_sampler = PseudoSampler() sampling_result = bbox_sampler.sample(assign_result, squares, gt_bboxes) bbox_anchors = torch.zeros_like(squares) bbox_gts = torch.zeros_like(squares) bbox_weights = torch.zeros_like(squares) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: bbox_anchors[pos_inds, :] = sampling_result.pos_bboxes bbox_gts[pos_inds, :] = sampling_result.pos_gt_bboxes bbox_weights[pos_inds, :] = 1.0 # map up to original set of anchors if unmap_outputs: num_total_anchors = flat_squares.size(0) bbox_anchors = unmap(bbox_anchors, num_total_anchors, inside_flags) bbox_gts = unmap(bbox_gts, num_total_anchors, inside_flags) bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags) return (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds)
11,809
40.006944
79
py
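A minimal sketch of calc_region above, not part of the repository: with center_ratio=0.2, ga_loc_target passes ratio r1 = (1 - 0.2) / 2 = 0.4, which keeps the central 20% of the box on each axis. The import path follows the file listing; the box is invented.

import torch
from mmdet.core.anchor.guided_anchor_target import calc_region

bbox = torch.tensor([0., 0., 100., 100.])
print(calc_region(bbox, 0.4))  # (tensor(40), tensor(40), tensor(60), tensor(60))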
AlignShift
AlignShift-master/mmdet/core/anchor/point_generator.py
import torch class PointGenerator(object): def _meshgrid(self, x, y, row_major=True): xx = x.repeat(len(y)) yy = y.view(-1, 1).repeat(1, len(x)).view(-1) if row_major: return xx, yy else: return yy, xx def grid_points(self, featmap_size, stride=16, device='cuda'): feat_h, feat_w = featmap_size shift_x = torch.arange(0., feat_w, device=device) * stride shift_y = torch.arange(0., feat_h, device=device) * stride shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) stride = shift_x.new_full((shift_xx.shape[0], ), stride) shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1) all_points = shifts.to(device) return all_points def valid_flags(self, featmap_size, valid_size, device='cuda'): feat_h, feat_w = featmap_size valid_h, valid_w = valid_size assert valid_h <= feat_h and valid_w <= feat_w valid_x = torch.zeros(feat_w, dtype=torch.uint8, device=device) valid_y = torch.zeros(feat_h, dtype=torch.uint8, device=device) valid_x[:valid_w] = 1 valid_y[:valid_h] = 1 valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) valid = valid_xx & valid_yy return valid
1,287
35.8
71
py
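A minimal sketch of PointGenerator.grid_points above, not part of the repository: one (x, y, stride) triple per location of a 2x2 feature map. The import path follows the file listing.

from mmdet.core.anchor.point_generator import PointGenerator

pg = PointGenerator()
points = pg.grid_points((2, 2), stride=16, device='cpu')
print(points)
# tensor([[ 0.,  0., 16.],
#         [16.,  0., 16.],
#         [ 0., 16., 16.],
#         [16., 16., 16.]])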
AlignShift
AlignShift-master/mmdet/core/anchor/anchor_generator.py
import torch class AnchorGenerator(object): """ Examples: >>> from mmdet.core import AnchorGenerator >>> self = AnchorGenerator(9, [1.], [1.]) >>> all_anchors = self.grid_anchors((2, 2), device='cpu') >>> print(all_anchors) tensor([[ 0., 0., 8., 8.], [16., 0., 24., 8.], [ 0., 16., 8., 24.], [16., 16., 24., 24.]]) """ def __init__(self, base_size, scales, ratios, scale_major=True, ctr=None): self.base_size = base_size self.scales = torch.Tensor(scales) self.ratios = torch.Tensor(ratios) self.scale_major = scale_major self.ctr = ctr self.base_anchors = self.gen_base_anchors() @property def num_base_anchors(self): return self.base_anchors.size(0) def gen_base_anchors(self): w = self.base_size h = self.base_size if self.ctr is None: x_ctr = 0.5 * (w -1)#hyadd y_ctr = 0.5 * (h -1)#hyadd else: x_ctr, y_ctr = self.ctr h_ratios = torch.sqrt(self.ratios) w_ratios = 1 / h_ratios if self.scale_major: ws = (w * w_ratios[:, None] * self.scales[None, :]).view(-1) hs = (h * h_ratios[:, None] * self.scales[None, :]).view(-1) else: ws = (w * self.scales[:, None] * w_ratios[None, :]).view(-1) hs = (h * self.scales[:, None] * h_ratios[None, :]).view(-1) # yapf: disable base_anchors = torch.stack( [ x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1) ], dim=-1).round() # yapf: enable return base_anchors def _meshgrid(self, x, y, row_major=True): xx = x.repeat(len(y)) yy = y.view(-1, 1).repeat(1, len(x)).view(-1) if row_major: return xx, yy else: return yy, xx def grid_anchors(self, featmap_size, stride=16, device='cuda'): base_anchors = self.base_anchors.to(device) feat_h, feat_w = featmap_size shift_x = torch.arange(0, feat_w, device=device) * stride shift_y = torch.arange(0, feat_h, device=device) * stride shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1) shifts = shifts.type_as(base_anchors) # first feat_w elements correspond to the first row of shifts # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get # shifted anchors (K, A, 4), reshape to (K*A, 4) all_anchors = base_anchors[None, :, :] + shifts[:, None, :] all_anchors = all_anchors.view(-1, 4) # first A rows correspond to A anchors of (0, 0) in feature map, # then (0, 1), (0, 2), ... return all_anchors def valid_flags(self, featmap_size, valid_size, device='cuda'): feat_h, feat_w = featmap_size valid_h, valid_w = valid_size assert valid_h <= feat_h and valid_w <= feat_w valid_x = torch.zeros(feat_w, dtype=torch.uint8, device=device) valid_y = torch.zeros(feat_h, dtype=torch.uint8, device=device) valid_x[:valid_w] = 1 valid_y[:valid_h] = 1 valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) valid = valid_xx & valid_yy valid = valid[:, None].expand(valid.size(0), self.num_base_anchors).contiguous().view(-1) return valid
3,599
35.363636
79
py
AlignShift
AlignShift-master/mmdet/core/anchor/point_target.py
import torch from ..bbox import PseudoSampler, assign_and_sample, build_assigner from ..utils import multi_apply def point_target(proposals_list, valid_flag_list, gt_bboxes_list, img_metas, cfg, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, sampling=True, unmap_outputs=True): """Compute corresponding GT box and classification targets for proposals. Args: points_list (list[list]): Multi level points of each image. valid_flag_list (list[list]): Multi level valid flags of each image. gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. img_metas (list[dict]): Meta info of each image. cfg (dict): train sample configs. Returns: tuple """ num_imgs = len(img_metas) assert len(proposals_list) == len(valid_flag_list) == num_imgs # points number of multi levels num_level_proposals = [points.size(0) for points in proposals_list[0]] # concat all level points and flags to a single tensor for i in range(num_imgs): assert len(proposals_list[i]) == len(valid_flag_list[i]) proposals_list[i] = torch.cat(proposals_list[i]) valid_flag_list[i] = torch.cat(valid_flag_list[i]) # compute targets for each image if gt_bboxes_ignore_list is None: gt_bboxes_ignore_list = [None for _ in range(num_imgs)] if gt_labels_list is None: gt_labels_list = [None for _ in range(num_imgs)] (all_labels, all_label_weights, all_bbox_gt, all_proposals, all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply( point_target_single, proposals_list, valid_flag_list, gt_bboxes_list, gt_bboxes_ignore_list, gt_labels_list, cfg=cfg, label_channels=label_channels, sampling=sampling, unmap_outputs=unmap_outputs) # no valid points if any([labels is None for labels in all_labels]): return None # sampled points of all images num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list]) num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list]) labels_list = images_to_levels(all_labels, num_level_proposals) label_weights_list = images_to_levels(all_label_weights, num_level_proposals) bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals) proposals_list = images_to_levels(all_proposals, num_level_proposals) proposal_weights_list = images_to_levels(all_proposal_weights, num_level_proposals) return (labels_list, label_weights_list, bbox_gt_list, proposals_list, proposal_weights_list, num_total_pos, num_total_neg) def images_to_levels(target, num_level_grids): """Convert targets by image to targets by feature level. [target_img0, target_img1] -> [target_level0, target_level1, ...] 
""" target = torch.stack(target, 0) level_targets = [] start = 0 for n in num_level_grids: end = start + n level_targets.append(target[:, start:end].squeeze(0)) start = end return level_targets def point_target_single(flat_proposals, valid_flags, gt_bboxes, gt_bboxes_ignore, gt_labels, cfg, label_channels=1, sampling=True, unmap_outputs=True): inside_flags = valid_flags if not inside_flags.any(): return (None, ) * 7 # assign gt and sample proposals proposals = flat_proposals[inside_flags, :] if sampling: assign_result, sampling_result = assign_and_sample( proposals, gt_bboxes, gt_bboxes_ignore, None, cfg) else: bbox_assigner = build_assigner(cfg.assigner) assign_result = bbox_assigner.assign(proposals, gt_bboxes, gt_bboxes_ignore, gt_labels) bbox_sampler = PseudoSampler() sampling_result = bbox_sampler.sample(assign_result, proposals, gt_bboxes) num_valid_proposals = proposals.shape[0] bbox_gt = proposals.new_zeros([num_valid_proposals, 4]) pos_proposals = torch.zeros_like(proposals) proposals_weights = proposals.new_zeros([num_valid_proposals, 4]) labels = proposals.new_zeros(num_valid_proposals, dtype=torch.long) label_weights = proposals.new_zeros(num_valid_proposals, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: pos_gt_bboxes = sampling_result.pos_gt_bboxes bbox_gt[pos_inds, :] = pos_gt_bboxes pos_proposals[pos_inds, :] = proposals[pos_inds, :] proposals_weights[pos_inds, :] = 1.0 if gt_labels is None: labels[pos_inds] = 1 else: labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] if cfg.pos_weight <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = cfg.pos_weight if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 # map up to original set of proposals if unmap_outputs: num_total_proposals = flat_proposals.size(0) labels = unmap(labels, num_total_proposals, inside_flags) label_weights = unmap(label_weights, num_total_proposals, inside_flags) bbox_gt = unmap(bbox_gt, num_total_proposals, inside_flags) pos_proposals = unmap(pos_proposals, num_total_proposals, inside_flags) proposals_weights = unmap(proposals_weights, num_total_proposals, inside_flags) return (labels, label_weights, bbox_gt, pos_proposals, proposals_weights, pos_inds, neg_inds) def unmap(data, count, inds, fill=0): """ Unmap a subset of item (data) back to the original set of items (of size count) """ if data.dim() == 1: ret = data.new_full((count, ), fill) ret[inds] = data else: new_size = (count, ) + data.size()[1:] ret = data.new_full(new_size, fill) ret[inds, :] = data return ret
6,441
37.807229
79
py
AlignShift
AlignShift-master/mmdet/models/builder.py
from torch import nn from mmdet.utils import build_from_cfg from .registry import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS, ROI_EXTRACTORS, SHARED_HEADS) def build(cfg, registry, default_args=None): if isinstance(cfg, list): modules = [ build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg ] return nn.Sequential(*modules) else: return build_from_cfg(cfg, registry, default_args) def build_backbone(cfg): return build(cfg, BACKBONES) def build_neck(cfg): return build(cfg, NECKS) def build_roi_extractor(cfg): return build(cfg, ROI_EXTRACTORS) def build_shared_head(cfg): return build(cfg, SHARED_HEADS) def build_head(cfg): return build(cfg, HEADS) def build_loss(cfg): return build(cfg, LOSSES) def build_detector(cfg, train_cfg=None, test_cfg=None): return build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
959
20.818182
78
py
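A hedged sketch of the builder above, not part of the repository: it assumes a loss class named 'CrossEntropyLoss' is registered under LOSSES, as in standard mmdet; substitute any type that is actually registered in this repo. Passing a list of config dicts instead returns an nn.Sequential of the built modules.

from mmdet.models.builder import build_loss

loss_cfg = dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)
criterion = build_loss(loss_cfg)  # instantiates LOSSES.get('CrossEntropyLoss')(**cfg)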
AlignShift
AlignShift-master/mmdet/models/detectors/two_stage.py
import torch import torch.nn as nn from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler from .. import builder from ..registry import DETECTORS from .base import BaseDetector from .test_mixins import BBoxTestMixin, MaskTestMixin, RPNTestMixin @DETECTORS.register_module class TwoStageDetector(BaseDetector, RPNTestMixin, BBoxTestMixin, MaskTestMixin): """Base class for two-stage detectors. Two-stage detectors typically consisting of a region proposal network and a task-specific regression head. """ def __init__(self, backbone, neck=None, shared_head=None, rpn_head=None, bbox_roi_extractor=None, bbox_head=None, mask_roi_extractor=None, mask_head=None, train_cfg=None, test_cfg=None, pretrained=None): super(TwoStageDetector, self).__init__() self.backbone = builder.build_backbone(backbone) if neck is not None: self.neck = builder.build_neck(neck) if shared_head is not None: self.shared_head = builder.build_shared_head(shared_head) if rpn_head is not None: self.rpn_head = builder.build_head(rpn_head) if bbox_head is not None: self.bbox_roi_extractor = builder.build_roi_extractor( bbox_roi_extractor) self.bbox_head = builder.build_head(bbox_head) if mask_head is not None: if mask_roi_extractor is not None: self.mask_roi_extractor = builder.build_roi_extractor( mask_roi_extractor) self.share_roi_extractor = False else: self.share_roi_extractor = True self.mask_roi_extractor = self.bbox_roi_extractor self.mask_head = builder.build_head(mask_head) self.train_cfg = train_cfg self.test_cfg = test_cfg self.init_weights(pretrained=pretrained) @property def with_rpn(self): return hasattr(self, 'rpn_head') and self.rpn_head is not None def init_weights(self, pretrained=None): super(TwoStageDetector, self).init_weights(pretrained) self.backbone.init_weights(pretrained=pretrained) if self.with_neck: if isinstance(self.neck, nn.Sequential): for m in self.neck: m.init_weights() else: self.neck.init_weights() if self.with_shared_head: self.shared_head.init_weights(pretrained=pretrained) if self.with_rpn: self.rpn_head.init_weights() if self.with_bbox: self.bbox_roi_extractor.init_weights() self.bbox_head.init_weights() if self.with_mask: self.mask_head.init_weights() if not self.share_roi_extractor: self.mask_roi_extractor.init_weights() def extract_feat(self, img): """Directly extract features from the backbone+neck """ x = self.backbone(img) if self.with_neck: x = self.neck(x) return x def forward_dummy(self, img): """Used for computing network flops. See `mmedetection/tools/get_flops.py` """ outs = () # backbone x = self.extract_feat(img) # rpn if self.with_rpn: rpn_outs = self.rpn_head(x) outs = outs + (rpn_outs, ) proposals = torch.randn(1000, 4).cuda() # bbox head rois = bbox2roi([proposals]) if self.with_bbox: bbox_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) if self.with_shared_head: bbox_feats = self.shared_head(bbox_feats) cls_score, bbox_pred = self.bbox_head(bbox_feats) outs = outs + (cls_score, bbox_pred) # mask head if self.with_mask: mask_rois = rois[:100] mask_feats = self.mask_roi_extractor( x[:self.mask_roi_extractor.num_inputs], mask_rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) mask_pred = self.mask_head(mask_feats) outs = outs + (mask_pred, ) return outs def forward_train(self, img, img_meta, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None): """ Args: img (Tensor): of shape (N, C, H, W) encoding input images. Typically these should be mean centered and std scaled. 
img_meta (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and my also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. gt_bboxes (list[Tensor]): each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. gt_masks (None | Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. proposals : override rpn proposals with custom proposals. Use when `with_rpn` is False. Returns: dict[str, Tensor]: a dictionary of loss components """ x = self.extract_feat(img) losses = dict() # RPN forward and loss if self.with_rpn: rpn_outs = self.rpn_head(x) rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, self.train_cfg.rpn) rpn_losses = self.rpn_head.loss( *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) losses.update(rpn_losses) proposal_cfg = self.train_cfg.get('rpn_proposal', self.test_cfg.rpn) proposal_inputs = rpn_outs + (img_meta, proposal_cfg) proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) else: proposal_list = proposals # assign gts and sample proposals if self.with_bbox or self.with_mask: bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner) bbox_sampler = build_sampler( self.train_cfg.rcnn.sampler, context=self) num_imgs = img.size(0) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] for i in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=[lvl_feat[i][None] for lvl_feat in x]) sampling_results.append(sampling_result) # bbox head forward and loss if self.with_bbox: rois = bbox2roi([res.bboxes for res in sampling_results]) # TODO: a more flexible way to decide which feature maps to use bbox_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) if self.with_shared_head: bbox_feats = self.shared_head(bbox_feats) cls_score, bbox_pred = self.bbox_head(bbox_feats) bbox_targets = self.bbox_head.get_target(sampling_results, gt_bboxes, gt_labels, self.train_cfg.rcnn) # print(cls_score.shape, bbox_targets[0].shape, bbox_targets[2].shape) # print(cls_score[0], bbox_pred[0], bbox_targets[0][0],bbox_targets[2][0]) loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, *bbox_targets) losses.update(loss_bbox) # mask head forward and loss if self.with_mask: if not self.share_roi_extractor: pos_rois = bbox2roi( [res.pos_bboxes for res in sampling_results]) mask_feats = self.mask_roi_extractor( x[:self.mask_roi_extractor.num_inputs], pos_rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) else: pos_inds = [] device = bbox_feats.device for res in sampling_results: pos_inds.append( torch.ones( res.pos_bboxes.shape[0], device=device, dtype=torch.uint8)) pos_inds.append( torch.zeros( res.neg_bboxes.shape[0], device=device, dtype=torch.uint8)) pos_inds = torch.cat(pos_inds) mask_feats = bbox_feats[pos_inds] mask_pred = self.mask_head(mask_feats) mask_targets = self.mask_head.get_target(sampling_results, gt_masks, self.train_cfg.rcnn) pos_labels = torch.cat( [res.pos_gt_labels for res in sampling_results]) loss_mask = 
self.mask_head.loss(mask_pred, mask_targets, pos_labels) losses.update(loss_mask) return losses def simple_test(self, img, img_meta, z_spacing=None, proposals=None, rescale=False, **kwargs):#hyadd """Test without augmentation.""" assert self.with_bbox, "Bbox head must be implemented." x = self.extract_feat(img) proposal_list = self.simple_test_rpn( x, img_meta, self.test_cfg.rpn) if proposals is None else proposals det_bboxes, det_labels = self.simple_test_bboxes( x, img_meta, proposal_list, self.test_cfg.rcnn, rescale=rescale) # print(proposal_list[0][0], det_bboxes.shape,det_labels.shape) bbox_results = bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) if not self.with_mask: return bbox_results else: segm_results = self.simple_test_mask( x, img_meta, det_bboxes, det_labels, rescale=rescale) return bbox_results, segm_results def aug_test(self, imgs, img_metas, rescale=False): """Test with augmentations. If rescale is False, then returned bboxes and masks will fit the scale of imgs[0]. """ # recompute feats to save memory proposal_list = self.aug_test_rpn( self.extract_feats(imgs), img_metas, self.test_cfg.rpn) det_bboxes, det_labels = self.aug_test_bboxes( self.extract_feats(imgs), img_metas, proposal_list, self.test_cfg.rcnn) if rescale: _det_bboxes = det_bboxes else: _det_bboxes = det_bboxes.clone() _det_bboxes[:, :4] *= img_metas[0][0]['scale_factor'] bbox_results = bbox2result(_det_bboxes, det_labels, self.bbox_head.num_classes) # det_bboxes always keep the original scale if self.with_mask: segm_results = self.aug_test_mask( self.extract_feats(imgs), img_metas, det_bboxes, det_labels) return bbox_results, segm_results else: return bbox_results def forward_debug(self, img, img_meta, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None): """ Args: img (Tensor): of shape (N, C, H, W) encoding input images. Typically these should be mean centered and std scaled. img_meta (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and my also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. gt_bboxes (list[Tensor]): each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. gt_masks (None | Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. proposals : override rpn proposals with custom proposals. Use when `with_rpn` is False. 
Returns: dict[str, Tensor]: a dictionary of loss components """ x = self.extract_feat(img) losses = dict() # RPN forward and loss if self.with_rpn: rpn_outs = self.rpn_head(x) rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, self.train_cfg.rpn) rpn_losses = self.rpn_head.loss( *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) losses.update(rpn_losses) proposal_cfg = self.train_cfg.get('rpn_proposal', self.test_cfg.rpn) proposal_inputs = rpn_outs + (img_meta, proposal_cfg) proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) else: proposal_list = proposals # assign gts and sample proposals if self.with_bbox or self.with_mask: bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner) bbox_sampler = build_sampler( self.train_cfg.rcnn.sampler, context=self) num_imgs = img.size(0) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] for i in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=[lvl_feat[i][None] for lvl_feat in x]) sampling_results.append(sampling_result) # bbox head forward and loss if self.with_bbox: rois = bbox2roi([res.bboxes for res in sampling_results]) # TODO: a more flexible way to decide which feature maps to use bbox_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) if self.with_shared_head: bbox_feats = self.shared_head(bbox_feats) cls_score, bbox_pred = self.bbox_head(bbox_feats) bbox_targets = self.bbox_head.get_target(sampling_results, gt_bboxes, gt_labels, self.train_cfg.rcnn) # print(cls_score.shape, bbox_targets[0].shape, bbox_targets[2].shape) # print(cls_score[0], bbox_pred[0], bbox_targets[0][0],bbox_targets[2][0]) loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, *bbox_targets) losses.update(loss_bbox) # mask head forward and loss if self.with_mask: if not self.share_roi_extractor: pos_rois = bbox2roi( [res.pos_bboxes for res in sampling_results]) mask_feats = self.mask_roi_extractor( x[:self.mask_roi_extractor.num_inputs], pos_rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) else: pos_inds = [] device = bbox_feats.device for res in sampling_results: pos_inds.append( torch.ones( res.pos_bboxes.shape[0], device=device, dtype=torch.uint8)) pos_inds.append( torch.zeros( res.neg_bboxes.shape[0], device=device, dtype=torch.uint8)) pos_inds = torch.cat(pos_inds) mask_feats = bbox_feats[pos_inds] mask_pred = self.mask_head(mask_feats) mask_targets = self.mask_head.get_target(sampling_results, gt_masks, self.train_cfg.rcnn) pos_labels = torch.cat( [res.pos_gt_labels for res in sampling_results]) loss_mask = self.mask_head.loss(mask_pred, mask_targets, pos_labels) losses.update(loss_mask) return losses, proposal_list, sampling_results, rois # def forward_val(self, # img, # img_meta, # gt_bboxes, # gt_labels, # gt_bboxes_ignore=None, # gt_masks=None, # proposals=None): # """ # Args: # img (Tensor): of shape (N, C, H, W) encoding input images. # Typically these should be mean centered and std scaled. # img_meta (list[dict]): list of image info dict where each dict has: # 'img_shape', 'scale_factor', 'flip', and my also contain # 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. # For details on the values of these keys see # `mmdet/datasets/pipelines/formatting.py:Collect`. 
# gt_bboxes (list[Tensor]): each item are the truth boxes for each # image in [tl_x, tl_y, br_x, br_y] format. # gt_labels (list[Tensor]): class indices corresponding to each box # gt_bboxes_ignore (None | list[Tensor]): specify which bounding # boxes can be ignored when computing the loss. # gt_masks (None | Tensor) : true segmentation masks for each box # used if the architecture supports a segmentation task. # proposals : override rpn proposals with custom proposals. Use when # `with_rpn` is False. # Returns: # dict[str, Tensor]: a dictionary of loss components # """ # x = self.extract_feat(img) # losses = dict() # # RPN forward and loss # if self.with_rpn: # rpn_outs = self.rpn_head(x) # rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, # self.train_cfg.rpn) # rpn_losses = self.rpn_head.loss( # *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) # losses.update(rpn_losses) # proposal_cfg = self.train_cfg.get('rpn_proposal', # self.test_cfg.rpn) # proposal_inputs = rpn_outs + (img_meta, proposal_cfg) # proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) # else: # proposal_list = proposals # # assign gts and sample proposals # if self.with_bbox or self.with_mask: # bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner) # bbox_sampler = build_sampler( # self.train_cfg.rcnn.sampler, context=self) # num_imgs = img.size(0) # if gt_bboxes_ignore is None: # gt_bboxes_ignore = [None for _ in range(num_imgs)] # sampling_results = [] # for i in range(num_imgs): # assign_result = bbox_assigner.assign(proposal_list[i], # gt_bboxes[i], # gt_bboxes_ignore[i], # gt_labels[i]) # sampling_result = bbox_sampler.sample( # assign_result, # proposal_list[i], # gt_bboxes[i], # gt_labels[i], # feats=[lvl_feat[i][None] for lvl_feat in x]) # sampling_results.append(sampling_result) # # bbox head forward and loss # if self.with_bbox: # rois = bbox2roi([res.bboxes for res in sampling_results]) # # TODO: a more flexible way to decide which feature maps to use # bbox_feats = self.bbox_roi_extractor( # x[:self.bbox_roi_extractor.num_inputs], rois) # if self.with_shared_head: # bbox_feats = self.shared_head(bbox_feats) # cls_score, bbox_pred = self.bbox_head(bbox_feats) # bbox_targets = self.bbox_head.get_target(sampling_results, # gt_bboxes, gt_labels, # self.train_cfg.rcnn) # loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, # *bbox_targets) # losses.update(loss_bbox) # # mask head forward and loss # if self.with_mask: # if not self.share_roi_extractor: # pos_rois = bbox2roi( # [res.pos_bboxes for res in sampling_results]) # mask_feats = self.mask_roi_extractor( # x[:self.mask_roi_extractor.num_inputs], pos_rois) # if self.with_shared_head: # mask_feats = self.shared_head(mask_feats) # else: # pos_inds = [] # device = bbox_feats.device # for res in sampling_results: # pos_inds.append( # torch.ones( # res.pos_bboxes.shape[0], # device=device, # dtype=torch.uint8)) # pos_inds.append( # torch.zeros( # res.neg_bboxes.shape[0], # device=device, # dtype=torch.uint8)) # pos_inds = torch.cat(pos_inds) # mask_feats = bbox_feats[pos_inds] # mask_pred = self.mask_head(mask_feats) # mask_targets = self.mask_head.get_target(sampling_results, # gt_masks, # self.train_cfg.rcnn) # pos_labels = torch.cat( # [res.pos_gt_labels for res in sampling_results]) # loss_mask = self.mask_head.loss(mask_pred, mask_targets, # pos_labels) # losses.update(loss_mask) # return cls_score, bbox_pred, mask_pred, losses
file_length: 24,777
avg_line_length: 41.64716
max_line_length: 126
extension_type: py
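For reference, the simple_test path of the two-stage detector above converts the (n, 5) detections and their labels into per-class lists via bbox2result before returning. Below is a minimal NumPy sketch of that grouping step; the function name is illustrative and it assumes the 0-based foreground-label convention used by this version of mmdetection.

import numpy as np

def split_by_class(det_bboxes, det_labels, num_classes):
    # Group (n, 5) detections [x1, y1, x2, y2, score] into one array per
    # foreground class, which is the shape of result bbox2result is used for.
    if det_bboxes.shape[0] == 0:
        return [np.zeros((0, 5), dtype=np.float32) for _ in range(num_classes - 1)]
    return [det_bboxes[det_labels == i] for i in range(num_classes - 1)]

dets = np.array([[10., 10., 50., 60., 0.9],
                 [20., 30., 80., 90., 0.4]], dtype=np.float32)
labels = np.array([0, 1])
per_class = split_by_class(dets, labels, num_classes=3)  # 2 foreground classes
print([r.shape for r in per_class])                      # [(1, 5), (1, 5)]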
repo: AlignShift
file: AlignShift-master/mmdet/models/detectors/base.py
import logging from abc import ABCMeta, abstractmethod import mmcv import numpy as np import pycocotools.mask as maskUtils import torch.nn as nn from mmdet.core import auto_fp16, get_classes, tensor2imgs class BaseDetector(nn.Module): """Base class for detectors""" __metaclass__ = ABCMeta def __init__(self): super(BaseDetector, self).__init__() self.fp16_enabled = False @property def with_neck(self): return hasattr(self, 'neck') and self.neck is not None @property def with_shared_head(self): return hasattr(self, 'shared_head') and self.shared_head is not None @property def with_bbox(self): return hasattr(self, 'bbox_head') and self.bbox_head is not None @property def with_mask(self): return hasattr(self, 'mask_head') and self.mask_head is not None @abstractmethod def extract_feat(self, imgs): pass def extract_feats(self, imgs): assert isinstance(imgs, list) for img in imgs: yield self.extract_feat(img) @abstractmethod def forward_train(self, imgs, img_metas, **kwargs): """ Args: img (list[Tensor]): list of tensors of shape (1, C, H, W). Typically these should be mean centered and std scaled. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and my also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. **kwargs: specific to concrete implementation """ pass @abstractmethod def simple_test(self, img, img_meta, **kwargs): pass @abstractmethod def aug_test(self, imgs, img_metas, **kwargs): pass def init_weights(self, pretrained=None): if pretrained is not None: logger = logging.getLogger() logger.info('load model from: {}'.format(pretrained)) def forward_test(self, imgs, img_metas, **kwargs): """ Args: imgs (List[Tensor]): the outer list indicates test-time augmentations and inner Tensor should have a shape NxCxHxW, which contains all images in the batch. img_meta (List[List[dict]]): the outer list indicates test-time augs (multiscale, flip, etc.) and the inner list indicates images in a batch """ # for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]: # if not isinstance(var, list): # raise TypeError('{} must be a list, but got {}'.format( # name, type(var))) # num_augs = len(imgs) # if num_augs != len(img_metas): # raise ValueError( # 'num of augmentations ({}) != num of image meta ({})'.format( # len(imgs), len(img_metas))) # # TODO: remove the restriction of imgs_per_gpu == 1 when prepared # imgs_per_gpu = imgs[0].size(0) # assert imgs_per_gpu == 1 # if num_augs == 1: # return self.simple_test(imgs[0], img_metas[0], **kwargs) # else: # return self.aug_test(imgs, img_metas, **kwargs) return self.simple_test(imgs, img_metas, **kwargs) @auto_fp16(apply_to=('img', )) def forward(self, img, img_meta, return_loss=True, debug=False, **kwargs): """ Calls either forward_train or forward_test depending on whether return_loss=True. Note this setting will change the expected inputs. When `return_loss=False`, img and img_meta are single-nested (i.e. Tensor and List[dict]), and when `resturn_loss=True`, img and img_meta should be double nested (i.e. List[Tensor], List[List[dict]]), with the outer list indicating test time augmentations. 
""" if return_loss: return self.forward_train(img, img_meta, **kwargs) elif debug: return self.forward_debug(img, img_meta, **kwargs)#hyadd else: return self.forward_test(img, img_meta, **kwargs) def show_result(self, data, result, dataset=None, score_thr=0.3): if isinstance(result, tuple): bbox_result, segm_result = result else: bbox_result, segm_result = result, None img_tensor = data['img'].data print(img_tensor.shape) img_metas = data['img_meta'].data[0] print(img_metas) imgs = tensor2imgs(img_tensor, **{'mean': np.array([0.5, 0.5, 0.5]), 'std': np.array([255., 255., 255.]), 'to_rgb': False}) assert len(imgs) == len(img_metas) if dataset is None: class_names = self.CLASSES elif isinstance(dataset, str): class_names = get_classes(dataset) elif isinstance(dataset, (list, tuple)): class_names = dataset else: raise TypeError( 'dataset must be a valid dataset name or a sequence' ' of class names, not {}'.format(type(dataset))) for img, img_meta in zip(imgs, img_metas): h, w, _ = img_meta['img_shape'] img_show = img[:h, :w, :] bboxes = np.vstack(bbox_result) # draw segmentation masks if segm_result is not None: segms = mmcv.concat_list(segm_result) print(segms) inds = np.where(bboxes[:, -1] > score_thr)[0] for i in inds: color_mask = np.random.randint( 0, 256, (1, 3), dtype=np.uint8) mask = maskUtils.decode(segms[i]).astype(np.bool) print('amsk shape', mask.shape) img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5 # draw bounding boxes labels = [ np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result) ] labels = np.concatenate(labels) mmcv.imshow_det_bboxes( img_show, bboxes, labels, class_names=class_names, score_thr=score_thr)
file_length: 6,397
avg_line_length: 35.352273
max_line_length: 131
extension_type: py
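The forward method of BaseDetector above is purely a dispatcher: return_loss=True routes to forward_train, debug=True to forward_debug, and everything else to forward_test. A self-contained toy sketch of that routing follows; the module name and branch return values are made up for illustration.

import torch
import torch.nn as nn

class ToyDetector(nn.Module):
    # Stand-in branches; a real detector returns losses / debug tensors / detections.
    def forward_train(self, img, img_meta, **kwargs):
        return {'loss_dummy': img.sum() * 0}

    def forward_debug(self, img, img_meta, **kwargs):
        return 'debug outputs'

    def forward_test(self, img, img_meta, **kwargs):
        return 'detections'

    def forward(self, img, img_meta, return_loss=True, debug=False, **kwargs):
        if return_loss:
            return self.forward_train(img, img_meta, **kwargs)
        elif debug:
            return self.forward_debug(img, img_meta, **kwargs)
        return self.forward_test(img, img_meta, **kwargs)

m = ToyDetector()
img = torch.zeros(1, 3, 32, 32)
print(m(img, [{}], return_loss=False))              # 'detections'
print(m(img, [{}], return_loss=False, debug=True))  # 'debug outputs'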
repo: AlignShift
file: AlignShift-master/mmdet/models/detectors/single_stage.py
import torch.nn as nn from mmdet.core import bbox2result from .. import builder from ..registry import DETECTORS from .base import BaseDetector import numpy as np @DETECTORS.register_module class SingleStageDetector(BaseDetector): """Base class for single-stage detectors. Single-stage detectors directly and densely predict bounding boxes on the output features of the backbone+neck. """ def __init__(self, backbone, neck=None, bbox_head=None, mask_head=None, train_cfg=None, test_cfg=None, pretrained=None): super(SingleStageDetector, self).__init__() self.backbone = builder.build_backbone(backbone) if neck is not None: self.neck = builder.build_neck(neck) self.bbox_head = builder.build_head(bbox_head) if mask_head is not None: self.mask_head = builder.build_head(mask_head) self.train_cfg = train_cfg self.test_cfg = test_cfg self.init_weights(pretrained=pretrained) def init_weights(self, pretrained=None): super(SingleStageDetector, self).init_weights(pretrained) self.backbone.init_weights(pretrained=pretrained) if self.with_neck: if isinstance(self.neck, nn.Sequential): for m in self.neck: m.init_weights() else: self.neck.init_weights() self.bbox_head.init_weights() def extract_feat(self, img): """Directly extract features from the backbone+neck """ x = self.backbone(img) if self.with_neck: x = self.neck(x) return x def forward_dummy(self, img): """Used for computing network flops. See `mmedetection/tools/get_flops.py` """ x = self.extract_feat(img) outs = self.bbox_head(x) return outs def forward_train(self, img, img_metas, gt_bboxes, gt_labels, gt_masks, gt_bboxes_ignore=None): x = self.extract_feat(img) outs = self.bbox_head(x) mask = self.mask_head(x[0], return_feat=False) loss_inputs = outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg) losses = self.bbox_head.loss( *loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) print(gt_masks) print(losses) loss_mask = self.mask_head.loss(mask.flatten(), gt_masks.flatten(), gt_labels) print(loss_mask) loss_all = dict(**losses, **loss_mask) print(loss_all) return loss_all def simple_test(self, img, img_meta, **kwargs): x = self.extract_feat(img) outs = self.bbox_head(x) bbox_inputs = outs + (img_meta, self.test_cfg) bbox_list = self.bbox_head.get_bboxes(*bbox_inputs, rescale=True)[0] bbox_results = [b.cpu().numpy() if b.shape[0] != 0 else np.array([[0, 0, 0, 0]]) for b in bbox_list] if kwargs.get('gt_bboxes'): loss_inputs = outs + (kwargs['gt_bboxes'], kwargs['gt_labels'], img_meta, self.train_cfg) losses = self.bbox_head.loss(*loss_inputs, gt_bboxes_ignore=None) return losses, bbox_results[0] else: return bbox_results[0] # def simple_test(self, img, img_meta, rescale=False): # x = self.extract_feat(img) # outs = self.bbox_head(x) # bbox_inputs = outs + (img_meta, self.test_cfg, rescale) # bbox_list = self.bbox_head.get_bboxes(*bbox_inputs, rescale=True) # bbox_results = [ # bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) # for det_bboxes, det_labels in bbox_list # ] # return bbox_results[0] def aug_test(self, imgs, img_metas, rescale=False): raise NotImplementedError
file_length: 4,032
avg_line_length: 34.690265
max_line_length: 108
extension_type: py
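SingleStageDetector above is essentially backbone, optional neck, then a dense bbox head, with no proposal stage. The toy end-to-end sketch below shows only that data flow; the layers are arbitrary stand-ins, not the real mmdet builders.

import torch
import torch.nn as nn

class ToySingleStage(nn.Module):
    def __init__(self):
        super().__init__()
        self.backbone = nn.Conv2d(3, 16, 3, stride=2, padding=1)
        self.neck = nn.Conv2d(16, 16, 1)
        self.bbox_head = nn.Conv2d(16, 4 + 2, 1)  # 4 box deltas + 2 class scores per cell

    def extract_feat(self, img):
        # backbone(+neck) feature extraction, mirroring extract_feat above
        return self.neck(self.backbone(img))

    def forward(self, img):
        feat = self.extract_feat(img)
        return self.bbox_head(feat)

out = ToySingleStage()(torch.randn(1, 3, 64, 64))
print(out.shape)  # torch.Size([1, 6, 32, 32])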
repo: AlignShift
file: AlignShift-master/mmdet/models/detectors/reppoints_detector.py
import torch from mmdet.core import bbox2result, bbox_mapping_back, multiclass_nms from ..registry import DETECTORS from .single_stage import SingleStageDetector @DETECTORS.register_module class RepPointsDetector(SingleStageDetector): """RepPoints: Point Set Representation for Object Detection. This detector is the implementation of: - RepPoints detector (https://arxiv.org/pdf/1904.11490) """ def __init__(self, backbone, neck, bbox_head, train_cfg=None, test_cfg=None, pretrained=None): super(RepPointsDetector, self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg, pretrained) def merge_aug_results(self, aug_bboxes, aug_scores, img_metas): """Merge augmented detection bboxes and scores. Args: aug_bboxes (list[Tensor]): shape (n, 4*#class) aug_scores (list[Tensor] or None): shape (n, #class) img_shapes (list[Tensor]): shape (3, ). Returns: tuple: (bboxes, scores) """ recovered_bboxes = [] for bboxes, img_info in zip(aug_bboxes, img_metas): img_shape = img_info[0]['img_shape'] scale_factor = img_info[0]['scale_factor'] flip = img_info[0]['flip'] bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip) recovered_bboxes.append(bboxes) bboxes = torch.cat(recovered_bboxes, dim=0) if aug_scores is None: return bboxes else: scores = torch.cat(aug_scores, dim=0) return bboxes, scores def aug_test(self, imgs, img_metas, rescale=False): # recompute feats to save memory feats = self.extract_feats(imgs) aug_bboxes = [] aug_scores = [] for x, img_meta in zip(feats, img_metas): # only one image in the batch outs = self.bbox_head(x) bbox_inputs = outs + (img_meta, self.test_cfg, False, False) det_bboxes, det_scores = self.bbox_head.get_bboxes(*bbox_inputs)[0] aug_bboxes.append(det_bboxes) aug_scores.append(det_scores) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = self.merge_aug_results( aug_bboxes, aug_scores, img_metas) det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, self.test_cfg.score_thr, self.test_cfg.nms, self.test_cfg.max_per_img) if rescale: _det_bboxes = det_bboxes else: _det_bboxes = det_bboxes.clone() _det_bboxes[:, :4] *= img_metas[0][0]['scale_factor'] bbox_results = bbox2result(_det_bboxes, det_labels, self.bbox_head.num_classes) return bbox_results
file_length: 3,089
avg_line_length: 36.682927
max_line_length: 79
extension_type: py
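In merge_aug_results above, boxes predicted on flipped or rescaled test-time augmentations are mapped back to the original image frame before they are concatenated. Below is a NumPy sketch of that inverse mapping, assuming a single scalar scale factor and horizontal flips only; the real bbox_mapping_back may differ in off-by-one details.

import numpy as np

def map_back(bboxes, img_shape, scale_factor, flip):
    # Undo the horizontal flip (mirror x1/x2 about the image width),
    # then undo the test-time rescaling.
    bboxes = bboxes.copy()
    if flip:
        w = img_shape[1]
        x1 = bboxes[:, 0].copy()
        bboxes[:, 0] = w - bboxes[:, 2]
        bboxes[:, 2] = w - x1
    return bboxes / scale_factor

boxes = np.array([[20., 10., 60., 50.]])
print(map_back(boxes, img_shape=(128, 100), scale_factor=2.0, flip=True))
# approximately [[20. 5. 40. 25.]]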
repo: AlignShift
file: AlignShift-master/mmdet/models/detectors/cascade_rcnn.py
from __future__ import division import torch import torch.nn as nn from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, build_assigner, build_sampler, merge_aug_bboxes, merge_aug_masks, multiclass_nms) from .. import builder from ..registry import DETECTORS from .base import BaseDetector from .test_mixins import RPNTestMixin @DETECTORS.register_module class CascadeRCNN(BaseDetector, RPNTestMixin): def __init__(self, num_stages, backbone, neck=None, shared_head=None, rpn_head=None, bbox_roi_extractor=None, bbox_head=None, mask_roi_extractor=None, mask_head=None, train_cfg=None, test_cfg=None, pretrained=None): assert bbox_roi_extractor is not None assert bbox_head is not None super(CascadeRCNN, self).__init__() self.num_stages = num_stages self.backbone = builder.build_backbone(backbone) if neck is not None: self.neck = builder.build_neck(neck) if rpn_head is not None: self.rpn_head = builder.build_head(rpn_head) if shared_head is not None: self.shared_head = builder.build_shared_head(shared_head) if bbox_head is not None: self.bbox_roi_extractor = nn.ModuleList() self.bbox_head = nn.ModuleList() if not isinstance(bbox_roi_extractor, list): bbox_roi_extractor = [ bbox_roi_extractor for _ in range(num_stages) ] if not isinstance(bbox_head, list): bbox_head = [bbox_head for _ in range(num_stages)] assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages for roi_extractor, head in zip(bbox_roi_extractor, bbox_head): self.bbox_roi_extractor.append( builder.build_roi_extractor(roi_extractor)) self.bbox_head.append(builder.build_head(head)) if mask_head is not None: self.mask_head = nn.ModuleList() if not isinstance(mask_head, list): mask_head = [mask_head for _ in range(num_stages)] assert len(mask_head) == self.num_stages for head in mask_head: self.mask_head.append(builder.build_head(head)) if mask_roi_extractor is not None: self.share_roi_extractor = False self.mask_roi_extractor = nn.ModuleList() if not isinstance(mask_roi_extractor, list): mask_roi_extractor = [ mask_roi_extractor for _ in range(num_stages) ] assert len(mask_roi_extractor) == self.num_stages for roi_extractor in mask_roi_extractor: self.mask_roi_extractor.append( builder.build_roi_extractor(roi_extractor)) else: self.share_roi_extractor = True self.mask_roi_extractor = self.bbox_roi_extractor self.train_cfg = train_cfg self.test_cfg = test_cfg self.init_weights(pretrained=pretrained) @property def with_rpn(self): return hasattr(self, 'rpn_head') and self.rpn_head is not None def init_weights(self, pretrained=None): super(CascadeRCNN, self).init_weights(pretrained) self.backbone.init_weights(pretrained=pretrained) if self.with_neck: if isinstance(self.neck, nn.Sequential): for m in self.neck: m.init_weights() else: self.neck.init_weights() if self.with_rpn: self.rpn_head.init_weights() if self.with_shared_head: self.shared_head.init_weights(pretrained=pretrained) for i in range(self.num_stages): if self.with_bbox: self.bbox_roi_extractor[i].init_weights() self.bbox_head[i].init_weights() if self.with_mask: if not self.share_roi_extractor: self.mask_roi_extractor[i].init_weights() self.mask_head[i].init_weights() def extract_feat(self, img): x = self.backbone(img) if self.with_neck: x = self.neck(x) return x def forward_dummy(self, img): outs = () # backbone x = self.extract_feat(img) # rpn if self.with_rpn: rpn_outs = self.rpn_head(x) outs = outs + (rpn_outs, ) proposals = torch.randn(1000, 4).cuda() # bbox heads rois = bbox2roi([proposals]) if self.with_bbox: for i in range(self.num_stages): bbox_feats = 
self.bbox_roi_extractor[i]( x[:self.bbox_roi_extractor[i].num_inputs], rois) if self.with_shared_head: bbox_feats = self.shared_head(bbox_feats) cls_score, bbox_pred = self.bbox_head[i](bbox_feats) outs = outs + (cls_score, bbox_pred) # mask heads if self.with_mask: mask_rois = rois[:100] for i in range(self.num_stages): mask_feats = self.mask_roi_extractor[i]( x[:self.mask_roi_extractor[i].num_inputs], mask_rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) mask_pred = self.mask_head[i](mask_feats) outs = outs + (mask_pred, ) return outs def forward_train(self, img, img_meta, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None): """ Args: img (Tensor): of shape (N, C, H, W) encoding input images. Typically these should be mean centered and std scaled. img_meta (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and my also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. gt_bboxes (list[Tensor]): each item are the truth boxes for each image in [tl_x, tl_y, br_x, br_y] format. gt_labels (list[Tensor]): class indices corresponding to each box gt_bboxes_ignore (None | list[Tensor]): specify which bounding boxes can be ignored when computing the loss. gt_masks (None | Tensor) : true segmentation masks for each box used if the architecture supports a segmentation task. proposals : override rpn proposals with custom proposals. Use when `with_rpn` is False. Returns: dict[str, Tensor]: a dictionary of loss components """ x = self.extract_feat(img) losses = dict() if self.with_rpn: rpn_outs = self.rpn_head(x) rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, self.train_cfg.rpn) rpn_losses = self.rpn_head.loss( *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) losses.update(rpn_losses) proposal_cfg = self.train_cfg.get('rpn_proposal', self.test_cfg.rpn) proposal_inputs = rpn_outs + (img_meta, proposal_cfg) proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) else: proposal_list = proposals for i in range(self.num_stages): self.current_stage = i rcnn_train_cfg = self.train_cfg.rcnn[i] lw = self.train_cfg.stage_loss_weights[i] # assign gts and sample proposals sampling_results = [] if self.with_bbox or self.with_mask: bbox_assigner = build_assigner(rcnn_train_cfg.assigner) bbox_sampler = build_sampler( rcnn_train_cfg.sampler, context=self) num_imgs = img.size(0) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] for j in range(num_imgs): assign_result = bbox_assigner.assign( proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], gt_labels[j]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[j], gt_bboxes[j], gt_labels[j], feats=[lvl_feat[j][None] for lvl_feat in x]) sampling_results.append(sampling_result) # bbox head forward and loss bbox_roi_extractor = self.bbox_roi_extractor[i] bbox_head = self.bbox_head[i] rois = bbox2roi([res.bboxes for res in sampling_results]) bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], rois) if self.with_shared_head: bbox_feats = self.shared_head(bbox_feats) cls_score, bbox_pred = bbox_head(bbox_feats) bbox_targets = bbox_head.get_target(sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg) loss_bbox = bbox_head.loss(cls_score, bbox_pred, *bbox_targets) for name, value in loss_bbox.items(): losses['s{}.{}'.format(i, name)] = ( value * lw if 'loss' in name else value) # mask head forward and loss if 
self.with_mask: if not self.share_roi_extractor: mask_roi_extractor = self.mask_roi_extractor[i] pos_rois = bbox2roi( [res.pos_bboxes for res in sampling_results]) mask_feats = mask_roi_extractor( x[:mask_roi_extractor.num_inputs], pos_rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) else: # reuse positive bbox feats pos_inds = [] device = bbox_feats.device for res in sampling_results: pos_inds.append( torch.ones( res.pos_bboxes.shape[0], device=device, dtype=torch.uint8)) pos_inds.append( torch.zeros( res.neg_bboxes.shape[0], device=device, dtype=torch.uint8)) pos_inds = torch.cat(pos_inds) mask_feats = bbox_feats[pos_inds] mask_head = self.mask_head[i] mask_pred = mask_head(mask_feats) mask_targets = mask_head.get_target(sampling_results, gt_masks, rcnn_train_cfg) pos_labels = torch.cat( [res.pos_gt_labels for res in sampling_results]) loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels) for name, value in loss_mask.items(): losses['s{}.{}'.format(i, name)] = ( value * lw if 'loss' in name else value) # refine bboxes if i < self.num_stages - 1: pos_is_gts = [res.pos_is_gt for res in sampling_results] roi_labels = bbox_targets[0] # bbox_targets is a tuple with torch.no_grad(): proposal_list = bbox_head.refine_bboxes( rois, roi_labels, bbox_pred, pos_is_gts, img_meta) return losses def simple_test(self, img, img_meta, proposals=None, rescale=False): """Run inference on a single image. Args: img (Tensor): must be in shape (N, C, H, W) img_meta (list[dict]): a list with one dictionary element. See `mmdet/datasets/pipelines/formatting.py:Collect` for details of meta dicts. proposals : if specified overrides rpn proposals rescale (bool): if True returns boxes in original image space Returns: dict: results """ x = self.extract_feat(img) proposal_list = self.simple_test_rpn( x, img_meta, self.test_cfg.rpn) if proposals is None else proposals img_shape = img_meta[0]['img_shape'] ori_shape = img_meta[0]['ori_shape'] scale_factor = img_meta[0]['scale_factor'] # "ms" in variable names means multi-stage ms_bbox_result = {} ms_segm_result = {} ms_scores = [] rcnn_test_cfg = self.test_cfg.rcnn rois = bbox2roi(proposal_list) for i in range(self.num_stages): bbox_roi_extractor = self.bbox_roi_extractor[i] bbox_head = self.bbox_head[i] bbox_feats = bbox_roi_extractor( x[:len(bbox_roi_extractor.featmap_strides)], rois) if self.with_shared_head: bbox_feats = self.shared_head(bbox_feats) cls_score, bbox_pred = bbox_head(bbox_feats) ms_scores.append(cls_score) if self.test_cfg.keep_all_stages: det_bboxes, det_labels = bbox_head.get_det_bboxes( rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=rescale, cfg=rcnn_test_cfg) bbox_result = bbox2result(det_bboxes, det_labels, bbox_head.num_classes) ms_bbox_result['stage{}'.format(i)] = bbox_result if self.with_mask: mask_roi_extractor = self.mask_roi_extractor[i] mask_head = self.mask_head[i] if det_bboxes.shape[0] == 0: mask_classes = mask_head.num_classes - 1 segm_result = [[] for _ in range(mask_classes)] else: _bboxes = ( det_bboxes[:, :4] * scale_factor if rescale else det_bboxes) mask_rois = bbox2roi([_bboxes]) mask_feats = mask_roi_extractor( x[:len(mask_roi_extractor.featmap_strides)], mask_rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats, i) mask_pred = mask_head(mask_feats) segm_result = mask_head.get_seg_masks( mask_pred, _bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor, rescale) ms_segm_result['stage{}'.format(i)] = segm_result if i < self.num_stages - 1: bbox_label = 
cls_score.argmax(dim=1) rois = bbox_head.regress_by_class(rois, bbox_label, bbox_pred, img_meta[0]) cls_score = sum(ms_scores) / self.num_stages det_bboxes, det_labels = self.bbox_head[-1].get_det_bboxes( rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=rescale, cfg=rcnn_test_cfg) bbox_result = bbox2result(det_bboxes, det_labels, self.bbox_head[-1].num_classes) ms_bbox_result['ensemble'] = bbox_result if self.with_mask: if det_bboxes.shape[0] == 0: mask_classes = self.mask_head[-1].num_classes - 1 segm_result = [[] for _ in range(mask_classes)] else: if isinstance(scale_factor, float): # aspect ratio fixed _bboxes = ( det_bboxes[:, :4] * scale_factor if rescale else det_bboxes) else: _bboxes = ( det_bboxes[:, :4] * torch.from_numpy(scale_factor).to(det_bboxes.device) if rescale else det_bboxes) mask_rois = bbox2roi([_bboxes]) aug_masks = [] for i in range(self.num_stages): mask_roi_extractor = self.mask_roi_extractor[i] mask_feats = mask_roi_extractor( x[:len(mask_roi_extractor.featmap_strides)], mask_rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) mask_pred = self.mask_head[i](mask_feats) aug_masks.append(mask_pred.sigmoid().cpu().numpy()) merged_masks = merge_aug_masks(aug_masks, [img_meta] * self.num_stages, self.test_cfg.rcnn) segm_result = self.mask_head[-1].get_seg_masks( merged_masks, _bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor, rescale) ms_segm_result['ensemble'] = segm_result if not self.test_cfg.keep_all_stages: if self.with_mask: results = (ms_bbox_result['ensemble'], ms_segm_result['ensemble']) else: results = ms_bbox_result['ensemble'] else: if self.with_mask: results = { stage: (ms_bbox_result[stage], ms_segm_result[stage]) for stage in ms_bbox_result } else: results = ms_bbox_result return results def aug_test(self, imgs, img_metas, proposals=None, rescale=False): """Test with augmentations. If rescale is False, then returned bboxes and masks will fit the scale of imgs[0]. 
""" # recompute feats to save memory proposal_list = self.aug_test_rpn( self.extract_feats(imgs), img_metas, self.test_cfg.rpn) rcnn_test_cfg = self.test_cfg.rcnn aug_bboxes = [] aug_scores = [] for x, img_meta in zip(self.extract_feats(imgs), img_metas): # only one image in the batch img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, scale_factor, flip) # "ms" in variable names means multi-stage ms_scores = [] rois = bbox2roi([proposals]) for i in range(self.num_stages): bbox_roi_extractor = self.bbox_roi_extractor[i] bbox_head = self.bbox_head[i] bbox_feats = bbox_roi_extractor( x[:len(bbox_roi_extractor.featmap_strides)], rois) if self.with_shared_head: bbox_feats = self.shared_head(bbox_feats) cls_score, bbox_pred = bbox_head(bbox_feats) ms_scores.append(cls_score) if i < self.num_stages - 1: bbox_label = cls_score.argmax(dim=1) rois = bbox_head.regress_by_class(rois, bbox_label, bbox_pred, img_meta[0]) cls_score = sum(ms_scores) / float(len(ms_scores)) bboxes, scores = self.bbox_head[-1].get_det_bboxes( rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=False, cfg=None) aug_bboxes.append(bboxes) aug_scores.append(scores) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = merge_aug_bboxes( aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, rcnn_test_cfg.score_thr, rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img) bbox_result = bbox2result(det_bboxes, det_labels, self.bbox_head[-1].num_classes) if self.with_mask: if det_bboxes.shape[0] == 0: segm_result = [[] for _ in range(self.mask_head[-1].num_classes - 1)] else: aug_masks = [] aug_img_metas = [] for x, img_meta in zip(self.extract_feats(imgs), img_metas): img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, scale_factor, flip) mask_rois = bbox2roi([_bboxes]) for i in range(self.num_stages): mask_feats = self.mask_roi_extractor[i]( x[:len(self.mask_roi_extractor[i].featmap_strides )], mask_rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) mask_pred = self.mask_head[i](mask_feats) aug_masks.append(mask_pred.sigmoid().cpu().numpy()) aug_img_metas.append(img_meta) merged_masks = merge_aug_masks(aug_masks, aug_img_metas, self.test_cfg.rcnn) ori_shape = img_metas[0][0]['ori_shape'] segm_result = self.mask_head[-1].get_seg_masks( merged_masks, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor=1.0, rescale=False) return bbox_result, segm_result else: return bbox_result def show_result(self, data, result, **kwargs): if self.with_mask: ms_bbox_result, ms_segm_result = result if isinstance(ms_bbox_result, dict): result = (ms_bbox_result['ensemble'], ms_segm_result['ensemble']) else: if isinstance(result, dict): result = result['ensemble'] super(CascadeRCNN, self).show_result(data, result, **kwargs)
file_length: 23,674
avg_line_length: 41.276786
max_line_length: 79
extension_type: py
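At test time the cascade above collects each stage's classification scores in ms_scores and averages them before the final box decoding (cls_score = sum(ms_scores) / num_stages). A tiny sketch of that ensemble step with random stand-in scores:

import torch

num_stages, num_rois, num_classes = 3, 5, 4
# One score matrix per cascade stage for the same set of RoIs (random stand-ins).
ms_scores = [torch.softmax(torch.randn(num_rois, num_classes), dim=1)
             for _ in range(num_stages)]
# Element-wise mean across stages, as in CascadeRCNN.simple_test above.
cls_score = sum(ms_scores) / num_stages
print(cls_score.shape)  # torch.Size([5, 4])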
repo: AlignShift
file: AlignShift-master/mmdet/models/detectors/grid_rcnn.py
import torch from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler from .. import builder from ..registry import DETECTORS from .two_stage import TwoStageDetector @DETECTORS.register_module class GridRCNN(TwoStageDetector): """Grid R-CNN. This detector is the implementation of: - Grid R-CNN (https://arxiv.org/abs/1811.12030) - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688) """ def __init__(self, backbone, rpn_head, bbox_roi_extractor, bbox_head, grid_roi_extractor, grid_head, train_cfg, test_cfg, neck=None, shared_head=None, pretrained=None): assert grid_head is not None super(GridRCNN, self).__init__( backbone=backbone, neck=neck, shared_head=shared_head, rpn_head=rpn_head, bbox_roi_extractor=bbox_roi_extractor, bbox_head=bbox_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained) if grid_roi_extractor is not None: self.grid_roi_extractor = builder.build_roi_extractor( grid_roi_extractor) self.share_roi_extractor = False else: self.share_roi_extractor = True self.grid_roi_extractor = self.bbox_roi_extractor self.grid_head = builder.build_head(grid_head) self.init_extra_weights() def init_extra_weights(self): self.grid_head.init_weights() if not self.share_roi_extractor: self.grid_roi_extractor.init_weights() def _random_jitter(self, sampling_results, img_metas, amplitude=0.15): """Ramdom jitter positive proposals for training.""" for sampling_result, img_meta in zip(sampling_results, img_metas): bboxes = sampling_result.pos_bboxes random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_( -amplitude, amplitude) # before jittering cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2 wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs() # after jittering new_cxcy = cxcy + wh * random_offsets[:, :2] new_wh = wh * (1 + random_offsets[:, 2:]) # xywh to xyxy new_x1y1 = (new_cxcy - new_wh / 2) new_x2y2 = (new_cxcy + new_wh / 2) new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1) # clip bboxes max_shape = img_meta['img_shape'] if max_shape is not None: new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1) new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1) sampling_result.pos_bboxes = new_bboxes return sampling_results def forward_dummy(self, img): outs = () # backbone x = self.extract_feat(img) # rpn if self.with_rpn: rpn_outs = self.rpn_head(x) outs = outs + (rpn_outs, ) proposals = torch.randn(1000, 4).cuda() # bbox head rois = bbox2roi([proposals]) bbox_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) if self.with_shared_head: bbox_feats = self.shared_head(bbox_feats) cls_score, bbox_pred = self.bbox_head(bbox_feats) # grid head grid_rois = rois[:100] grid_feats = self.grid_roi_extractor( x[:self.grid_roi_extractor.num_inputs], grid_rois) if self.with_shared_head: grid_feats = self.shared_head(grid_feats) grid_pred = self.grid_head(grid_feats) return rpn_outs, cls_score, bbox_pred, grid_pred def forward_train(self, img, img_meta, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None): x = self.extract_feat(img) losses = dict() # RPN forward and loss if self.with_rpn: rpn_outs = self.rpn_head(x) rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, self.train_cfg.rpn) rpn_losses = self.rpn_head.loss( *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) losses.update(rpn_losses) proposal_cfg = self.train_cfg.get('rpn_proposal', self.test_cfg.rpn) proposal_inputs = rpn_outs + (img_meta, proposal_cfg) proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) else: proposal_list = proposals if 
self.with_bbox: # assign gts and sample proposals bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner) bbox_sampler = build_sampler( self.train_cfg.rcnn.sampler, context=self) num_imgs = img.size(0) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] for i in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=[lvl_feat[i][None] for lvl_feat in x]) sampling_results.append(sampling_result) # bbox head forward and loss rois = bbox2roi([res.bboxes for res in sampling_results]) # TODO: a more flexible way to decide which feature maps to use bbox_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) if self.with_shared_head: bbox_feats = self.shared_head(bbox_feats) cls_score, bbox_pred = self.bbox_head(bbox_feats) bbox_targets = self.bbox_head.get_target(sampling_results, gt_bboxes, gt_labels, self.train_cfg.rcnn) loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, *bbox_targets) losses.update(loss_bbox) # Grid head forward and loss sampling_results = self._random_jitter(sampling_results, img_meta) pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) grid_feats = self.grid_roi_extractor( x[:self.grid_roi_extractor.num_inputs], pos_rois) if self.with_shared_head: grid_feats = self.shared_head(grid_feats) # Accelerate training max_sample_num_grid = self.train_cfg.rcnn.get('max_num_grid', 192) sample_idx = torch.randperm( grid_feats.shape[0])[:min(grid_feats. shape[0], max_sample_num_grid)] grid_feats = grid_feats[sample_idx] grid_pred = self.grid_head(grid_feats) grid_targets = self.grid_head.get_target(sampling_results, self.train_cfg.rcnn) grid_targets = grid_targets[sample_idx] loss_grid = self.grid_head.loss(grid_pred, grid_targets) losses.update(loss_grid) return losses def simple_test(self, img, img_meta, proposals=None, rescale=False): """Test without augmentation.""" assert self.with_bbox, "Bbox head must be implemented." x = self.extract_feat(img) proposal_list = self.simple_test_rpn( x, img_meta, self.test_cfg.rpn) if proposals is None else proposals det_bboxes, det_labels = self.simple_test_bboxes( x, img_meta, proposal_list, self.test_cfg.rcnn, rescale=False) # pack rois into bboxes grid_rois = bbox2roi([det_bboxes[:, :4]]) grid_feats = self.grid_roi_extractor( x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois) if grid_rois.shape[0] != 0: self.grid_head.test_mode = True grid_pred = self.grid_head(grid_feats) det_bboxes = self.grid_head.get_bboxes(det_bboxes, grid_pred['fused'], img_meta) if rescale: det_bboxes[:, :4] /= img_meta[0]['scale_factor'] else: det_bboxes = torch.Tensor([]) bbox_results = bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes) return bbox_results
file_length: 9,225
avg_line_length: 39.113043
max_line_length: 79
extension_type: py
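GridRCNN._random_jitter above perturbs the positive proposals in centre/size space before training the grid head. The core arithmetic, extracted into a standalone sketch (clipping to the image boundary is omitted here):

import torch

def jitter_boxes(bboxes, amplitude=0.15):
    # Random offsets for centre (first two columns) and size (last two columns).
    offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_(-amplitude, amplitude)
    cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2
    wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs()
    # Shift the centre by a fraction of the box size and rescale width/height.
    new_cxcy = cxcy + wh * offsets[:, :2]
    new_wh = wh * (1 + offsets[:, 2:])
    # Convert back to x1y1x2y2.
    return torch.cat([new_cxcy - new_wh / 2, new_cxcy + new_wh / 2], dim=1)

print(jitter_boxes(torch.tensor([[10., 10., 50., 30.]])))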
repo: AlignShift
file: AlignShift-master/mmdet/models/detectors/double_head_rcnn.py
import torch from mmdet.core import bbox2roi, build_assigner, build_sampler from ..registry import DETECTORS from .two_stage import TwoStageDetector @DETECTORS.register_module class DoubleHeadRCNN(TwoStageDetector): def __init__(self, reg_roi_scale_factor, **kwargs): super().__init__(**kwargs) self.reg_roi_scale_factor = reg_roi_scale_factor def forward_dummy(self, img): outs = () # backbone x = self.extract_feat(img) # rpn if self.with_rpn: rpn_outs = self.rpn_head(x) outs = outs + (rpn_outs, ) proposals = torch.randn(1000, 4).cuda() # bbox head rois = bbox2roi([proposals]) bbox_cls_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) bbox_reg_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois, roi_scale_factor=self.reg_roi_scale_factor) if self.with_shared_head: bbox_cls_feats = self.shared_head(bbox_cls_feats) bbox_reg_feats = self.shared_head(bbox_reg_feats) cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats) outs += (cls_score, bbox_pred) return outs def forward_train(self, img, img_meta, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None): x = self.extract_feat(img) losses = dict() # RPN forward and loss if self.with_rpn: rpn_outs = self.rpn_head(x) rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, self.train_cfg.rpn) rpn_losses = self.rpn_head.loss( *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) losses.update(rpn_losses) proposal_cfg = self.train_cfg.get('rpn_proposal', self.test_cfg.rpn) proposal_inputs = rpn_outs + (img_meta, proposal_cfg) proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) else: proposal_list = proposals # assign gts and sample proposals if self.with_bbox or self.with_mask: bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner) bbox_sampler = build_sampler( self.train_cfg.rcnn.sampler, context=self) num_imgs = img.size(0) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] for i in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=[lvl_feat[i][None] for lvl_feat in x]) sampling_results.append(sampling_result) # bbox head forward and loss if self.with_bbox: rois = bbox2roi([res.bboxes for res in sampling_results]) # TODO: a more flexible way to decide which feature maps to use bbox_cls_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) bbox_reg_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois, roi_scale_factor=self.reg_roi_scale_factor) if self.with_shared_head: bbox_cls_feats = self.shared_head(bbox_cls_feats) bbox_reg_feats = self.shared_head(bbox_reg_feats) cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats) bbox_targets = self.bbox_head.get_target(sampling_results, gt_bboxes, gt_labels, self.train_cfg.rcnn) loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, *bbox_targets) losses.update(loss_bbox) # mask head forward and loss if self.with_mask: if not self.share_roi_extractor: pos_rois = bbox2roi( [res.pos_bboxes for res in sampling_results]) mask_feats = self.mask_roi_extractor( x[:self.mask_roi_extractor.num_inputs], pos_rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) else: pos_inds = [] device = bbox_cls_feats.device for res in sampling_results: pos_inds.append( torch.ones( res.pos_bboxes.shape[0], device=device, 
dtype=torch.uint8)) pos_inds.append( torch.zeros( res.neg_bboxes.shape[0], device=device, dtype=torch.uint8)) pos_inds = torch.cat(pos_inds) mask_feats = bbox_cls_feats[pos_inds] mask_pred = self.mask_head(mask_feats) mask_targets = self.mask_head.get_target(sampling_results, gt_masks, self.train_cfg.rcnn) pos_labels = torch.cat( [res.pos_gt_labels for res in sampling_results]) loss_mask = self.mask_head.loss(mask_pred, mask_targets, pos_labels) losses.update(loss_mask) return losses def simple_test_bboxes(self, x, img_meta, proposals, rcnn_test_cfg, rescale=False): """Test only det bboxes without augmentation.""" rois = bbox2roi(proposals) bbox_cls_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) bbox_reg_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois, roi_scale_factor=self.reg_roi_scale_factor) if self.with_shared_head: bbox_cls_feats = self.shared_head(bbox_cls_feats) bbox_reg_feats = self.shared_head(bbox_reg_feats) cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats) img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] det_bboxes, det_labels = self.bbox_head.get_det_bboxes( rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=rescale, cfg=rcnn_test_cfg) return det_bboxes, det_labels
file_length: 7,453
avg_line_length: 40.642458
max_line_length: 77
extension_type: py
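DoubleHeadRCNN above pools RoI features twice: once as-is for classification, and once with roi_scale_factor for regression so that branch sees more context around each box. The sketch below shows how a scale factor conceptually enlarges RoIs ([batch_idx, x1, y1, x2, y2]) about their centres; the actual rescaling lives inside the RoI extractor, so this is illustrative only.

import torch

def scale_rois(rois, scale_factor):
    # Centre and size of each RoI.
    cx = (rois[:, 1] + rois[:, 3]) * 0.5
    cy = (rois[:, 2] + rois[:, 4]) * 0.5
    w = (rois[:, 3] - rois[:, 1]) * scale_factor
    h = (rois[:, 4] - rois[:, 2]) * scale_factor
    # Rebuild the enlarged box around the same centre.
    out = rois.clone()
    out[:, 1] = cx - w * 0.5
    out[:, 2] = cy - h * 0.5
    out[:, 3] = cx + w * 0.5
    out[:, 4] = cy + h * 0.5
    return out

print(scale_rois(torch.tensor([[0., 10., 10., 50., 30.]]), 1.3))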
repo: AlignShift
file: AlignShift-master/mmdet/models/detectors/htc.py
import torch import torch.nn.functional as F from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, build_assigner, build_sampler, merge_aug_bboxes, merge_aug_masks, multiclass_nms) from .. import builder from ..registry import DETECTORS from .cascade_rcnn import CascadeRCNN @DETECTORS.register_module class HybridTaskCascade(CascadeRCNN): def __init__(self, num_stages, backbone, semantic_roi_extractor=None, semantic_head=None, semantic_fusion=('bbox', 'mask'), interleaved=True, mask_info_flow=True, **kwargs): super(HybridTaskCascade, self).__init__(num_stages, backbone, **kwargs) assert self.with_bbox and self.with_mask assert not self.with_shared_head # shared head not supported if semantic_head is not None: self.semantic_roi_extractor = builder.build_roi_extractor( semantic_roi_extractor) self.semantic_head = builder.build_head(semantic_head) self.semantic_fusion = semantic_fusion self.interleaved = interleaved self.mask_info_flow = mask_info_flow @property def with_semantic(self): if hasattr(self, 'semantic_head') and self.semantic_head is not None: return True else: return False def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg, semantic_feat=None): rois = bbox2roi([res.bboxes for res in sampling_results]) bbox_roi_extractor = self.bbox_roi_extractor[stage] bbox_head = self.bbox_head[stage] bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs], rois) # semantic feature fusion # element-wise sum for original features and pooled semantic features if self.with_semantic and 'bbox' in self.semantic_fusion: bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], rois) if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: bbox_semantic_feat = F.adaptive_avg_pool2d( bbox_semantic_feat, bbox_feats.shape[-2:]) bbox_feats += bbox_semantic_feat cls_score, bbox_pred = bbox_head(bbox_feats) bbox_targets = bbox_head.get_target(sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg) loss_bbox = bbox_head.loss(cls_score, bbox_pred, *bbox_targets) return loss_bbox, rois, bbox_targets, bbox_pred def _mask_forward_train(self, stage, x, sampling_results, gt_masks, rcnn_train_cfg, semantic_feat=None): mask_roi_extractor = self.mask_roi_extractor[stage] mask_head = self.mask_head[stage] pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results]) mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs], pos_rois) # semantic feature fusion # element-wise sum for original features and pooled semantic features if self.with_semantic and 'mask' in self.semantic_fusion: mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], pos_rois) if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: mask_semantic_feat = F.adaptive_avg_pool2d( mask_semantic_feat, mask_feats.shape[-2:]) mask_feats += mask_semantic_feat # mask information flow # forward all previous mask heads to obtain last_feat, and fuse it # with the normal mask feature if self.mask_info_flow: last_feat = None for i in range(stage): last_feat = self.mask_head[i]( mask_feats, last_feat, return_logits=False) mask_pred = mask_head(mask_feats, last_feat, return_feat=False) else: mask_pred = mask_head(mask_feats) mask_targets = mask_head.get_target(sampling_results, gt_masks, rcnn_train_cfg) pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results]) loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels) return loss_mask def _bbox_forward_test(self, stage, x, rois, semantic_feat=None): bbox_roi_extractor = self.bbox_roi_extractor[stage] 
bbox_head = self.bbox_head[stage] bbox_feats = bbox_roi_extractor( x[:len(bbox_roi_extractor.featmap_strides)], rois) if self.with_semantic and 'bbox' in self.semantic_fusion: bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat], rois) if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]: bbox_semantic_feat = F.adaptive_avg_pool2d( bbox_semantic_feat, bbox_feats.shape[-2:]) bbox_feats += bbox_semantic_feat cls_score, bbox_pred = bbox_head(bbox_feats) return cls_score, bbox_pred def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None): mask_roi_extractor = self.mask_roi_extractor[stage] mask_head = self.mask_head[stage] mask_rois = bbox2roi([bboxes]) mask_feats = mask_roi_extractor( x[:len(mask_roi_extractor.featmap_strides)], mask_rois) if self.with_semantic and 'mask' in self.semantic_fusion: mask_semantic_feat = self.semantic_roi_extractor([semantic_feat], mask_rois) if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]: mask_semantic_feat = F.adaptive_avg_pool2d( mask_semantic_feat, mask_feats.shape[-2:]) mask_feats += mask_semantic_feat if self.mask_info_flow: last_feat = None last_pred = None for i in range(stage): mask_pred, last_feat = self.mask_head[i](mask_feats, last_feat) if last_pred is not None: mask_pred = mask_pred + last_pred last_pred = mask_pred mask_pred = mask_head(mask_feats, last_feat, return_feat=False) if last_pred is not None: mask_pred = mask_pred + last_pred else: mask_pred = mask_head(mask_feats) return mask_pred def forward_dummy(self, img): outs = () # backbone x = self.extract_feat(img) # rpn if self.with_rpn: rpn_outs = self.rpn_head(x) outs = outs + (rpn_outs, ) proposals = torch.randn(1000, 4).cuda() # semantic head if self.with_semantic: _, semantic_feat = self.semantic_head(x) else: semantic_feat = None # bbox heads rois = bbox2roi([proposals]) for i in range(self.num_stages): cls_score, bbox_pred = self._bbox_forward_test( i, x, rois, semantic_feat=semantic_feat) outs = outs + (cls_score, bbox_pred) # mask heads if self.with_mask: mask_rois = rois[:100] mask_roi_extractor = self.mask_roi_extractor[-1] mask_feats = mask_roi_extractor( x[:len(mask_roi_extractor.featmap_strides)], mask_rois) if self.with_semantic and 'mask' in self.semantic_fusion: mask_semantic_feat = self.semantic_roi_extractor( [semantic_feat], mask_rois) mask_feats += mask_semantic_feat last_feat = None for i in range(self.num_stages): mask_head = self.mask_head[i] if self.mask_info_flow: mask_pred, last_feat = mask_head(mask_feats, last_feat) else: mask_pred = mask_head(mask_feats) outs = outs + (mask_pred, ) return outs def forward_train(self, img, img_meta, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, gt_semantic_seg=None, proposals=None): x = self.extract_feat(img) losses = dict() # RPN part, the same as normal two-stage detectors if self.with_rpn: rpn_outs = self.rpn_head(x) rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, self.train_cfg.rpn) rpn_losses = self.rpn_head.loss( *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) losses.update(rpn_losses) proposal_cfg = self.train_cfg.get('rpn_proposal', self.test_cfg.rpn) proposal_inputs = rpn_outs + (img_meta, proposal_cfg) proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) else: proposal_list = proposals # semantic segmentation part # 2 outputs: segmentation prediction and embedded features if self.with_semantic: semantic_pred, semantic_feat = self.semantic_head(x) loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg) losses['loss_semantic_seg'] = loss_seg else: 
semantic_feat = None for i in range(self.num_stages): self.current_stage = i rcnn_train_cfg = self.train_cfg.rcnn[i] lw = self.train_cfg.stage_loss_weights[i] # assign gts and sample proposals sampling_results = [] bbox_assigner = build_assigner(rcnn_train_cfg.assigner) bbox_sampler = build_sampler(rcnn_train_cfg.sampler, context=self) num_imgs = img.size(0) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] for j in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], gt_labels[j]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[j], gt_bboxes[j], gt_labels[j], feats=[lvl_feat[j][None] for lvl_feat in x]) sampling_results.append(sampling_result) # bbox head forward and loss loss_bbox, rois, bbox_targets, bbox_pred = \ self._bbox_forward_train( i, x, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg, semantic_feat) roi_labels = bbox_targets[0] for name, value in loss_bbox.items(): losses['s{}.{}'.format(i, name)] = ( value * lw if 'loss' in name else value) # mask head forward and loss if self.with_mask: # interleaved execution: use regressed bboxes by the box branch # to train the mask branch if self.interleaved: pos_is_gts = [res.pos_is_gt for res in sampling_results] with torch.no_grad(): proposal_list = self.bbox_head[i].refine_bboxes( rois, roi_labels, bbox_pred, pos_is_gts, img_meta) # re-assign and sample 512 RoIs from 512 RoIs sampling_results = [] for j in range(num_imgs): assign_result = bbox_assigner.assign( proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j], gt_labels[j]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[j], gt_bboxes[j], gt_labels[j], feats=[lvl_feat[j][None] for lvl_feat in x]) sampling_results.append(sampling_result) loss_mask = self._mask_forward_train(i, x, sampling_results, gt_masks, rcnn_train_cfg, semantic_feat) for name, value in loss_mask.items(): losses['s{}.{}'.format(i, name)] = ( value * lw if 'loss' in name else value) # refine bboxes (same as Cascade R-CNN) if i < self.num_stages - 1 and not self.interleaved: pos_is_gts = [res.pos_is_gt for res in sampling_results] with torch.no_grad(): proposal_list = self.bbox_head[i].refine_bboxes( rois, roi_labels, bbox_pred, pos_is_gts, img_meta) return losses def simple_test(self, img, img_meta, proposals=None, rescale=False): x = self.extract_feat(img) proposal_list = self.simple_test_rpn( x, img_meta, self.test_cfg.rpn) if proposals is None else proposals if self.with_semantic: _, semantic_feat = self.semantic_head(x) else: semantic_feat = None img_shape = img_meta[0]['img_shape'] ori_shape = img_meta[0]['ori_shape'] scale_factor = img_meta[0]['scale_factor'] # "ms" in variable names means multi-stage ms_bbox_result = {} ms_segm_result = {} ms_scores = [] rcnn_test_cfg = self.test_cfg.rcnn rois = bbox2roi(proposal_list) for i in range(self.num_stages): bbox_head = self.bbox_head[i] cls_score, bbox_pred = self._bbox_forward_test( i, x, rois, semantic_feat=semantic_feat) ms_scores.append(cls_score) if self.test_cfg.keep_all_stages: det_bboxes, det_labels = bbox_head.get_det_bboxes( rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=rescale, cfg=rcnn_test_cfg) bbox_result = bbox2result(det_bboxes, det_labels, bbox_head.num_classes) ms_bbox_result['stage{}'.format(i)] = bbox_result if self.with_mask: mask_head = self.mask_head[i] if det_bboxes.shape[0] == 0: mask_classes = mask_head.num_classes - 1 segm_result = [[] for _ in range(mask_classes)] else: _bboxes = ( 
det_bboxes[:, :4] * scale_factor if rescale else det_bboxes) mask_pred = self._mask_forward_test( i, x, _bboxes, semantic_feat=semantic_feat) segm_result = mask_head.get_seg_masks( mask_pred, _bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor, rescale) ms_segm_result['stage{}'.format(i)] = segm_result if i < self.num_stages - 1: bbox_label = cls_score.argmax(dim=1) rois = bbox_head.regress_by_class(rois, bbox_label, bbox_pred, img_meta[0]) cls_score = sum(ms_scores) / float(len(ms_scores)) det_bboxes, det_labels = self.bbox_head[-1].get_det_bboxes( rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=rescale, cfg=rcnn_test_cfg) bbox_result = bbox2result(det_bboxes, det_labels, self.bbox_head[-1].num_classes) ms_bbox_result['ensemble'] = bbox_result if self.with_mask: if det_bboxes.shape[0] == 0: mask_classes = self.mask_head[-1].num_classes - 1 segm_result = [[] for _ in range(mask_classes)] else: _bboxes = ( det_bboxes[:, :4] * scale_factor if rescale else det_bboxes) mask_rois = bbox2roi([_bboxes]) aug_masks = [] mask_roi_extractor = self.mask_roi_extractor[-1] mask_feats = mask_roi_extractor( x[:len(mask_roi_extractor.featmap_strides)], mask_rois) if self.with_semantic and 'mask' in self.semantic_fusion: mask_semantic_feat = self.semantic_roi_extractor( [semantic_feat], mask_rois) mask_feats += mask_semantic_feat last_feat = None for i in range(self.num_stages): mask_head = self.mask_head[i] if self.mask_info_flow: mask_pred, last_feat = mask_head(mask_feats, last_feat) else: mask_pred = mask_head(mask_feats) aug_masks.append(mask_pred.sigmoid().cpu().numpy()) merged_masks = merge_aug_masks(aug_masks, [img_meta] * self.num_stages, self.test_cfg.rcnn) segm_result = self.mask_head[-1].get_seg_masks( merged_masks, _bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor, rescale) ms_segm_result['ensemble'] = segm_result if not self.test_cfg.keep_all_stages: if self.with_mask: results = (ms_bbox_result['ensemble'], ms_segm_result['ensemble']) else: results = ms_bbox_result['ensemble'] else: if self.with_mask: results = { stage: (ms_bbox_result[stage], ms_segm_result[stage]) for stage in ms_bbox_result } else: results = ms_bbox_result return results def aug_test(self, imgs, img_metas, proposals=None, rescale=False): """Test with augmentations. If rescale is False, then returned bboxes and masks will fit the scale of imgs[0]. 
""" if self.with_semantic: semantic_feats = [ self.semantic_head(feat)[1] for feat in self.extract_feats(imgs) ] else: semantic_feats = [None] * len(img_metas) # recompute feats to save memory proposal_list = self.aug_test_rpn( self.extract_feats(imgs), img_metas, self.test_cfg.rpn) rcnn_test_cfg = self.test_cfg.rcnn aug_bboxes = [] aug_scores = [] for x, img_meta, semantic in zip( self.extract_feats(imgs), img_metas, semantic_feats): # only one image in the batch img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, scale_factor, flip) # "ms" in variable names means multi-stage ms_scores = [] rois = bbox2roi([proposals]) for i in range(self.num_stages): bbox_head = self.bbox_head[i] cls_score, bbox_pred = self._bbox_forward_test( i, x, rois, semantic_feat=semantic) ms_scores.append(cls_score) if i < self.num_stages - 1: bbox_label = cls_score.argmax(dim=1) rois = bbox_head.regress_by_class(rois, bbox_label, bbox_pred, img_meta[0]) cls_score = sum(ms_scores) / float(len(ms_scores)) bboxes, scores = self.bbox_head[-1].get_det_bboxes( rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=False, cfg=None) aug_bboxes.append(bboxes) aug_scores.append(scores) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = merge_aug_bboxes( aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, rcnn_test_cfg.score_thr, rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img) bbox_result = bbox2result(det_bboxes, det_labels, self.bbox_head[-1].num_classes) if self.with_mask: if det_bboxes.shape[0] == 0: segm_result = [[] for _ in range(self.mask_head[-1].num_classes - 1)] else: aug_masks = [] aug_img_metas = [] for x, img_meta, semantic in zip( self.extract_feats(imgs), img_metas, semantic_feats): img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, scale_factor, flip) mask_rois = bbox2roi([_bboxes]) mask_feats = self.mask_roi_extractor[-1]( x[:len(self.mask_roi_extractor[-1].featmap_strides)], mask_rois) if self.with_semantic: semantic_feat = semantic mask_semantic_feat = self.semantic_roi_extractor( [semantic_feat], mask_rois) if mask_semantic_feat.shape[-2:] != mask_feats.shape[ -2:]: mask_semantic_feat = F.adaptive_avg_pool2d( mask_semantic_feat, mask_feats.shape[-2:]) mask_feats += mask_semantic_feat last_feat = None for i in range(self.num_stages): mask_head = self.mask_head[i] if self.mask_info_flow: mask_pred, last_feat = mask_head( mask_feats, last_feat) else: mask_pred = mask_head(mask_feats) aug_masks.append(mask_pred.sigmoid().cpu().numpy()) aug_img_metas.append(img_meta) merged_masks = merge_aug_masks(aug_masks, aug_img_metas, self.test_cfg.rcnn) ori_shape = img_metas[0][0]['ori_shape'] segm_result = self.mask_head[-1].get_seg_masks( merged_masks, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor=1.0, rescale=False) return bbox_result, segm_result else: return bbox_result
24,580
43.210432
79
py
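The HTC test path above ensembles the cascade in two places: classification scores are averaged across the stages, and per-stage mask predictions are merged after a sigmoid (the real merge_aug_masks also handles flips and optional weights). A minimal sketch of those two steps, with made-up tensor shapes rather than the repository's code:

import torch

num_stages, num_rois, num_classes = 3, 5, 8
ms_scores = [torch.rand(num_rois, num_classes) for _ in range(num_stages)]
cls_score = sum(ms_scores) / float(len(ms_scores))        # stage-averaged class scores

aug_masks = [torch.randn(num_rois, 1, 28, 28).sigmoid() for _ in range(num_stages)]
merged_masks = torch.stack(aug_masks).mean(dim=0)          # stage-averaged mask probabilities
binary_masks = merged_masks > 0.5                          # thresholding done later by get_seg_masks
print(cls_score.shape, binary_masks.shape)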
AlignShift
AlignShift-master/mmdet/models/detectors/mask_scoring_rcnn.py
import torch from mmdet.core import bbox2roi, build_assigner, build_sampler from .. import builder from ..registry import DETECTORS from .two_stage import TwoStageDetector @DETECTORS.register_module class MaskScoringRCNN(TwoStageDetector): """Mask Scoring RCNN. https://arxiv.org/abs/1903.00241 """ def __init__(self, backbone, rpn_head, bbox_roi_extractor, bbox_head, mask_roi_extractor, mask_head, train_cfg, test_cfg, neck=None, shared_head=None, mask_iou_head=None, pretrained=None): super(MaskScoringRCNN, self).__init__( backbone=backbone, neck=neck, shared_head=shared_head, rpn_head=rpn_head, bbox_roi_extractor=bbox_roi_extractor, bbox_head=bbox_head, mask_roi_extractor=mask_roi_extractor, mask_head=mask_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained) self.mask_iou_head = builder.build_head(mask_iou_head) self.mask_iou_head.init_weights() def forward_dummy(self, img): raise NotImplementedError # TODO: refactor forward_train in two stage to reduce code redundancy def forward_train(self, img, img_meta, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None): x = self.extract_feat(img) losses = dict() # RPN forward and loss if self.with_rpn: rpn_outs = self.rpn_head(x) rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta, self.train_cfg.rpn) rpn_losses = self.rpn_head.loss( *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore) losses.update(rpn_losses) proposal_cfg = self.train_cfg.get('rpn_proposal', self.test_cfg.rpn) proposal_inputs = rpn_outs + (img_meta, proposal_cfg) proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) else: proposal_list = proposals # assign gts and sample proposals if self.with_bbox or self.with_mask: bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner) bbox_sampler = build_sampler( self.train_cfg.rcnn.sampler, context=self) num_imgs = img.size(0) if gt_bboxes_ignore is None: gt_bboxes_ignore = [None for _ in range(num_imgs)] sampling_results = [] for i in range(num_imgs): assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], gt_labels[i]) sampling_result = bbox_sampler.sample( assign_result, proposal_list[i], gt_bboxes[i], gt_labels[i], feats=[lvl_feat[i][None] for lvl_feat in x]) sampling_results.append(sampling_result) # bbox head forward and loss if self.with_bbox: rois = bbox2roi([res.bboxes for res in sampling_results]) # TODO: a more flexible way to decide which feature maps to use bbox_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) if self.with_shared_head: bbox_feats = self.shared_head(bbox_feats) cls_score, bbox_pred = self.bbox_head(bbox_feats) bbox_targets = self.bbox_head.get_target(sampling_results, gt_bboxes, gt_labels, self.train_cfg.rcnn) loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, *bbox_targets) losses.update(loss_bbox) # mask head forward and loss if self.with_mask: if not self.share_roi_extractor: pos_rois = bbox2roi( [res.pos_bboxes for res in sampling_results]) mask_feats = self.mask_roi_extractor( x[:self.mask_roi_extractor.num_inputs], pos_rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) else: pos_inds = [] device = bbox_feats.device for res in sampling_results: pos_inds.append( torch.ones( res.pos_bboxes.shape[0], device=device, dtype=torch.uint8)) pos_inds.append( torch.zeros( res.neg_bboxes.shape[0], device=device, dtype=torch.uint8)) pos_inds = torch.cat(pos_inds) mask_feats = bbox_feats[pos_inds] mask_pred = self.mask_head(mask_feats) mask_targets = 
self.mask_head.get_target(sampling_results, gt_masks, self.train_cfg.rcnn) pos_labels = torch.cat( [res.pos_gt_labels for res in sampling_results]) loss_mask = self.mask_head.loss(mask_pred, mask_targets, pos_labels) losses.update(loss_mask) # mask iou head forward and loss pos_mask_pred = mask_pred[range(mask_pred.size(0)), pos_labels] mask_iou_pred = self.mask_iou_head(mask_feats, pos_mask_pred) pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)), pos_labels] mask_iou_targets = self.mask_iou_head.get_target( sampling_results, gt_masks, pos_mask_pred, mask_targets, self.train_cfg.rcnn) loss_mask_iou = self.mask_iou_head.loss(pos_mask_iou_pred, mask_iou_targets) losses.update(loss_mask_iou) return losses def simple_test_mask(self, x, img_meta, det_bboxes, det_labels, rescale=False): # image shape of the first image in the batch (only one) ori_shape = img_meta[0]['ori_shape'] scale_factor = img_meta[0]['scale_factor'] if det_bboxes.shape[0] == 0: segm_result = [[] for _ in range(self.mask_head.num_classes - 1)] mask_scores = [[] for _ in range(self.mask_head.num_classes - 1)] else: # if det_bboxes is rescaled to the original image size, we need to # rescale it back to the testing scale to obtain RoIs. _bboxes = ( det_bboxes[:, :4] * scale_factor if rescale else det_bboxes) mask_rois = bbox2roi([_bboxes]) mask_feats = self.mask_roi_extractor( x[:len(self.mask_roi_extractor.featmap_strides)], mask_rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) mask_pred = self.mask_head(mask_feats) segm_result = self.mask_head.get_seg_masks(mask_pred, _bboxes, det_labels, self.test_cfg.rcnn, ori_shape, scale_factor, rescale) # get mask scores with mask iou head mask_iou_pred = self.mask_iou_head( mask_feats, mask_pred[range(det_labels.size(0)), det_labels + 1]) mask_scores = self.mask_iou_head.get_mask_scores( mask_iou_pred, det_bboxes, det_labels) return segm_result, mask_scores
8,580
41.691542
79
py
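MaskScoringRCNN above adds a mask-IoU head whose output re-scores each mask at test time (get_mask_scores). A hedged sketch of the idea, assuming the final mask score is simply the box classification score multiplied by the predicted mask IoU; the shapes are illustrative:

import torch

num_dets = 4
cls_scores = torch.rand(num_dets)            # score of the detected class per box
mask_iou_pred = torch.rand(num_dets)         # predicted IoU of the mask for that class
mask_scores = cls_scores * mask_iou_pred     # re-scored masks (assumed combination rule)
print(mask_scores)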
AlignShift
AlignShift-master/mmdet/models/detectors/test_mixins.py
import torch from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, merge_aug_proposals, multiclass_nms) class RPNTestMixin(object): def simple_test_rpn(self, x, img_meta, rpn_test_cfg): rpn_outs = self.rpn_head(x) proposal_inputs = rpn_outs + (img_meta, rpn_test_cfg) proposal_list = self.rpn_head.get_bboxes(*proposal_inputs) return proposal_list def aug_test_rpn(self, feats, img_metas, rpn_test_cfg): imgs_per_gpu = len(img_metas[0]) aug_proposals = [[] for _ in range(imgs_per_gpu)] for x, img_meta in zip(feats, img_metas): proposal_list = self.simple_test_rpn(x, img_meta, rpn_test_cfg) for i, proposals in enumerate(proposal_list): aug_proposals[i].append(proposals) # reorganize the order of 'img_metas' to match the dimensions # of 'aug_proposals' aug_img_metas = [] for i in range(imgs_per_gpu): aug_img_meta = [] for j in range(len(img_metas)): aug_img_meta.append(img_metas[j][i]) aug_img_metas.append(aug_img_meta) # after merging, proposals will be rescaled to the original image size merged_proposals = [ merge_aug_proposals(proposals, aug_img_meta, rpn_test_cfg) for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas) ] return merged_proposals class BBoxTestMixin(object): def simple_test_bboxes(self, x, img_meta, proposals, rcnn_test_cfg, rescale=False): """Test only det bboxes without augmentation.""" rois = bbox2roi(proposals) roi_feats = self.bbox_roi_extractor( x[:len(self.bbox_roi_extractor.featmap_strides)], rois) if self.with_shared_head: roi_feats = self.shared_head(roi_feats) cls_score, bbox_pred = self.bbox_head(roi_feats) img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] det_bboxes, det_labels = self.bbox_head.get_det_bboxes( rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=rescale, cfg=rcnn_test_cfg) return det_bboxes, det_labels def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg): aug_bboxes = [] aug_scores = [] for x, img_meta in zip(feats, img_metas): # only one image in the batch img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] # TODO more flexible proposals = bbox_mapping(proposal_list[0][:, :4], img_shape, scale_factor, flip) rois = bbox2roi([proposals]) # recompute feature maps to save GPU memory roi_feats = self.bbox_roi_extractor( x[:len(self.bbox_roi_extractor.featmap_strides)], rois) if self.with_shared_head: roi_feats = self.shared_head(roi_feats) cls_score, bbox_pred = self.bbox_head(roi_feats) bboxes, scores = self.bbox_head.get_det_bboxes( rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=False, cfg=None) aug_bboxes.append(bboxes) aug_scores.append(scores) # after merging, bboxes will be rescaled to the original image size merged_bboxes, merged_scores = merge_aug_bboxes( aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores, rcnn_test_cfg.score_thr, rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img) return det_bboxes, det_labels class MaskTestMixin(object): def simple_test_mask(self, x, img_meta, det_bboxes, det_labels, rescale=False): # image shape of the first image in the batch (only one) ori_shape = img_meta[0]['ori_shape'] scale_factor = img_meta[0]['scale_factor'] if det_bboxes.shape[0] == 0: segm_result = [[] for _ in range(self.mask_head.num_classes - 1)] else: # if det_bboxes is rescaled to the original image size, we need to # rescale it back to the testing scale to obtain RoIs. 
if rescale and not isinstance(scale_factor, float): scale_factor = torch.from_numpy(scale_factor).to( det_bboxes.device) _bboxes = ( det_bboxes[:, :4] * scale_factor if rescale else det_bboxes) mask_rois = bbox2roi([_bboxes]) mask_feats = self.mask_roi_extractor( x[:len(self.mask_roi_extractor.featmap_strides)], mask_rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) mask_pred = self.mask_head(mask_feats) segm_result = self.mask_head.get_seg_masks(mask_pred, _bboxes, det_labels, self.test_cfg.rcnn, ori_shape, scale_factor, rescale) return segm_result def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels): if det_bboxes.shape[0] == 0: segm_result = [[] for _ in range(self.mask_head.num_classes - 1)] else: aug_masks = [] for x, img_meta in zip(feats, img_metas): img_shape = img_meta[0]['img_shape'] scale_factor = img_meta[0]['scale_factor'] flip = img_meta[0]['flip'] _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, scale_factor, flip) mask_rois = bbox2roi([_bboxes]) mask_feats = self.mask_roi_extractor( x[:len(self.mask_roi_extractor.featmap_strides)], mask_rois) if self.with_shared_head: mask_feats = self.shared_head(mask_feats) mask_pred = self.mask_head(mask_feats) # convert to numpy array to save memory aug_masks.append(mask_pred.sigmoid().cpu().numpy()) merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg.rcnn) ori_shape = img_metas[0][0]['ori_shape'] segm_result = self.mask_head.get_seg_masks( merged_masks, det_bboxes, det_labels, self.test_cfg.rcnn, ori_shape, scale_factor=1.0, rescale=False) return segm_result
7,380
42.417647
79
py
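Both mixins above funnel boxes through bbox2roi before RoI pooling. A from-scratch sketch of what that helper is assumed to do (prefix each per-image box with its batch index so RoI layers can address the right sample); this is written as an assumption about the imported utility, not its actual source, and it omits the empty-input handling:

import torch

def bbox2roi_sketch(bbox_list):
    rois_list = []
    for img_id, bboxes in enumerate(bbox_list):
        img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)
        rois_list.append(torch.cat([img_inds, bboxes[:, :4]], dim=-1))
    return torch.cat(rois_list, dim=0)

rois = bbox2roi_sketch([torch.rand(3, 4), torch.rand(2, 4)])
print(rois.shape)   # (5, 5): [batch_ind, x1, y1, x2, y2]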
AlignShift
AlignShift-master/mmdet/models/plugins/non_local.py
import torch import torch.nn as nn from mmcv.cnn import constant_init, normal_init from ..utils import ConvModule class NonLocal2D(nn.Module): """Non-local module. See https://arxiv.org/abs/1711.07971 for details. Args: in_channels (int): Channels of the input feature map. reduction (int): Channel reduction ratio. use_scale (bool): Whether to scale pairwise_weight by 1/inter_channels. conv_cfg (dict): The config dict for convolution layers. (only applicable to conv_out) norm_cfg (dict): The config dict for normalization layers. (only applicable to conv_out) mode (str): Options are `embedded_gaussian` and `dot_product`. """ def __init__(self, in_channels, reduction=2, use_scale=True, conv_cfg=None, norm_cfg=None, mode='embedded_gaussian'): super(NonLocal2D, self).__init__() self.in_channels = in_channels self.reduction = reduction self.use_scale = use_scale self.inter_channels = in_channels // reduction self.mode = mode assert mode in ['embedded_gaussian', 'dot_product'] # g, theta, phi are actually `nn.Conv2d`. Here we use ConvModule for # potential usage. self.g = ConvModule( self.in_channels, self.inter_channels, kernel_size=1, activation=None) self.theta = ConvModule( self.in_channels, self.inter_channels, kernel_size=1, activation=None) self.phi = ConvModule( self.in_channels, self.inter_channels, kernel_size=1, activation=None) self.conv_out = ConvModule( self.inter_channels, self.in_channels, kernel_size=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, activation=None) self.init_weights() def init_weights(self, std=0.01, zeros_init=True): for m in [self.g, self.theta, self.phi]: normal_init(m.conv, std=std) if zeros_init: constant_init(self.conv_out.conv, 0) else: normal_init(self.conv_out.conv, std=std) def embedded_gaussian(self, theta_x, phi_x): # pairwise_weight: [N, HxW, HxW] pairwise_weight = torch.matmul(theta_x, phi_x) if self.use_scale: # theta_x.shape[-1] is `self.inter_channels` pairwise_weight /= theta_x.shape[-1]**0.5 pairwise_weight = pairwise_weight.softmax(dim=-1) return pairwise_weight def dot_product(self, theta_x, phi_x): # pairwise_weight: [N, HxW, HxW] pairwise_weight = torch.matmul(theta_x, phi_x) pairwise_weight /= pairwise_weight.shape[-1] return pairwise_weight def forward(self, x): n, _, h, w = x.shape # g_x: [N, HxW, C] g_x = self.g(x).view(n, self.inter_channels, -1) g_x = g_x.permute(0, 2, 1) # theta_x: [N, HxW, C] theta_x = self.theta(x).view(n, self.inter_channels, -1) theta_x = theta_x.permute(0, 2, 1) # phi_x: [N, C, HxW] phi_x = self.phi(x).view(n, self.inter_channels, -1) pairwise_func = getattr(self, self.mode) # pairwise_weight: [N, HxW, HxW] pairwise_weight = pairwise_func(theta_x, phi_x) # y: [N, HxW, C] y = torch.matmul(pairwise_weight, g_x) # y: [N, C, H, W] y = y.permute(0, 2, 1).reshape(n, self.inter_channels, h, w) output = x + self.conv_out(y) return output
3,708
31.252174
79
py
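A compact tensor-level sketch of the embedded-gaussian branch implemented by NonLocal2D above; the 1x1 ConvModule projections are replaced by random tensors, and the channel/spatial sizes are illustrative:

import torch

n, c, h, w = 2, 16, 8, 8
theta = torch.rand(n, h * w, c)                     # queries  [N, HxW, C']
phi = torch.rand(n, c, h * w)                       # keys     [N, C', HxW]
g = torch.rand(n, h * w, c)                         # values   [N, HxW, C']

weight = torch.matmul(theta, phi) / c ** 0.5        # scaled pairwise affinity
weight = weight.softmax(dim=-1)                     # embedded gaussian
y = torch.matmul(weight, g)                         # [N, HxW, C']
y = y.permute(0, 2, 1).reshape(n, c, h, w)          # back to a feature map
print(y.shape)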
AlignShift
AlignShift-master/mmdet/models/plugins/generalized_attention.py
import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import kaiming_init class GeneralizedAttention(nn.Module): """GeneralizedAttention module. See 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks' (https://arxiv.org/abs/1711.07971) for details. Args: in_dim (int): Channels of the input feature map. spatial_range (int): The spatial range. -1 indicates no spatial range constraint. num_heads (int): The head number of empirical_attention module. position_embedding_dim (int): The position embedding dimension. position_magnitude (int): A multiplier acting on coord difference. kv_stride (int): The feature stride acting on key/value feature map. q_stride (int): The feature stride acting on query feature map. attention_type (str): A binary indicator string for indicating which items in generalized empirical_attention module are used. '1000' indicates 'query and key content' (appr - appr) item, '0100' indicates 'query content and relative position' (appr - position) item, '0010' indicates 'key content only' (bias - appr) item, '0001' indicates 'relative position only' (bias - position) item. """ def __init__(self, in_dim, spatial_range=-1, num_heads=9, position_embedding_dim=-1, position_magnitude=1, kv_stride=2, q_stride=1, attention_type='1111'): super(GeneralizedAttention, self).__init__() # hard range means local range for non-local operation self.position_embedding_dim = ( position_embedding_dim if position_embedding_dim > 0 else in_dim) self.position_magnitude = position_magnitude self.num_heads = num_heads self.channel_in = in_dim self.spatial_range = spatial_range self.kv_stride = kv_stride self.q_stride = q_stride self.attention_type = [bool(int(_)) for _ in attention_type] self.qk_embed_dim = in_dim // num_heads out_c = self.qk_embed_dim * num_heads if self.attention_type[0] or self.attention_type[1]: self.query_conv = nn.Conv2d( in_channels=in_dim, out_channels=out_c, kernel_size=1, bias=False) self.query_conv.kaiming_init = True if self.attention_type[0] or self.attention_type[2]: self.key_conv = nn.Conv2d( in_channels=in_dim, out_channels=out_c, kernel_size=1, bias=False) self.key_conv.kaiming_init = True self.v_dim = in_dim // num_heads self.value_conv = nn.Conv2d( in_channels=in_dim, out_channels=self.v_dim * num_heads, kernel_size=1, bias=False) self.value_conv.kaiming_init = True if self.attention_type[1] or self.attention_type[3]: self.appr_geom_fc_x = nn.Linear( self.position_embedding_dim // 2, out_c, bias=False) self.appr_geom_fc_x.kaiming_init = True self.appr_geom_fc_y = nn.Linear( self.position_embedding_dim // 2, out_c, bias=False) self.appr_geom_fc_y.kaiming_init = True if self.attention_type[2]: stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2) appr_bias_value = -2 * stdv * torch.rand(out_c) + stdv self.appr_bias = nn.Parameter(appr_bias_value) if self.attention_type[3]: stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2) geom_bias_value = -2 * stdv * torch.rand(out_c) + stdv self.geom_bias = nn.Parameter(geom_bias_value) self.proj_conv = nn.Conv2d( in_channels=self.v_dim * num_heads, out_channels=in_dim, kernel_size=1, bias=True) self.proj_conv.kaiming_init = True self.gamma = nn.Parameter(torch.zeros(1)) if self.spatial_range >= 0: # only works when non local is after 3*3 conv if in_dim == 256: max_len = 84 elif in_dim == 512: max_len = 42 max_len_kv = int((max_len - 1.0) / self.kv_stride + 1) local_constraint_map = np.ones( (max_len, max_len, max_len_kv, max_len_kv), dtype=np.int) for iy in 
range(max_len): for ix in range(max_len): local_constraint_map[ iy, ix, max((iy - self.spatial_range) // self.kv_stride, 0):min((iy + self.spatial_range + 1) // self.kv_stride + 1, max_len), max((ix - self.spatial_range) // self.kv_stride, 0):min((ix + self.spatial_range + 1) // self.kv_stride + 1, max_len)] = 0 self.local_constraint_map = nn.Parameter( torch.from_numpy(local_constraint_map).byte(), requires_grad=False) if self.q_stride > 1: self.q_downsample = nn.AvgPool2d( kernel_size=1, stride=self.q_stride) else: self.q_downsample = None if self.kv_stride > 1: self.kv_downsample = nn.AvgPool2d( kernel_size=1, stride=self.kv_stride) else: self.kv_downsample = None self.init_weights() def get_position_embedding(self, h, w, h_kv, w_kv, q_stride, kv_stride, device, feat_dim, wave_length=1000): h_idxs = torch.linspace(0, h - 1, h).cuda(device) h_idxs = h_idxs.view((h, 1)) * q_stride w_idxs = torch.linspace(0, w - 1, w).cuda(device) w_idxs = w_idxs.view((w, 1)) * q_stride h_kv_idxs = torch.linspace(0, h_kv - 1, h_kv).cuda(device) h_kv_idxs = h_kv_idxs.view((h_kv, 1)) * kv_stride w_kv_idxs = torch.linspace(0, w_kv - 1, w_kv).cuda(device) w_kv_idxs = w_kv_idxs.view((w_kv, 1)) * kv_stride # (h, h_kv, 1) h_diff = h_idxs.unsqueeze(1) - h_kv_idxs.unsqueeze(0) h_diff *= self.position_magnitude # (w, w_kv, 1) w_diff = w_idxs.unsqueeze(1) - w_kv_idxs.unsqueeze(0) w_diff *= self.position_magnitude feat_range = torch.arange(0, feat_dim / 4).cuda(device) dim_mat = torch.Tensor([wave_length]).cuda(device) dim_mat = dim_mat**((4. / feat_dim) * feat_range) dim_mat = dim_mat.view((1, 1, -1)) embedding_x = torch.cat( ((w_diff / dim_mat).sin(), (w_diff / dim_mat).cos()), dim=2) embedding_y = torch.cat( ((h_diff / dim_mat).sin(), (h_diff / dim_mat).cos()), dim=2) return embedding_x, embedding_y def forward(self, x_input): num_heads = self.num_heads # use empirical_attention if self.q_downsample is not None: x_q = self.q_downsample(x_input) else: x_q = x_input n, _, h, w = x_q.shape if self.kv_downsample is not None: x_kv = self.kv_downsample(x_input) else: x_kv = x_input _, _, h_kv, w_kv = x_kv.shape if self.attention_type[0] or self.attention_type[1]: proj_query = self.query_conv(x_q).view( (n, num_heads, self.qk_embed_dim, h * w)) proj_query = proj_query.permute(0, 1, 3, 2) if self.attention_type[0] or self.attention_type[2]: proj_key = self.key_conv(x_kv).view( (n, num_heads, self.qk_embed_dim, h_kv * w_kv)) if self.attention_type[1] or self.attention_type[3]: position_embed_x, position_embed_y = self.get_position_embedding( h, w, h_kv, w_kv, self.q_stride, self.kv_stride, x_input.device, self.position_embedding_dim) # (n, num_heads, w, w_kv, dim) position_feat_x = self.appr_geom_fc_x(position_embed_x).\ view(1, w, w_kv, num_heads, self.qk_embed_dim).\ permute(0, 3, 1, 2, 4).\ repeat(n, 1, 1, 1, 1) # (n, num_heads, h, h_kv, dim) position_feat_y = self.appr_geom_fc_y(position_embed_y).\ view(1, h, h_kv, num_heads, self.qk_embed_dim).\ permute(0, 3, 1, 2, 4).\ repeat(n, 1, 1, 1, 1) position_feat_x /= math.sqrt(2) position_feat_y /= math.sqrt(2) # accelerate for saliency only if (np.sum(self.attention_type) == 1) and self.attention_type[2]: appr_bias = self.appr_bias.\ view(1, num_heads, 1, self.qk_embed_dim).\ repeat(n, 1, 1, 1) energy = torch.matmul(appr_bias, proj_key).\ view(n, num_heads, 1, h_kv * w_kv) h = 1 w = 1 else: # (n, num_heads, h*w, h_kv*w_kv), query before key, 540mb for if not self.attention_type[0]: energy = torch.zeros( n, num_heads, h, w, h_kv, w_kv, dtype=x_input.dtype, 
device=x_input.device) # attention_type[0]: appr - appr # attention_type[1]: appr - position # attention_type[2]: bias - appr # attention_type[3]: bias - position if self.attention_type[0] or self.attention_type[2]: if self.attention_type[0] and self.attention_type[2]: appr_bias = self.appr_bias.\ view(1, num_heads, 1, self.qk_embed_dim) energy = torch.matmul(proj_query + appr_bias, proj_key).\ view(n, num_heads, h, w, h_kv, w_kv) elif self.attention_type[0]: energy = torch.matmul(proj_query, proj_key).\ view(n, num_heads, h, w, h_kv, w_kv) elif self.attention_type[2]: appr_bias = self.appr_bias.\ view(1, num_heads, 1, self.qk_embed_dim).\ repeat(n, 1, 1, 1) energy += torch.matmul(appr_bias, proj_key).\ view(n, num_heads, 1, 1, h_kv, w_kv) if self.attention_type[1] or self.attention_type[3]: if self.attention_type[1] and self.attention_type[3]: geom_bias = self.geom_bias.\ view(1, num_heads, 1, self.qk_embed_dim) proj_query_reshape = (proj_query + geom_bias).\ view(n, num_heads, h, w, self.qk_embed_dim) energy_x = torch.matmul( proj_query_reshape.permute(0, 1, 3, 2, 4), position_feat_x.permute(0, 1, 2, 4, 3)) energy_x = energy_x.\ permute(0, 1, 3, 2, 4).unsqueeze(4) energy_y = torch.matmul( proj_query_reshape, position_feat_y.permute(0, 1, 2, 4, 3)) energy_y = energy_y.unsqueeze(5) energy += energy_x + energy_y elif self.attention_type[1]: proj_query_reshape = proj_query.\ view(n, num_heads, h, w, self.qk_embed_dim) proj_query_reshape = proj_query_reshape.\ permute(0, 1, 3, 2, 4) position_feat_x_reshape = position_feat_x.\ permute(0, 1, 2, 4, 3) position_feat_y_reshape = position_feat_y.\ permute(0, 1, 2, 4, 3) energy_x = torch.matmul(proj_query_reshape, position_feat_x_reshape) energy_x = energy_x.permute(0, 1, 3, 2, 4).unsqueeze(4) energy_y = torch.matmul(proj_query_reshape, position_feat_y_reshape) energy_y = energy_y.unsqueeze(5) energy += energy_x + energy_y elif self.attention_type[3]: geom_bias = self.geom_bias.\ view(1, num_heads, self.qk_embed_dim, 1).\ repeat(n, 1, 1, 1) position_feat_x_reshape = position_feat_x.\ view(n, num_heads, w*w_kv, self.qk_embed_dim) position_feat_y_reshape = position_feat_y.\ view(n, num_heads, h * h_kv, self.qk_embed_dim) energy_x = torch.matmul(position_feat_x_reshape, geom_bias) energy_x = energy_x.view(n, num_heads, 1, w, 1, w_kv) energy_y = torch.matmul(position_feat_y_reshape, geom_bias) energy_y = energy_y.view(n, num_heads, h, 1, h_kv, 1) energy += energy_x + energy_y energy = energy.view(n, num_heads, h * w, h_kv * w_kv) if self.spatial_range >= 0: cur_local_constraint_map = \ self.local_constraint_map[:h, :w, :h_kv, :w_kv].\ contiguous().\ view(1, 1, h*w, h_kv*w_kv) energy = energy.masked_fill_(cur_local_constraint_map, float('-inf')) attention = F.softmax(energy, 3) proj_value = self.value_conv(x_kv) proj_value_reshape = proj_value.\ view((n, num_heads, self.v_dim, h_kv * w_kv)).\ permute(0, 1, 3, 2) out = torch.matmul(attention, proj_value_reshape).\ permute(0, 1, 3, 2).\ contiguous().\ view(n, self.v_dim * self.num_heads, h, w) out = self.proj_conv(out) out = self.gamma * out + x_input return out def init_weights(self): for m in self.modules(): if hasattr(m, 'kaiming_init') and m.kaiming_init: kaiming_init( m, mode='fan_in', nonlinearity='leaky_relu', bias=0, distribution='uniform', a=1)
15,004
38.075521
79
py
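The attention_type bit-string above toggles which of the four energy terms are summed. A tiny sketch of how the flags are parsed and what each position means, following the docstring:

attention_type = '1011'
use_term = [bool(int(ch)) for ch in attention_type]
terms = ['query and key content (appr - appr)',
         'query content and relative position (appr - position)',
         'key content only (bias - appr)',
         'relative position only (bias - position)']
active = [t for t, u in zip(terms, use_term) if u]
print(active)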
AlignShift
AlignShift-master/mmdet/models/necks/fpn.py
import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import xavier_init from mmdet.core import auto_fp16 from ..registry import NECKS from ..utils import ConvModule @NECKS.register_module class FPN(nn.Module): def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=-1, add_extra_convs=False, extra_convs_on_inputs=True, relu_before_extra_convs=False, no_norm_on_lateral=False, conv_cfg=None, norm_cfg=None, activation=None): super(FPN, self).__init__() assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) self.num_outs = num_outs self.activation = activation self.relu_before_extra_convs = relu_before_extra_convs self.no_norm_on_lateral = no_norm_on_lateral self.fp16_enabled = False if end_level == -1: self.backbone_end_level = self.num_ins assert num_outs >= self.num_ins - start_level else: # if end_level < inputs, no extra level is allowed self.backbone_end_level = end_level assert end_level <= len(in_channels) assert num_outs == end_level - start_level self.start_level = start_level self.end_level = end_level self.add_extra_convs = add_extra_convs self.extra_convs_on_inputs = extra_convs_on_inputs self.lateral_convs = nn.ModuleList() self.fpn_convs = nn.ModuleList() for i in range(self.start_level, self.backbone_end_level): l_conv = ConvModule( in_channels[i], out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, activation=self.activation, inplace=False) fpn_conv = ConvModule( out_channels, out_channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, activation=self.activation, inplace=False) self.lateral_convs.append(l_conv) self.fpn_convs.append(fpn_conv) # add extra conv layers (e.g., RetinaNet) extra_levels = num_outs - self.backbone_end_level + self.start_level if add_extra_convs and extra_levels >= 1: for i in range(extra_levels): if i == 0 and self.extra_convs_on_inputs: in_channels = self.in_channels[self.backbone_end_level - 1] else: in_channels = out_channels extra_fpn_conv = ConvModule( in_channels, out_channels, 3, stride=2, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, activation=self.activation, inplace=False) self.fpn_convs.append(extra_fpn_conv) # default init_weights for conv(msra) and norm in ConvModule def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): xavier_init(m, distribution='uniform') @auto_fp16() def forward(self, inputs): assert len(inputs) == len(self.in_channels) # build laterals laterals = [ lateral_conv(inputs[i + self.start_level]) for i, lateral_conv in enumerate(self.lateral_convs) ] # build top-down path used_backbone_levels = len(laterals) for i in range(used_backbone_levels - 1, 0, -1): laterals[i - 1] += F.interpolate( laterals[i], scale_factor=2, mode='nearest') # build outputs # part 1: from original levels outs = [ self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) ] # part 2: add extra levels if self.num_outs > len(outs): # use max pool to get more levels on top of outputs # (e.g., Faster R-CNN, Mask R-CNN) if not self.add_extra_convs: for i in range(self.num_outs - used_backbone_levels): outs.append(F.max_pool2d(outs[-1], 1, stride=2)) # add conv layers on top of original feature maps (RetinaNet) else: if self.extra_convs_on_inputs: orig = inputs[self.backbone_end_level - 1] outs.append(self.fpn_convs[used_backbone_levels](orig)) else: outs.append(self.fpn_convs[used_backbone_levels](outs[-1])) for i in range(used_backbone_levels + 1, 
self.num_outs): if self.relu_before_extra_convs: outs.append(self.fpn_convs[i](F.relu(outs[-1]))) else: outs.append(self.fpn_convs[i](outs[-1])) return tuple(outs)
5,289
36.253521
79
py
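A minimal sketch of the top-down pathway in FPN.forward above, using bare nn.Conv2d in place of ConvModule and omitting the extra output levels; channel counts and spatial sizes are arbitrary:

import torch
import torch.nn as nn
import torch.nn.functional as F

in_channels, out_channels = [64, 128, 256], 32
lateral_convs = nn.ModuleList([nn.Conv2d(c, out_channels, 1) for c in in_channels])
fpn_convs = nn.ModuleList([nn.Conv2d(out_channels, out_channels, 3, padding=1)
                           for _ in in_channels])

feats = [torch.rand(1, c, 32 // 2 ** i, 32 // 2 ** i)
         for i, c in enumerate(in_channels)]
laterals = [conv(x) for conv, x in zip(lateral_convs, feats)]
for i in range(len(laterals) - 1, 0, -1):            # top-down accumulation
    laterals[i - 1] = laterals[i - 1] + F.interpolate(
        laterals[i], scale_factor=2, mode='nearest')
outs = [conv(l) for conv, l in zip(fpn_convs, laterals)]
print([o.shape for o in outs])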
AlignShift
AlignShift-master/mmdet/models/necks/just_one_outs_neck.py
import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import xavier_init from mmcv.cnn.weight_init import caffe2_xavier_init from mmdet.core import auto_fp16 from ..registry import NECKS from ..utils import ConvModule import torch import gc @NECKS.register_module class FPNLast(nn.Module): def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=-1, add_extra_convs=False, extra_convs_on_inputs=True, relu_before_extra_convs=False, no_norm_on_lateral=False, conv_cfg=None, norm_cfg=None, activation=None): super().__init__() assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) self.num_outs = num_outs self.activation = activation self.relu_before_extra_convs = relu_before_extra_convs self.no_norm_on_lateral = no_norm_on_lateral self.fp16_enabled = False if end_level == -1: self.backbone_end_level = self.num_ins #assert num_outs >= self.num_ins - start_level else: # if end_level < inputs, no extra level is allowed raise 'please dont.' self.start_level = start_level self.end_level = end_level self.add_extra_convs = add_extra_convs self.extra_convs_on_inputs = extra_convs_on_inputs self.lateral_convs = nn.ModuleList() self.fpn_convs = nn.ModuleList() for i in range(self.start_level, self.backbone_end_level): l_conv = ConvModule( in_channels[i], out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, activation=self.activation, inplace=False) self.lateral_convs.append(l_conv) for i in range(1): fpn_conv = ConvModule( out_channels, out_channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, activation=self.activation, inplace=False) self.fpn_convs.append(fpn_conv) # add extra conv layers (e.g., RetinaNet) # default init_weights for conv(msra) and norm in ConvModule def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): xavier_init(m, distribution='uniform') @auto_fp16() def forward(self, inputs): # assert len(inputs) == len(self.in_channels) # build laterals laterals = [ lateral_conv(inputs[i + self.start_level]) for i, lateral_conv in enumerate(self.lateral_convs) ] # build top-down path used_backbone_levels = len(laterals) for i in range(used_backbone_levels - 1, 0, -1): laterals[i - 1] += F.interpolate( laterals[i], scale_factor=2, mode='nearest') # build outputs # part 1: from original levels # outs = [ # self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) # ] for l in self.fpn_convs: laterals[0] = l(laterals[0]) return tuple([laterals[0]]) @NECKS.register_module class FPNCatLast(nn.Module): def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=-1, add_extra_convs=False, extra_convs_on_inputs=True, relu_before_extra_convs=False, no_norm_on_lateral=False, conv_cfg=None, norm_cfg=None, activation=None): super().__init__() assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) self.num_outs = num_outs self.activation = activation self.relu_before_extra_convs = relu_before_extra_convs self.no_norm_on_lateral = no_norm_on_lateral self.fp16_enabled = False if end_level == -1: self.backbone_end_level = self.num_ins #assert num_outs >= self.num_ins - start_level else: # if end_level < inputs, no extra level is allowed raise 'please dont.' 
self.start_level = start_level self.end_level = end_level self.add_extra_convs = add_extra_convs self.extra_convs_on_inputs = extra_convs_on_inputs self.lateral_convs = nn.ModuleList() self.fpn_convs = nn.ModuleList() for i in range(self.start_level, self.backbone_end_level): l_conv = ConvModule( in_channels[i], out_channels, 1, conv_cfg=conv_cfg, norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, activation=self.activation, inplace=False) self.lateral_convs.append(l_conv) fpn_conv = ConvModule( out_channels*(len(self.lateral_convs)-1), out_channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, activation=self.activation, inplace=False) self.fpn_convs.append(fpn_conv) # default init_weights for conv(msra) and norm in ConvModule def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): xavier_init(m, distribution='uniform') @auto_fp16() def forward(self, inputs): assert len(inputs) == len(self.in_channels) # build laterals laterals = [ lateral_conv(inputs[i + self.start_level]) for i, lateral_conv in enumerate(self.lateral_convs) ] # build top-down path used_backbone_levels = len(laterals) out = laterals[-1] for i in range(used_backbone_levels - 2, 0, -1): out = torch.cat([laterals[i], F.interpolate(out, scale_factor=2, mode='nearest')], dim=1) # build outputs # part 1: from original levels # outs = [ # self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) # ] return tuple([self.fpn_convs[0](out)]) @NECKS.register_module class HRFPNLast(nn.Module): """HRFPN (High Resolution Feature Pyrmamids) arXiv: https://arxiv.org/abs/1904.04514 Args: in_channels (list): number of channels for each branch. out_channels (int): output channels of feature pyramids. num_outs (int): number of output stages. pooling_type (str): pooling for generating feature pyramids from {MAX, AVG}. conv_cfg (dict): dictionary to construct and config conv layer. norm_cfg (dict): dictionary to construct and config norm layer. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. 
stride (int): stride of 3x3 convolutional layers """ def __init__(self, in_channels, out_channels, num_outs=1, pooling_type='AVG', conv_cfg=None, norm_cfg=None, with_cp=False, stride=1): super().__init__() assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) self.num_outs = num_outs self.with_cp = with_cp self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.reduction_conv = ConvModule( sum(in_channels), out_channels, kernel_size=1, conv_cfg=self.conv_cfg, activation=None) self.fpn_convs = nn.ModuleList() for i in range(self.num_outs): self.fpn_convs.append( ConvModule( out_channels, out_channels, kernel_size=3, padding=1, stride=stride, conv_cfg=self.conv_cfg, activation=None)) if pooling_type == 'MAX': self.pooling = F.max_pool2d else: self.pooling = F.avg_pool2d def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): caffe2_xavier_init(m) def forward(self, inputs): assert len(inputs) == self.num_ins outs = inputs[0] for i in range(1, self.num_ins): del inputs[0] gc.collect() outs = torch.cat([outs, F.interpolate(inputs[0], scale_factor=2**i, mode='bilinear')], dim=1) if outs.requires_grad and self.with_cp: outs = checkpoint(self.reduction_conv, outs) else: outs = self.reduction_conv(outs) # outs = [out] # for i in range(1, self.num_outs): # outs.append(self.pooling(out, kernel_size=2**i, stride=2**i)) # outputs = [] # for i in range(self.num_outs): # if outs[i].requires_grad and self.with_cp: # tmp_out = checkpoint(self.fpn_convs[i], outs[i]) # else: # tmp_out = self.fpn_convs[i](outs[i]) # outputs.append(tmp_out) return tuple([self.fpn_convs[0](outs)]) @NECKS.register_module class MyFPN(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() for p in range(4, 0, -1): layer = nn.Conv2d(in_channels[p - 1], out_channels, 1) name = 'lateral%d' % p self.add_module(name, layer) def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_uniform_(m.weight, a=1) nn.init.constant_(m.bias, 0) def forward(self, inputs): x = self.lateral4(inputs[-1]) for p in range(3, 0, -1): x = F.interpolate(x, scale_factor=2, mode="nearest") lateral = getattr(self, 'lateral%d' % p)(inputs[p-1]) x += lateral return tuple([x]) @NECKS.register_module class MyFPN3(nn.Module): def __init__(self, in_channels, out_channels): super().__init__() for p in range(3, 0, -1): layer = nn.Conv2d(in_channels[p - 1], out_channels, 1) name = 'lateral%d' % p self.add_module(name, layer) def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_uniform_(m.weight, a=1) nn.init.constant_(m.bias, 0) def forward(self, inputs): x = self.lateral3(inputs[-2]) for p in range(2, 0, -1): x = F.interpolate(x, scale_factor=2, mode="nearest") lateral = getattr(self, 'lateral%d' % p)(inputs[p-1]) x += lateral return tuple([x])
11,514
32.184438
105
py
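FPNCatLast and HRFPNLast above both collapse the pyramid into a single high-resolution map by upsampling every level to the finest scale and concatenating along channels before a reduction conv. A sketch of that aggregation step only, with illustrative sizes:

import torch
import torch.nn.functional as F

feats = [torch.rand(1, 16, 64 // 2 ** i, 64 // 2 ** i) for i in range(4)]
upsampled = [feats[0]] + [
    F.interpolate(f, scale_factor=2 ** i, mode='bilinear', align_corners=False)
    for i, f in enumerate(feats) if i > 0
]
merged = torch.cat(upsampled, dim=1)      # (1, 16 * 4, 64, 64), then a 1x1/3x3 conv reduces it
print(merged.shape)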
AlignShift
AlignShift-master/mmdet/models/necks/bfp.py
import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import xavier_init from ..plugins import NonLocal2D from ..registry import NECKS from ..utils import ConvModule @NECKS.register_module class BFP(nn.Module): """BFP (Balanced Feature Pyrmamids) BFP takes multi-level features as inputs and gather them into a single one, then refine the gathered feature and scatter the refined results to multi-level features. This module is used in Libra R-CNN (CVPR 2019), see https://arxiv.org/pdf/1904.02701.pdf for details. Args: in_channels (int): Number of input channels (feature maps of all levels should have the same channels). num_levels (int): Number of input feature levels. conv_cfg (dict): The config dict for convolution layers. norm_cfg (dict): The config dict for normalization layers. refine_level (int): Index of integration and refine level of BSF in multi-level features from bottom to top. refine_type (str): Type of the refine op, currently support [None, 'conv', 'non_local']. """ def __init__(self, in_channels, num_levels, refine_level=2, refine_type=None, conv_cfg=None, norm_cfg=None): super(BFP, self).__init__() assert refine_type in [None, 'conv', 'non_local'] self.in_channels = in_channels self.num_levels = num_levels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.refine_level = refine_level self.refine_type = refine_type assert 0 <= self.refine_level < self.num_levels if self.refine_type == 'conv': self.refine = ConvModule( self.in_channels, self.in_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) elif self.refine_type == 'non_local': self.refine = NonLocal2D( self.in_channels, reduction=1, use_scale=False, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg) def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): xavier_init(m, distribution='uniform') def forward(self, inputs): assert len(inputs) == self.num_levels # step 1: gather multi-level features by resize and average feats = [] gather_size = inputs[self.refine_level].size()[2:] for i in range(self.num_levels): if i < self.refine_level: gathered = F.adaptive_max_pool2d( inputs[i], output_size=gather_size) else: gathered = F.interpolate( inputs[i], size=gather_size, mode='nearest') feats.append(gathered) bsf = sum(feats) / len(feats) # step 2: refine gathered features if self.refine_type is not None: bsf = self.refine(bsf) # step 3: scatter refined features to multi-levels by a residual path outs = [] for i in range(self.num_levels): out_size = inputs[i].size()[2:] if i < self.refine_level: residual = F.interpolate(bsf, size=out_size, mode='nearest') else: residual = F.adaptive_max_pool2d(bsf, output_size=out_size) outs.append(residual + inputs[i]) return tuple(outs)
3,598
33.941748
79
py
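A sketch of the gather/refine/scatter flow in BFP.forward above with refine_type=None, so the refine step is a no-op; sizes are illustrative:

import torch
import torch.nn.functional as F

feats = [torch.rand(1, 8, 64 // 2 ** i, 64 // 2 ** i) for i in range(4)]
refine_level = 2
gather_size = feats[refine_level].shape[2:]

gathered = []
for i, f in enumerate(feats):                 # step 1: resize all levels to the refine level
    if i < refine_level:
        gathered.append(F.adaptive_max_pool2d(f, output_size=gather_size))
    else:
        gathered.append(F.interpolate(f, size=gather_size, mode='nearest'))
bsf = sum(gathered) / len(gathered)           # balanced semantic feature

outs = []
for i, f in enumerate(feats):                 # step 3: scatter back through a residual path
    out_size = f.shape[2:]
    if i < refine_level:
        residual = F.interpolate(bsf, size=out_size, mode='nearest')
    else:
        residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
    outs.append(f + residual)
print([o.shape for o in outs])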
AlignShift
AlignShift-master/mmdet/models/necks/hrfpn.py
import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn.weight_init import caffe2_xavier_init from torch.utils.checkpoint import checkpoint from ..registry import NECKS from ..utils import ConvModule @NECKS.register_module class HRFPN(nn.Module): """HRFPN (High Resolution Feature Pyrmamids) arXiv: https://arxiv.org/abs/1904.04514 Args: in_channels (list): number of channels for each branch. out_channels (int): output channels of feature pyramids. num_outs (int): number of output stages. pooling_type (str): pooling for generating feature pyramids from {MAX, AVG}. conv_cfg (dict): dictionary to construct and config conv layer. norm_cfg (dict): dictionary to construct and config norm layer. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. stride (int): stride of 3x3 convolutional layers """ def __init__(self, in_channels, out_channels, num_outs=5, pooling_type='AVG', conv_cfg=None, norm_cfg=None, with_cp=False, stride=1): super(HRFPN, self).__init__() assert isinstance(in_channels, list) self.in_channels = in_channels self.out_channels = out_channels self.num_ins = len(in_channels) self.num_outs = num_outs self.with_cp = with_cp self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.reduction_conv = ConvModule( sum(in_channels), out_channels, kernel_size=1, conv_cfg=self.conv_cfg, activation=None) self.fpn_convs = nn.ModuleList() for i in range(self.num_outs): self.fpn_convs.append( ConvModule( out_channels, out_channels, kernel_size=3, padding=1, stride=stride, conv_cfg=self.conv_cfg, activation=None)) if pooling_type == 'MAX': self.pooling = F.max_pool2d else: self.pooling = F.avg_pool2d def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): caffe2_xavier_init(m) def forward(self, inputs): assert len(inputs) == self.num_ins outs = [inputs[0]] for i in range(1, self.num_ins): outs.append( F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear')) out = torch.cat(outs, dim=1) if out.requires_grad and self.with_cp: out = checkpoint(self.reduction_conv, out) else: out = self.reduction_conv(out) outs = [out] for i in range(1, self.num_outs): outs.append(self.pooling(out, kernel_size=2**i, stride=2**i)) outputs = [] for i in range(self.num_outs): if outs[i].requires_grad and self.with_cp: tmp_out = checkpoint(self.fpn_convs[i], outs[i]) else: tmp_out = self.fpn_convs[i](outs[i]) outputs.append(tmp_out) return tuple(outputs)
3,363
32.306931
79
py
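After merging and reducing the branches, HRFPN above rebuilds a pyramid by pooling the reduced map with kernel_size = stride = 2**i. A sketch of that pyramid step, starting from an already-merged feature:

import torch
import torch.nn.functional as F

out = torch.rand(1, 32, 64, 64)               # merged + reduced feature (assumed shape)
num_outs = 5
pyramid = [out] + [F.avg_pool2d(out, kernel_size=2 ** i, stride=2 ** i)
                   for i in range(1, num_outs)]
print([p.shape for p in pyramid])             # spatial sizes 64, 32, 16, 8, 4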
AlignShift
AlignShift-master/mmdet/models/roi_extractors/single_level.py
from __future__ import division import torch import torch.nn as nn from mmdet import ops from mmdet.core import force_fp32 from ..registry import ROI_EXTRACTORS @ROI_EXTRACTORS.register_module class SingleRoIExtractor(nn.Module): """Extract RoI features from a single level feature map. If there are mulitple input feature levels, each RoI is mapped to a level according to its scale. Args: roi_layer (dict): Specify RoI layer type and arguments. out_channels (int): Output channels of RoI layers. featmap_strides (int): Strides of input feature maps. finest_scale (int): Scale threshold of mapping to level 0. """ def __init__(self, roi_layer, out_channels, featmap_strides, finest_scale=56): super(SingleRoIExtractor, self).__init__() self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides) self.out_channels = out_channels self.featmap_strides = featmap_strides self.finest_scale = finest_scale self.fp16_enabled = False @property def num_inputs(self): """int: Input feature map levels.""" return len(self.featmap_strides) def init_weights(self): pass def build_roi_layers(self, layer_cfg, featmap_strides): cfg = layer_cfg.copy() layer_type = cfg.pop('type') assert hasattr(ops, layer_type) layer_cls = getattr(ops, layer_type) roi_layers = nn.ModuleList( [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides]) return roi_layers def map_roi_levels(self, rois, num_levels): """Map rois to corresponding feature levels by scales. - scale < finest_scale * 2: level 0 - finest_scale * 2 <= scale < finest_scale * 4: level 1 - finest_scale * 4 <= scale < finest_scale * 8: level 2 - scale >= finest_scale * 8: level 3 Args: rois (Tensor): Input RoIs, shape (k, 5). num_levels (int): Total level number. Returns: Tensor: Level index (0-based) of each RoI, shape (k, ) """ scale = torch.sqrt( (rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1)) target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6)) target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long() return target_lvls def roi_rescale(self, rois, scale_factor): cx = (rois[:, 1] + rois[:, 3]) * 0.5 cy = (rois[:, 2] + rois[:, 4]) * 0.5 w = rois[:, 3] - rois[:, 1] + 1 h = rois[:, 4] - rois[:, 2] + 1 new_w = w * scale_factor new_h = h * scale_factor x1 = cx - new_w * 0.5 + 0.5 x2 = cx + new_w * 0.5 - 0.5 y1 = cy - new_h * 0.5 + 0.5 y2 = cy + new_h * 0.5 - 0.5 new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1) return new_rois @force_fp32(apply_to=('feats', ), out_fp16=True) def forward(self, feats, rois, roi_scale_factor=None): if len(feats) == 1: return self.roi_layers[0](feats[0], rois) out_size = self.roi_layers[0].out_size num_levels = len(feats) target_lvls = self.map_roi_levels(rois, num_levels) roi_feats = feats[0].new_zeros( rois.size(0), self.out_channels, *out_size) if roi_scale_factor is not None: rois = self.roi_rescale(rois, roi_scale_factor) for i in range(num_levels): inds = target_lvls == i if inds.any(): rois_ = rois[inds, :] roi_feats_t = self.roi_layers[i](feats[i], rois_) roi_feats[inds] = roi_feats_t return roi_feats
3,794
34.138889
79
py
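The scale-to-level rule documented in map_roi_levels above, replayed on three hand-made RoIs (format [batch_ind, x1, y1, x2, y2], finest_scale=56, four levels):

import torch

rois = torch.tensor([[0., 0., 0., 55., 55.],      # ~56 px  -> level 0
                     [0., 0., 0., 150., 150.],    # ~151 px -> level 1
                     [0., 0., 0., 500., 500.]])   # ~501 px -> level 3
finest_scale, num_levels = 56, 4
scale = torch.sqrt((rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
target_lvls = torch.floor(torch.log2(scale / finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
print(target_lvls)   # tensor([0, 1, 3])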
AlignShift
AlignShift-master/mmdet/models/anchor_heads/reppoints_head.py
from __future__ import division import numpy as np import torch import torch.nn as nn from mmcv.cnn import normal_init from mmdet.core import (PointGenerator, multi_apply, multiclass_nms, point_target) from mmdet.ops import DeformConv from ..builder import build_loss from ..registry import HEADS from ..utils import ConvModule, bias_init_with_prob @HEADS.register_module class RepPointsHead(nn.Module): """RepPoint head. Args: in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of channels of the feature map. point_feat_channels (int): Number of channels of points features. stacked_convs (int): How many conv layers are used. gradient_mul (float): The multiplier to gradients from points refinement and recognition. point_strides (Iterable): points strides. point_base_scale (int): bbox scale for assigning labels. loss_cls (dict): Config of classification loss. loss_bbox_init (dict): Config of initial points loss. loss_bbox_refine (dict): Config of points loss in refinement. use_grid_points (bool): If we use bounding box representation, the reppoints is represented as grid points on the bounding box. center_init (bool): Whether to use center point assignment. transform_method (str): The methods to transform RepPoints to bbox. """ # noqa: W605 def __init__(self, num_classes, in_channels, feat_channels=256, point_feat_channels=256, stacked_convs=3, num_points=9, gradient_mul=0.1, point_strides=[8, 16, 32, 64, 128], point_base_scale=4, conv_cfg=None, norm_cfg=None, loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox_init=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5), loss_bbox_refine=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0), use_grid_points=False, center_init=True, transform_method='moment', moment_mul=0.01): super(RepPointsHead, self).__init__() self.in_channels = in_channels self.num_classes = num_classes self.feat_channels = feat_channels self.point_feat_channels = point_feat_channels self.stacked_convs = stacked_convs self.num_points = num_points self.gradient_mul = gradient_mul self.point_base_scale = point_base_scale self.point_strides = point_strides self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) self.sampling = loss_cls['type'] not in ['FocalLoss'] self.loss_cls = build_loss(loss_cls) self.loss_bbox_init = build_loss(loss_bbox_init) self.loss_bbox_refine = build_loss(loss_bbox_refine) self.use_grid_points = use_grid_points self.center_init = center_init self.transform_method = transform_method if self.transform_method == 'moment': self.moment_transfer = nn.Parameter( data=torch.zeros(2), requires_grad=True) self.moment_mul = moment_mul if self.use_sigmoid_cls: self.cls_out_channels = self.num_classes - 1 else: self.cls_out_channels = self.num_classes self.point_generators = [PointGenerator() for _ in self.point_strides] # we use deformable conv to extract points features self.dcn_kernel = int(np.sqrt(num_points)) self.dcn_pad = int((self.dcn_kernel - 1) / 2) assert self.dcn_kernel * self.dcn_kernel == num_points, \ "The points number should be a square number." assert self.dcn_kernel % 2 == 1, \ "The points number should be an odd square number." 
dcn_base = np.arange(-self.dcn_pad, self.dcn_pad + 1).astype(np.float64) dcn_base_y = np.repeat(dcn_base, self.dcn_kernel) dcn_base_x = np.tile(dcn_base, self.dcn_kernel) dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape( (-1)) self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1) self._init_layers() def _init_layers(self): self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points self.reppoints_cls_conv = DeformConv(self.feat_channels, self.point_feat_channels, self.dcn_kernel, 1, self.dcn_pad) self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels, self.cls_out_channels, 1, 1, 0) self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels, self.point_feat_channels, 3, 1, 1) self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels, pts_out_dim, 1, 1, 0) self.reppoints_pts_refine_conv = DeformConv(self.feat_channels, self.point_feat_channels, self.dcn_kernel, 1, self.dcn_pad) self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels, pts_out_dim, 1, 1, 0) def init_weights(self): for m in self.cls_convs: normal_init(m.conv, std=0.01) for m in self.reg_convs: normal_init(m.conv, std=0.01) bias_cls = bias_init_with_prob(0.01) normal_init(self.reppoints_cls_conv, std=0.01) normal_init(self.reppoints_cls_out, std=0.01, bias=bias_cls) normal_init(self.reppoints_pts_init_conv, std=0.01) normal_init(self.reppoints_pts_init_out, std=0.01) normal_init(self.reppoints_pts_refine_conv, std=0.01) normal_init(self.reppoints_pts_refine_out, std=0.01) def points2bbox(self, pts, y_first=True): """ Converting the points set into bounding box. :param pts: the input points sets (fields), each points set (fields) is represented as 2n scalar. :param y_first: if y_fisrt=True, the point set is represented as [y1, x1, y2, x2 ... yn, xn], otherwise the point set is represented as [x1, y1, x2, y2 ... xn, yn]. :return: each points set is converting to a bbox [x1, y1, x2, y2]. """ pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:]) pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1, ...] pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0, ...] if self.transform_method == 'minmax': bbox_left = pts_x.min(dim=1, keepdim=True)[0] bbox_right = pts_x.max(dim=1, keepdim=True)[0] bbox_up = pts_y.min(dim=1, keepdim=True)[0] bbox_bottom = pts_y.max(dim=1, keepdim=True)[0] bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom], dim=1) elif self.transform_method == 'partial_minmax': pts_y = pts_y[:, :4, ...] pts_x = pts_x[:, :4, ...] 
bbox_left = pts_x.min(dim=1, keepdim=True)[0] bbox_right = pts_x.max(dim=1, keepdim=True)[0] bbox_up = pts_y.min(dim=1, keepdim=True)[0] bbox_bottom = pts_y.max(dim=1, keepdim=True)[0] bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom], dim=1) elif self.transform_method == 'moment': pts_y_mean = pts_y.mean(dim=1, keepdim=True) pts_x_mean = pts_x.mean(dim=1, keepdim=True) pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True) pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True) moment_transfer = (self.moment_transfer * self.moment_mul) + ( self.moment_transfer.detach() * (1 - self.moment_mul)) moment_width_transfer = moment_transfer[0] moment_height_transfer = moment_transfer[1] half_width = pts_x_std * torch.exp(moment_width_transfer) half_height = pts_y_std * torch.exp(moment_height_transfer) bbox = torch.cat([ pts_x_mean - half_width, pts_y_mean - half_height, pts_x_mean + half_width, pts_y_mean + half_height ], dim=1) else: raise NotImplementedError return bbox def gen_grid_from_reg(self, reg, previous_boxes): """ Base on the previous bboxes and regression values, we compute the regressed bboxes and generate the grids on the bboxes. :param reg: the regression value to previous bboxes. :param previous_boxes: previous bboxes. :return: generate grids on the regressed bboxes. """ b, _, h, w = reg.shape bxy = (previous_boxes[:, :2, ...] + previous_boxes[:, 2:, ...]) / 2. bwh = (previous_boxes[:, 2:, ...] - previous_boxes[:, :2, ...]).clamp(min=1e-6) grid_topleft = bxy + bwh * reg[:, :2, ...] - 0.5 * bwh * torch.exp( reg[:, 2:, ...]) grid_wh = bwh * torch.exp(reg[:, 2:, ...]) grid_left = grid_topleft[:, [0], ...] grid_top = grid_topleft[:, [1], ...] grid_width = grid_wh[:, [0], ...] grid_height = grid_wh[:, [1], ...] intervel = torch.linspace(0., 1., self.dcn_kernel).view( 1, self.dcn_kernel, 1, 1).type_as(reg) grid_x = grid_left + grid_width * intervel grid_x = grid_x.unsqueeze(1).repeat(1, self.dcn_kernel, 1, 1, 1) grid_x = grid_x.view(b, -1, h, w) grid_y = grid_top + grid_height * intervel grid_y = grid_y.unsqueeze(2).repeat(1, 1, self.dcn_kernel, 1, 1) grid_y = grid_y.view(b, -1, h, w) grid_yx = torch.stack([grid_y, grid_x], dim=2) grid_yx = grid_yx.view(b, -1, h, w) regressed_bbox = torch.cat([ grid_left, grid_top, grid_left + grid_width, grid_top + grid_height ], 1) return grid_yx, regressed_bbox def forward_single(self, x): dcn_base_offset = self.dcn_base_offset.type_as(x) # If we use center_init, the initial reppoints is from center points. # If we use bounding bbox representation, the initial reppoints is # from regular grid placed on a pre-defined bbox. 
if self.use_grid_points or not self.center_init: scale = self.point_base_scale / 2 points_init = dcn_base_offset / dcn_base_offset.max() * scale bbox_init = x.new_tensor([-scale, -scale, scale, scale]).view(1, 4, 1, 1) else: points_init = 0 cls_feat = x pts_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: pts_feat = reg_conv(pts_feat) # initialize reppoints pts_out_init = self.reppoints_pts_init_out( self.relu(self.reppoints_pts_init_conv(pts_feat))) if self.use_grid_points: pts_out_init, bbox_out_init = self.gen_grid_from_reg( pts_out_init, bbox_init.detach()) else: pts_out_init = pts_out_init + points_init # refine and classify reppoints pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach( ) + self.gradient_mul * pts_out_init dcn_offset = pts_out_init_grad_mul - dcn_base_offset cls_out = self.reppoints_cls_out( self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset))) pts_out_refine = self.reppoints_pts_refine_out( self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset))) if self.use_grid_points: pts_out_refine, bbox_out_refine = self.gen_grid_from_reg( pts_out_refine, bbox_out_init.detach()) else: pts_out_refine = pts_out_refine + pts_out_init.detach() return cls_out, pts_out_init, pts_out_refine def forward(self, feats): return multi_apply(self.forward_single, feats) def get_points(self, featmap_sizes, img_metas): """Get points according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. img_metas (list[dict]): Image meta info. Returns: tuple: points of each image, valid flags of each image """ num_imgs = len(img_metas) num_levels = len(featmap_sizes) # since feature map sizes of all images are the same, we only compute # points center for one time multi_level_points = [] for i in range(num_levels): points = self.point_generators[i].grid_points( featmap_sizes[i], self.point_strides[i]) multi_level_points.append(points) points_list = [[point.clone() for point in multi_level_points] for _ in range(num_imgs)] # for each image, we compute valid flags of multi level grids valid_flag_list = [] for img_id, img_meta in enumerate(img_metas): multi_level_flags = [] for i in range(num_levels): point_stride = self.point_strides[i] feat_h, feat_w = featmap_sizes[i] h, w, _ = img_meta['pad_shape'] valid_feat_h = min(int(np.ceil(h / point_stride)), feat_h) valid_feat_w = min(int(np.ceil(w / point_stride)), feat_w) flags = self.point_generators[i].valid_flags( (feat_h, feat_w), (valid_feat_h, valid_feat_w)) multi_level_flags.append(flags) valid_flag_list.append(multi_level_flags) return points_list, valid_flag_list def centers_to_bboxes(self, point_list): """Get bboxes according to center points. Only used in MaxIOUAssigner. """ bbox_list = [] for i_img, point in enumerate(point_list): bbox = [] for i_lvl in range(len(self.point_strides)): scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5 bbox_shift = torch.Tensor([-scale, -scale, scale, scale]).view(1, 4).type_as(point[0]) bbox_center = torch.cat( [point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1) bbox.append(bbox_center + bbox_shift) bbox_list.append(bbox) return bbox_list def offset_to_pts(self, center_list, pred_list): """Change from point offset to point coordinate. 
""" pts_list = [] for i_lvl in range(len(self.point_strides)): pts_lvl = [] for i_img in range(len(center_list)): pts_center = center_list[i_img][i_lvl][:, :2].repeat( 1, self.num_points) pts_shift = pred_list[i_lvl][i_img] yx_pts_shift = pts_shift.permute(1, 2, 0).view( -1, 2 * self.num_points) y_pts_shift = yx_pts_shift[..., 0::2] x_pts_shift = yx_pts_shift[..., 1::2] xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1) xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1) pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center pts_lvl.append(pts) pts_lvl = torch.stack(pts_lvl, 0) pts_list.append(pts_lvl) return pts_list def loss_single(self, cls_score, pts_pred_init, pts_pred_refine, labels, label_weights, bbox_gt_init, bbox_weights_init, bbox_gt_refine, bbox_weights_refine, stride, num_total_samples_init, num_total_samples_refine): # classification loss labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=num_total_samples_refine) # points loss bbox_gt_init = bbox_gt_init.reshape(-1, 4) bbox_weights_init = bbox_weights_init.reshape(-1, 4) bbox_pred_init = self.points2bbox( pts_pred_init.reshape(-1, 2 * self.num_points), y_first=False) bbox_gt_refine = bbox_gt_refine.reshape(-1, 4) bbox_weights_refine = bbox_weights_refine.reshape(-1, 4) bbox_pred_refine = self.points2bbox( pts_pred_refine.reshape(-1, 2 * self.num_points), y_first=False) normalize_term = self.point_base_scale * stride loss_pts_init = self.loss_bbox_init( bbox_pred_init / normalize_term, bbox_gt_init / normalize_term, bbox_weights_init, avg_factor=num_total_samples_init) loss_pts_refine = self.loss_bbox_refine( bbox_pred_refine / normalize_term, bbox_gt_refine / normalize_term, bbox_weights_refine, avg_factor=num_total_samples_refine) return loss_cls, loss_pts_init, loss_pts_refine def loss(self, cls_scores, pts_preds_init, pts_preds_refine, gt_bboxes, gt_labels, img_metas, cfg, gt_bboxes_ignore=None): featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == len(self.point_generators) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 # target for initial stage center_list, valid_flag_list = self.get_points(featmap_sizes, img_metas) pts_coordinate_preds_init = self.offset_to_pts(center_list, pts_preds_init) if cfg.init.assigner['type'] == 'PointAssigner': # Assign target for center list candidate_list = center_list else: # transform center list to bbox list and # assign target for bbox list bbox_list = self.centers_to_bboxes(center_list) candidate_list = bbox_list cls_reg_targets_init = point_target( candidate_list, valid_flag_list, gt_bboxes, img_metas, cfg.init, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, sampling=self.sampling) (*_, bbox_gt_list_init, candidate_list_init, bbox_weights_list_init, num_total_pos_init, num_total_neg_init) = cls_reg_targets_init num_total_samples_init = ( num_total_pos_init + num_total_neg_init if self.sampling else num_total_pos_init) # target for refinement stage center_list, valid_flag_list = self.get_points(featmap_sizes, img_metas) pts_coordinate_preds_refine = self.offset_to_pts( center_list, pts_preds_refine) bbox_list = [] for i_img, center in enumerate(center_list): bbox = [] for i_lvl in range(len(pts_preds_refine)): bbox_preds_init = self.points2bbox( pts_preds_init[i_lvl].detach()) 
bbox_shift = bbox_preds_init * self.point_strides[i_lvl] bbox_center = torch.cat( [center[i_lvl][:, :2], center[i_lvl][:, :2]], dim=1) bbox.append(bbox_center + bbox_shift[i_img].permute(1, 2, 0).reshape(-1, 4)) bbox_list.append(bbox) cls_reg_targets_refine = point_target( bbox_list, valid_flag_list, gt_bboxes, img_metas, cfg.refine, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, sampling=self.sampling) (labels_list, label_weights_list, bbox_gt_list_refine, candidate_list_refine, bbox_weights_list_refine, num_total_pos_refine, num_total_neg_refine) = cls_reg_targets_refine num_total_samples_refine = ( num_total_pos_refine + num_total_neg_refine if self.sampling else num_total_pos_refine) # compute loss losses_cls, losses_pts_init, losses_pts_refine = multi_apply( self.loss_single, cls_scores, pts_coordinate_preds_init, pts_coordinate_preds_refine, labels_list, label_weights_list, bbox_gt_list_init, bbox_weights_list_init, bbox_gt_list_refine, bbox_weights_list_refine, self.point_strides, num_total_samples_init=num_total_samples_init, num_total_samples_refine=num_total_samples_refine) loss_dict_all = { 'loss_cls': losses_cls, 'loss_pts_init': losses_pts_init, 'loss_pts_refine': losses_pts_refine } return loss_dict_all def get_bboxes(self, cls_scores, pts_preds_init, pts_preds_refine, img_metas, cfg, rescale=False, nms=True): assert len(cls_scores) == len(pts_preds_refine) bbox_preds_refine = [ self.points2bbox(pts_pred_refine) for pts_pred_refine in pts_preds_refine ] num_levels = len(cls_scores) mlvl_points = [ self.point_generators[i].grid_points(cls_scores[i].size()[-2:], self.point_strides[i]) for i in range(num_levels) ] result_list = [] for img_id in range(len(img_metas)): cls_score_list = [ cls_scores[i][img_id].detach() for i in range(num_levels) ] bbox_pred_list = [ bbox_preds_refine[i][img_id].detach() for i in range(num_levels) ] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] proposals = self.get_bboxes_single(cls_score_list, bbox_pred_list, mlvl_points, img_shape, scale_factor, cfg, rescale, nms) result_list.append(proposals) return result_list def get_bboxes_single(self, cls_scores, bbox_preds, mlvl_points, img_shape, scale_factor, cfg, rescale=False, nms=True): assert len(cls_scores) == len(bbox_preds) == len(mlvl_points) mlvl_bboxes = [] mlvl_scores = [] for i_lvl, (cls_score, bbox_pred, points) in enumerate( zip(cls_scores, bbox_preds, mlvl_points)): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1) bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) nms_pre = cfg.get('nms_pre', -1) if nms_pre > 0 and scores.shape[0] > nms_pre: if self.use_sigmoid_cls: max_scores, _ = scores.max(dim=1) else: max_scores, _ = scores[:, 1:].max(dim=1) _, topk_inds = max_scores.topk(nms_pre) points = points[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1) bboxes = bbox_pred * self.point_strides[i_lvl] + bbox_pos_center x1 = bboxes[:, 0].clamp(min=0, max=img_shape[1]) y1 = bboxes[:, 1].clamp(min=0, max=img_shape[0]) x2 = bboxes[:, 2].clamp(min=0, max=img_shape[1]) y2 = bboxes[:, 3].clamp(min=0, max=img_shape[0]) bboxes = torch.stack([x1, y1, x2, y2], dim=-1) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_bboxes = 
torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) if self.use_sigmoid_cls: padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([padding, mlvl_scores], dim=1) if nms: det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img) return det_bboxes, det_labels else: return mlvl_bboxes, mlvl_scores
27,172
44.515913
79
py
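The 'moment' branch of RepPointsHead.points2bbox in the file above turns a learned point set into a pseudo box from its first and second moments. The sketch below is a standalone re-implementation for illustration only; the helper name moment_points2bbox and the zeroed moment_transfer are assumptions, not part of the repository.

import torch

def moment_points2bbox(pts, moment_transfer=(0.0, 0.0)):
    # pts: (N, 2 * num_points), y-first layout (y0, x0, y1, x1, ...)
    pts_reshape = pts.view(pts.shape[0], -1, 2)
    pts_y, pts_x = pts_reshape[..., 0], pts_reshape[..., 1]
    pts_y_mean = pts_y.mean(dim=1, keepdim=True)
    pts_x_mean = pts_x.mean(dim=1, keepdim=True)
    pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)
    pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)
    # exp(moment_transfer) rescales the std into box half-extents
    half_width = pts_x_std * torch.exp(torch.tensor(moment_transfer[0]))
    half_height = pts_y_std * torch.exp(torch.tensor(moment_transfer[1]))
    return torch.cat([pts_x_mean - half_width, pts_y_mean - half_height,
                      pts_x_mean + half_width, pts_y_mean + half_height], dim=1)

pts = torch.rand(4, 18)                  # 4 samples, 9 (y, x) points each
print(moment_points2bbox(pts).shape)     # torch.Size([4, 4]) -> (x1, y1, x2, y2)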
AlignShift
AlignShift-master/mmdet/models/anchor_heads/rpn_head.py
import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import normal_init from mmdet.core import delta2bbox from mmdet.ops import nms from ..registry import HEADS from .anchor_head import AnchorHead @HEADS.register_module class RPNHead(AnchorHead): def __init__(self, in_channels, **kwargs): super(RPNHead, self).__init__(2, in_channels, **kwargs) def _init_layers(self): self.rpn_conv = nn.Conv2d( self.in_channels, self.feat_channels, 3, padding=1)#hyadd self.rpn_cls = nn.Conv2d(self.feat_channels, self.num_anchors * self.cls_out_channels, 1) self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1) def init_weights(self): normal_init(self.rpn_conv, std=0.01)#hyadd normal_init(self.rpn_cls, std=0.01) normal_init(self.rpn_reg, std=0.01) def forward_single(self, x): x = self.rpn_conv(x)#hyadd x = F.relu(x, inplace=True)#hyadd rpn_cls_score = self.rpn_cls(x) rpn_bbox_pred = self.rpn_reg(x) return rpn_cls_score, rpn_bbox_pred def loss(self, cls_scores, bbox_preds, gt_bboxes, img_metas, cfg, gt_bboxes_ignore=None): losses = super(RPNHead, self).loss( cls_scores, bbox_preds, gt_bboxes, None, img_metas, cfg, gt_bboxes_ignore=gt_bboxes_ignore) return losses # dict( # loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox'], # n_rpn_pos=losses['num_total_pos'], n_rpn_neg=losses['num_total_neg']) def get_bboxes_single(self, cls_scores, bbox_preds, mlvl_anchors, img_shape, scale_factor, cfg, rescale=False): mlvl_proposals = [] for idx in range(len(cls_scores)): rpn_cls_score = cls_scores[idx] rpn_bbox_pred = bbox_preds[idx] assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] anchors = mlvl_anchors[idx] rpn_cls_score = rpn_cls_score.permute(1, 2, 0) if self.use_sigmoid_cls: rpn_cls_score = rpn_cls_score.reshape(-1) scores = rpn_cls_score.sigmoid() else: rpn_cls_score = rpn_cls_score.reshape(-1, 2) scores = rpn_cls_score.softmax(dim=1)[:, 1] rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4) if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre: _, topk_inds = scores.topk(cfg.nms_pre) rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] anchors = anchors[topk_inds, :] scores = scores[topk_inds] proposals = delta2bbox(anchors, rpn_bbox_pred, self.target_means, self.target_stds, img_shape) if cfg.min_bbox_size > 0: w = proposals[:, 2] - proposals[:, 0] + 1 h = proposals[:, 3] - proposals[:, 1] + 1 valid_inds = torch.nonzero((w >= cfg.min_bbox_size) & (h >= cfg.min_bbox_size)).squeeze() proposals = proposals[valid_inds, :] scores = scores[valid_inds] proposals = torch.cat([proposals, scores.unsqueeze(-1)], dim=-1) proposals, _ = nms(proposals, cfg.nms_thr) proposals = proposals[:cfg.nms_post, :] mlvl_proposals.append(proposals) proposals = torch.cat(mlvl_proposals, 0) if cfg.nms_across_levels: proposals, _ = nms(proposals, cfg.nms_thr) proposals = proposals[:cfg.max_num, :] else: scores = proposals[:, 4] num = min(cfg.max_num, proposals.shape[0]) _, topk_inds = scores.topk(num) proposals = proposals[topk_inds, :] return proposals
4,180
37.712963
83
py
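RPNHead.get_bboxes_single above applies two cheap filters before NMS: a top-k cut on objectness scores (nms_pre) and a minimum-size check (min_bbox_size). The snippet below is a self-contained sketch of just those two steps; the function name filter_proposals is hypothetical, and the real head additionally decodes anchor deltas with delta2bbox and runs nms on the result.

import torch

def filter_proposals(proposals, scores, nms_pre=2000, min_bbox_size=0):
    # proposals: (N, 4) boxes as (x1, y1, x2, y2); scores: (N,) objectness
    if nms_pre > 0 and scores.shape[0] > nms_pre:
        scores, topk_inds = scores.topk(nms_pre)
        proposals = proposals[topk_inds, :]
    if min_bbox_size > 0:
        w = proposals[:, 2] - proposals[:, 0] + 1
        h = proposals[:, 3] - proposals[:, 1] + 1
        valid_inds = torch.nonzero((w >= min_bbox_size)
                                   & (h >= min_bbox_size)).squeeze(1)
        proposals, scores = proposals[valid_inds, :], scores[valid_inds]
    return torch.cat([proposals, scores.unsqueeze(-1)], dim=-1)   # (M, 5)

boxes = torch.tensor([[0., 0., 10., 10.], [5., 5., 6., 6.]])
print(filter_proposals(boxes, torch.tensor([0.9, 0.8]), min_bbox_size=4))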
AlignShift
AlignShift-master/mmdet/models/anchor_heads/anchor_head.py
from __future__ import division import numpy as np import torch import torch.nn as nn from mmcv.cnn import normal_init from mmdet.core import (AnchorGenerator, anchor_target, delta2bbox, force_fp32, multi_apply, multiclass_nms) from ..builder import build_loss from ..registry import HEADS @HEADS.register_module class AnchorHead(nn.Module): """Anchor-based head (RPN, RetinaNet, SSD, etc.). Args: num_classes (int): Number of categories including the background category. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. Used in child classes. anchor_scales (Iterable): Anchor scales. anchor_ratios (Iterable): Anchor aspect ratios. anchor_strides (Iterable): Anchor strides. anchor_base_sizes (Iterable): Anchor base sizes. target_means (Iterable): Mean values of regression targets. target_stds (Iterable): Std values of regression targets. loss_cls (dict): Config of classification loss. loss_bbox (dict): Config of localization loss. """ # noqa: W605 def __init__(self, num_classes, in_channels, feat_channels=256, anchor_scales=[8, 16, 32], anchor_ratios=[0.5, 1.0, 2.0], anchor_strides=[4, 8, 16, 32, 64], anchor_base_sizes=None, target_means=(.0, .0, .0, .0), target_stds=(1.0, 1.0, 1.0, 1.0), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict( type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)): super(AnchorHead, self).__init__() self.in_channels = in_channels self.num_classes = num_classes self.feat_channels = feat_channels self.anchor_scales = anchor_scales self.anchor_ratios = anchor_ratios self.anchor_strides = anchor_strides self.anchor_base_sizes = list( anchor_strides) if anchor_base_sizes is None else anchor_base_sizes self.target_means = target_means self.target_stds = target_stds self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) self.sampling = loss_cls['type'] not in ['FocalLoss', 'GHMC'] if self.use_sigmoid_cls: self.cls_out_channels = num_classes - 1 else: self.cls_out_channels = num_classes if self.cls_out_channels <= 0: raise ValueError('num_classes={} is too small'.format(num_classes)) self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) self.fp16_enabled = False self.anchor_generators = [] for i,anchor_base in enumerate(self.anchor_base_sizes):#hyadd self.anchor_generators.append( AnchorGenerator(anchor_base, anchor_scales, anchor_ratios, ctr=[0,0]))#,ctr=[0,0] self.num_anchors = len(self.anchor_ratios) * len(self.anchor_scales) self._init_layers() def _init_layers(self): self.conv_cls = nn.Conv2d(self.in_channels, self.num_anchors * self.cls_out_channels, 1) self.conv_reg = nn.Conv2d(self.in_channels, self.num_anchors * 4, 1) def init_weights(self): normal_init(self.conv_cls, std=0.01) normal_init(self.conv_reg, std=0.01) def forward_single(self, x): cls_score = self.conv_cls(x) bbox_pred = self.conv_reg(x) return cls_score, bbox_pred def forward(self, feats): return multi_apply(self.forward_single, feats) def get_anchors(self, featmap_sizes, img_metas, device='cuda'): """Get anchors according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. img_metas (list[dict]): Image meta info. 
device (torch.device | str): device for returned tensors Returns: tuple: anchors of each image, valid flags of each image """ num_imgs = len(img_metas) num_levels = len(featmap_sizes) # since feature map sizes of all images are the same, we only compute # anchors for one time multi_level_anchors = [] for i in range(num_levels): anchors = self.anchor_generators[i].grid_anchors( featmap_sizes[i], self.anchor_strides[i], device=device) multi_level_anchors.append(anchors) anchor_list = [multi_level_anchors for _ in range(num_imgs)] # for each image, we compute valid flags of multi level anchors valid_flag_list = [] for img_id, img_meta in enumerate(img_metas): multi_level_flags = [] for i in range(num_levels): anchor_stride = self.anchor_strides[i] feat_h, feat_w = featmap_sizes[i] h, w, _ = img_meta['pad_shape'] valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h) valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w) flags = self.anchor_generators[i].valid_flags( (feat_h, feat_w), (valid_feat_h, valid_feat_w), device=device) multi_level_flags.append(flags) valid_flag_list.append(multi_level_flags) return anchor_list, valid_flag_list def loss_single(self, cls_score, bbox_pred, labels, label_weights, bbox_targets, bbox_weights, num_total_samples, cfg): # classification loss labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) cls_score = cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=num_total_samples) # recal = recall(cls_score, labels) # regression loss bbox_targets = bbox_targets.reshape(-1, 4) bbox_weights = bbox_weights.reshape(-1, 4) bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) loss_bbox = self.loss_bbox( bbox_pred, bbox_targets, bbox_weights, avg_factor=num_total_samples) return loss_cls, loss_bbox @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, cfg, gt_bboxes_ignore=None): featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == len(self.anchor_generators) device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = anchor_target( anchor_list, valid_flag_list, gt_bboxes, img_metas, self.target_means, self.target_stds, cfg, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, sampling=self.sampling) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets # print(num_total_pos, num_total_neg) num_total_samples = ( num_total_pos + num_total_neg if self.sampling else num_total_pos) losses_cls, losses_bbox = multi_apply( self.loss_single, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_samples=num_total_samples, cfg=cfg) return dict(loss_rpn_cls=losses_cls, loss_rpn_bbox=losses_bbox) @force_fp32(apply_to=('cls_scores', 'bbox_preds')) def get_bboxes(self, cls_scores, bbox_preds, img_metas, cfg, rescale=False): """ Transform network output for a batch into labeled boxes. 
Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) img_metas (list[dict]): size / scale info for each image cfg (mmcv.Config): test / postprocessing configuration rescale (bool): if True, return boxes in original image space Returns: list[tuple[Tensor, Tensor]]: each item in result_list is 2-tuple. The first item is an (n, 5) tensor, where the first 4 columns are bounding box positions (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between 0 and 1. The second item is a (n,) tensor where each item is the class index of the corresponding box. Example: >>> import mmcv >>> self = AnchorHead(num_classes=9, in_channels=1) >>> img_metas = [{'img_shape': (32, 32, 3), 'scale_factor': 1}] >>> cfg = mmcv.Config(dict( >>> score_thr=0.00, >>> nms=dict(type='nms', iou_thr=1.0), >>> max_per_img=10)) >>> feat = torch.rand(1, 1, 3, 3) >>> cls_score, bbox_pred = self.forward_single(feat) >>> # note the input lists are over different levels, not images >>> cls_scores, bbox_preds = [cls_score], [bbox_pred] >>> result_list = self.get_bboxes(cls_scores, bbox_preds, >>> img_metas, cfg) >>> det_bboxes, det_labels = result_list[0] >>> assert len(result_list) == 1 >>> assert det_bboxes.shape[1] == 5 >>> assert len(det_bboxes) == len(det_labels) == cfg.max_per_img """ assert len(cls_scores) == len(bbox_preds) num_levels = len(cls_scores) device = cls_scores[0].device mlvl_anchors = [ self.anchor_generators[i].grid_anchors( cls_scores[i].size()[-2:], self.anchor_strides[i], device=device) for i in range(num_levels) ] result_list = [] for img_id in range(len(img_metas)): cls_score_list = [ cls_scores[i][img_id].detach() for i in range(num_levels) ] bbox_pred_list = [ bbox_preds[i][img_id].detach() for i in range(num_levels) ] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] proposals = self.get_bboxes_single(cls_score_list, bbox_pred_list, mlvl_anchors, img_shape, scale_factor, cfg, rescale) result_list.append(proposals) return result_list def get_bboxes_single(self, cls_score_list, bbox_pred_list, mlvl_anchors, img_shape, scale_factor, cfg, rescale=False): """ Transform outputs for a single batch item into labeled boxes. """ assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors) mlvl_bboxes = [] mlvl_scores = [] for cls_score, bbox_pred, anchors in zip(cls_score_list, bbox_pred_list, mlvl_anchors): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1) bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) nms_pre = cfg.get('nms_pre', -1) if nms_pre > 0 and scores.shape[0] > nms_pre: # Get maximum scores for foreground classes. 
if self.use_sigmoid_cls: max_scores, _ = scores.max(dim=1) else: max_scores, _ = scores[:, 1:].max(dim=1) _, topk_inds = max_scores.topk(nms_pre) anchors = anchors[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] bboxes = delta2bbox(anchors, bbox_pred, self.target_means, self.target_stds, img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) if self.use_sigmoid_cls: # Add a dummy background class to the front when using sigmoid padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([padding, mlvl_scores], dim=1) det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img) return det_bboxes, det_labels
14,032
41.268072
97
py
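AnchorHead.get_anchors above relies on AnchorGenerator.grid_anchors to tile a small set of base anchors over every cell of a feature map. The snippet below is a rough, standalone sketch of that tiling; simple_grid_anchors is an illustrative name, not the mmdet API, and the real generator also handles centring and valid flags.

import torch

def simple_grid_anchors(base_anchors, feat_h, feat_w, stride):
    # base_anchors: (A, 4) boxes centred at (0, 0) in (x1, y1, x2, y2) form
    shift_x = torch.arange(0, feat_w).float() * stride
    shift_y = torch.arange(0, feat_h).float() * stride
    yy, xx = torch.meshgrid(shift_y, shift_x)
    shifts = torch.stack([xx.reshape(-1), yy.reshape(-1),
                          xx.reshape(-1), yy.reshape(-1)], dim=-1)
    # broadcast: every feature-map cell gets every base anchor, shifted to it
    return (base_anchors[None, :, :] + shifts[:, None, :]).reshape(-1, 4)

base = torch.tensor([[-8., -8., 8., 8.], [-16., -8., 16., 8.]])
print(simple_grid_anchors(base, feat_h=2, feat_w=3, stride=16).shape)  # (12, 4)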
AlignShift
AlignShift-master/mmdet/models/anchor_heads/retina_head.py
import numpy as np import torch.nn as nn from mmcv.cnn import normal_init from ..registry import HEADS from ..utils import ConvModule, bias_init_with_prob from .anchor_head import AnchorHead @HEADS.register_module class RetinaHead(AnchorHead): """ An anchor-based head used in [1]_. The head contains two subnetworks. The first classifies anchor boxes and the second regresses deltas for the anchors. References: .. [1] https://arxiv.org/pdf/1708.02002.pdf Example: >>> import torch >>> self = RetinaHead(11, 7) >>> x = torch.rand(1, 7, 32, 32) >>> cls_score, bbox_pred = self.forward_single(x) >>> # Each anchor predicts a score for each class except background >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors >>> assert cls_per_anchor == (self.num_classes - 1) >>> assert box_per_anchor == 4 """ def __init__(self, num_classes, in_channels, stacked_convs=4, octave_base_scale=4, scales_per_octave=3, conv_cfg=None, norm_cfg=None, **kwargs): self.stacked_convs = stacked_convs self.octave_base_scale = octave_base_scale self.scales_per_octave = scales_per_octave self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg octave_scales = np.array( [2**(i / scales_per_octave) for i in range(scales_per_octave)]) anchor_scales = octave_scales * octave_base_scale super(RetinaHead, self).__init__( num_classes, in_channels, **kwargs)#anchor_scales=anchor_scales, def _init_layers(self): self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.retina_cls = nn.Conv2d( self.feat_channels, self.num_anchors * self.cls_out_channels, 3, padding=1) self.retina_reg = nn.Conv2d( self.feat_channels, self.num_anchors * 4, 3, padding=1) def init_weights(self): for m in self.cls_convs: normal_init(m.conv, std=0.01) for m in self.reg_convs: normal_init(m.conv, std=0.01) bias_cls = bias_init_with_prob(0.01) normal_init(self.retina_cls, std=0.01, bias=bias_cls) normal_init(self.retina_reg, std=0.01) def forward_single(self, x): cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) cls_score = self.retina_cls(cls_feat) bbox_pred = self.retina_reg(reg_feat) return cls_score, bbox_pred
3,603
33.653846
77
py
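RetinaHead.__init__ above derives three anchor scales per pyramid level from octave_base_scale and scales_per_octave (note that in this repository the computed anchor_scales are not forwarded to AnchorHead; that argument is commented out in the super() call). The arithmetic, worked out for the defaults:

import numpy as np

octave_base_scale, scales_per_octave = 4, 3
octave_scales = np.array(
    [2**(i / scales_per_octave) for i in range(scales_per_octave)])
print(octave_scales * octave_base_scale)   # approx [4.0, 5.04, 6.35]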
AlignShift
AlignShift-master/mmdet/models/anchor_heads/ga_rpn_head.py
import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import normal_init from mmdet.core import delta2bbox from mmdet.ops import nms from ..registry import HEADS from .guided_anchor_head import GuidedAnchorHead @HEADS.register_module class GARPNHead(GuidedAnchorHead): """Guided-Anchor-based RPN head.""" def __init__(self, in_channels, **kwargs): super(GARPNHead, self).__init__(2, in_channels, **kwargs) def _init_layers(self): self.rpn_conv = nn.Conv2d( self.in_channels, self.feat_channels, 3, padding=1) super(GARPNHead, self)._init_layers() def init_weights(self): normal_init(self.rpn_conv, std=0.01) super(GARPNHead, self).init_weights() def forward_single(self, x): x = self.rpn_conv(x) x = F.relu(x, inplace=True) (cls_score, bbox_pred, shape_pred, loc_pred) = super(GARPNHead, self).forward_single(x) return cls_score, bbox_pred, shape_pred, loc_pred def loss(self, cls_scores, bbox_preds, shape_preds, loc_preds, gt_bboxes, img_metas, cfg, gt_bboxes_ignore=None): losses = super(GARPNHead, self).loss( cls_scores, bbox_preds, shape_preds, loc_preds, gt_bboxes, None, img_metas, cfg, gt_bboxes_ignore=gt_bboxes_ignore) return dict( loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox'], loss_anchor_shape=losses['loss_shape'], loss_anchor_loc=losses['loss_loc']) def get_bboxes_single(self, cls_scores, bbox_preds, mlvl_anchors, mlvl_masks, img_shape, scale_factor, cfg, rescale=False): mlvl_proposals = [] for idx in range(len(cls_scores)): rpn_cls_score = cls_scores[idx] rpn_bbox_pred = bbox_preds[idx] anchors = mlvl_anchors[idx] mask = mlvl_masks[idx] assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:] # if no location is kept, end. if mask.sum() == 0: continue rpn_cls_score = rpn_cls_score.permute(1, 2, 0) if self.use_sigmoid_cls: rpn_cls_score = rpn_cls_score.reshape(-1) scores = rpn_cls_score.sigmoid() else: rpn_cls_score = rpn_cls_score.reshape(-1, 2) scores = rpn_cls_score.softmax(dim=1)[:, 1] # filter scores, bbox_pred w.r.t. mask. # anchors are filtered in get_anchors() beforehand. scores = scores[mask] rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4)[mask, :] if scores.dim() == 0: rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0) anchors = anchors.unsqueeze(0) scores = scores.unsqueeze(0) # filter anchors, bbox_pred, scores w.r.t. scores if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre: _, topk_inds = scores.topk(cfg.nms_pre) rpn_bbox_pred = rpn_bbox_pred[topk_inds, :] anchors = anchors[topk_inds, :] scores = scores[topk_inds] # get proposals w.r.t. anchors and rpn_bbox_pred proposals = delta2bbox(anchors, rpn_bbox_pred, self.target_means, self.target_stds, img_shape) # filter out too small bboxes if cfg.min_bbox_size > 0: w = proposals[:, 2] - proposals[:, 0] + 1 h = proposals[:, 3] - proposals[:, 1] + 1 valid_inds = torch.nonzero((w >= cfg.min_bbox_size) & (h >= cfg.min_bbox_size)).squeeze() proposals = proposals[valid_inds, :] scores = scores[valid_inds] proposals = torch.cat([proposals, scores.unsqueeze(-1)], dim=-1) # NMS in current level proposals, _ = nms(proposals, cfg.nms_thr) proposals = proposals[:cfg.nms_post, :] mlvl_proposals.append(proposals) proposals = torch.cat(mlvl_proposals, 0) if cfg.nms_across_levels: # NMS across multi levels proposals, _ = nms(proposals, cfg.nms_thr) proposals = proposals[:cfg.max_num, :] else: scores = proposals[:, 4] num = min(cfg.max_num, proposals.shape[0]) _, topk_inds = scores.topk(num) proposals = proposals[topk_inds, :] return proposals
4,981
37.921875
78
py
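In GARPNHead.get_bboxes_single above, only locations kept by the guided-anchor location mask contribute scores and box deltas. A minimal illustration of that masking follows; the 0.5 threshold stands in for the head's loc_filter_thr, and the tensor shapes are made up for the example.

import torch

loc_pred = torch.randn(1, 4, 4)                       # (1, H, W) location logits
mask = (loc_pred.sigmoid()[0] >= 0.5).reshape(-1)     # (H*W,) keep/drop flags
scores = torch.rand(4 * 4)                            # per-location objectness
bbox_pred = torch.randn(4, 4, 4).permute(1, 2, 0).reshape(-1, 4)
scores, bbox_pred = scores[mask], bbox_pred[mask, :]
print(scores.shape[0], bbox_pred.shape[0])            # identical kept counts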
AlignShift
AlignShift-master/mmdet/models/anchor_heads/ga_retina_head.py
import torch.nn as nn from mmcv.cnn import normal_init from mmdet.ops import MaskedConv2d from ..registry import HEADS from ..utils import ConvModule, bias_init_with_prob from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead @HEADS.register_module class GARetinaHead(GuidedAnchorHead): """Guided-Anchor-based RetinaNet head.""" def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=None, **kwargs): self.stacked_convs = stacked_convs self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg super(GARetinaHead, self).__init__(num_classes, in_channels, **kwargs) def _init_layers(self): self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1) self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2, 1) self.feature_adaption_cls = FeatureAdaption( self.feat_channels, self.feat_channels, kernel_size=3, deformable_groups=self.deformable_groups) self.feature_adaption_reg = FeatureAdaption( self.feat_channels, self.feat_channels, kernel_size=3, deformable_groups=self.deformable_groups) self.retina_cls = MaskedConv2d( self.feat_channels, self.num_anchors * self.cls_out_channels, 3, padding=1) self.retina_reg = MaskedConv2d( self.feat_channels, self.num_anchors * 4, 3, padding=1) def init_weights(self): for m in self.cls_convs: normal_init(m.conv, std=0.01) for m in self.reg_convs: normal_init(m.conv, std=0.01) self.feature_adaption_cls.init_weights() self.feature_adaption_reg.init_weights() bias_cls = bias_init_with_prob(0.01) normal_init(self.conv_loc, std=0.01, bias=bias_cls) normal_init(self.conv_shape, std=0.01) normal_init(self.retina_cls, std=0.01, bias=bias_cls) normal_init(self.retina_reg, std=0.01) def forward_single(self, x): cls_feat = x reg_feat = x for cls_conv in self.cls_convs: cls_feat = cls_conv(cls_feat) for reg_conv in self.reg_convs: reg_feat = reg_conv(reg_feat) loc_pred = self.conv_loc(cls_feat) shape_pred = self.conv_shape(reg_feat) cls_feat = self.feature_adaption_cls(cls_feat, shape_pred) reg_feat = self.feature_adaption_reg(reg_feat, shape_pred) if not self.training: mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr else: mask = None cls_score = self.retina_cls(cls_feat, mask) bbox_pred = self.retina_reg(reg_feat, mask) return cls_score, bbox_pred, shape_pred, loc_pred
3,760
33.824074
78
py
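GARetinaHead above adapts its classification and regression features with two FeatureAdaption modules (defined in guided_anchor_head.py later in this dump): a 1x1 conv maps the 2-channel shape prediction to the offset field of a 3x3 deformable conv. The channel bookkeeping for the defaults (kernel_size=3, deformable_groups=4), shown as a small sketch:

kernel_size, deformable_groups = 3, 4
offset_channels = kernel_size * kernel_size * 2        # x/y offset per kernel tap
print(offset_channels)                                 # 18
print(deformable_groups * offset_channels)             # 72 conv_offset output channels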
AlignShift
AlignShift-master/mmdet/models/anchor_heads/ssd_head.py
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from mmcv.cnn import xavier_init from mmdet.core import AnchorGenerator, anchor_target, multi_apply from ..losses import smooth_l1_loss from ..registry import HEADS from .anchor_head import AnchorHead # TODO: add loss evaluator for SSD @HEADS.register_module class SSDHead(AnchorHead): def __init__(self, input_size=300, num_classes=81, in_channels=(512, 1024, 512, 256, 256, 256), anchor_strides=(8, 16, 32, 64, 100, 300), basesize_ratio_range=(0.1, 0.9), anchor_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]), target_means=(.0, .0, .0, .0), target_stds=(1.0, 1.0, 1.0, 1.0)): super(AnchorHead, self).__init__() self.input_size = input_size self.num_classes = num_classes self.in_channels = in_channels self.cls_out_channels = num_classes num_anchors = [len(ratios) * 2 + 2 for ratios in anchor_ratios] reg_convs = [] cls_convs = [] for i in range(len(in_channels)): reg_convs.append( nn.Conv2d( in_channels[i], num_anchors[i] * 4, kernel_size=3, padding=1)) cls_convs.append( nn.Conv2d( in_channels[i], num_anchors[i] * num_classes, kernel_size=3, padding=1)) self.reg_convs = nn.ModuleList(reg_convs) self.cls_convs = nn.ModuleList(cls_convs) min_ratio, max_ratio = basesize_ratio_range min_ratio = int(min_ratio * 100) max_ratio = int(max_ratio * 100) step = int(np.floor(max_ratio - min_ratio) / (len(in_channels) - 2)) min_sizes = [] max_sizes = [] for r in range(int(min_ratio), int(max_ratio) + 1, step): min_sizes.append(int(input_size * r / 100)) max_sizes.append(int(input_size * (r + step) / 100)) if input_size == 300: if basesize_ratio_range[0] == 0.15: # SSD300 COCO min_sizes.insert(0, int(input_size * 7 / 100)) max_sizes.insert(0, int(input_size * 15 / 100)) elif basesize_ratio_range[0] == 0.2: # SSD300 VOC min_sizes.insert(0, int(input_size * 10 / 100)) max_sizes.insert(0, int(input_size * 20 / 100)) elif input_size == 512: if basesize_ratio_range[0] == 0.1: # SSD512 COCO min_sizes.insert(0, int(input_size * 4 / 100)) max_sizes.insert(0, int(input_size * 10 / 100)) elif basesize_ratio_range[0] == 0.15: # SSD512 VOC min_sizes.insert(0, int(input_size * 7 / 100)) max_sizes.insert(0, int(input_size * 15 / 100)) self.anchor_generators = [] self.anchor_strides = anchor_strides for k in range(len(anchor_strides)): base_size = min_sizes[k] stride = anchor_strides[k] ctr = ((stride - 1) / 2., (stride - 1) / 2.) scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])] ratios = [1.] 
for r in anchor_ratios[k]: ratios += [1 / r, r] # 4 or 6 ratio anchor_generator = AnchorGenerator( base_size, scales, ratios, scale_major=False, ctr=ctr) indices = list(range(len(ratios))) indices.insert(1, len(indices)) anchor_generator.base_anchors = torch.index_select( anchor_generator.base_anchors, 0, torch.LongTensor(indices)) self.anchor_generators.append(anchor_generator) self.target_means = target_means self.target_stds = target_stds self.use_sigmoid_cls = False self.cls_focal_loss = False self.fp16_enabled = False def init_weights(self): for m in self.modules(): if isinstance(m, nn.Conv2d): xavier_init(m, distribution='uniform', bias=0) def forward(self, feats): cls_scores = [] bbox_preds = [] for feat, reg_conv, cls_conv in zip(feats, self.reg_convs, self.cls_convs): cls_scores.append(cls_conv(feat)) bbox_preds.append(reg_conv(feat)) return cls_scores, bbox_preds def loss_single(self, cls_score, bbox_pred, labels, label_weights, bbox_targets, bbox_weights, num_total_samples, cfg): loss_cls_all = F.cross_entropy( cls_score, labels, reduction='none') * label_weights pos_inds = (labels > 0).nonzero().view(-1) neg_inds = (labels == 0).nonzero().view(-1) num_pos_samples = pos_inds.size(0) num_neg_samples = cfg.neg_pos_ratio * num_pos_samples if num_neg_samples > neg_inds.size(0): num_neg_samples = neg_inds.size(0) topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples) loss_cls_pos = loss_cls_all[pos_inds].sum() loss_cls_neg = topk_loss_cls_neg.sum() loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples loss_bbox = smooth_l1_loss( bbox_pred, bbox_targets, bbox_weights, beta=cfg.smoothl1_beta, avg_factor=num_total_samples) return loss_cls[None], loss_bbox def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, cfg, gt_bboxes_ignore=None): featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == len(self.anchor_generators) device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, img_metas, device=device) cls_reg_targets = anchor_target( anchor_list, valid_flag_list, gt_bboxes, img_metas, self.target_means, self.target_stds, cfg, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=1, sampling=False, unmap_outputs=False) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_images = len(img_metas) all_cls_scores = torch.cat([ s.permute(0, 2, 3, 1).reshape( num_images, -1, self.cls_out_channels) for s in cls_scores ], 1) all_labels = torch.cat(labels_list, -1).view(num_images, -1) all_label_weights = torch.cat(label_weights_list, -1).view(num_images, -1) all_bbox_preds = torch.cat([ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4) for b in bbox_preds ], -2) all_bbox_targets = torch.cat(bbox_targets_list, -2).view(num_images, -1, 4) all_bbox_weights = torch.cat(bbox_weights_list, -2).view(num_images, -1, 4) losses_cls, losses_bbox = multi_apply( self.loss_single, all_cls_scores, all_bbox_preds, all_labels, all_label_weights, all_bbox_targets, all_bbox_weights, num_total_samples=num_total_pos, cfg=cfg) return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
7,762
38.607143
79
py
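SSDHead.__init__ above builds its per-level anchor sizes from basesize_ratio_range and then prepends an extra low-resolution level for the common SSD300/SSD512 settings. A worked example for the SSD300-COCO configuration (basesize_ratio_range=(0.15, 0.9), six feature levels), reproducing only that arithmetic:

import numpy as np

input_size, num_levels = 300, 6
min_ratio, max_ratio = int(0.15 * 100), int(0.9 * 100)
step = int(np.floor(max_ratio - min_ratio) / (num_levels - 2))
min_sizes, max_sizes = [], []
for r in range(min_ratio, max_ratio + 1, step):
    min_sizes.append(int(input_size * r / 100))
    max_sizes.append(int(input_size * (r + step) / 100))
# extra low-resolution level prepended for the 0.15 COCO setting
min_sizes.insert(0, int(input_size * 7 / 100))
max_sizes.insert(0, int(input_size * 15 / 100))
print(min_sizes)   # [21, 45, 99, 153, 207, 261]
print(max_sizes)   # [45, 99, 153, 207, 261, 315]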
AlignShift
AlignShift-master/mmdet/models/anchor_heads/fcos_head.py
import torch import torch.nn as nn from mmcv.cnn import normal_init from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms from ..builder import build_loss from ..registry import HEADS from ..utils import ConvModule, Scale, bias_init_with_prob INF = 1e8 @HEADS.register_module class FCOSHead(nn.Module): """ Fully Convolutional One-Stage Object Detection head from [1]_. The FCOS head does not use anchor boxes. Instead bounding boxes are predicted at each pixel and a centerness measure is used to supress low-quality predictions. References: .. [1] https://arxiv.org/abs/1904.01355 Example: >>> self = FCOSHead(11, 7) >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] >>> cls_score, bbox_pred, centerness = self.forward(feats) >>> assert len(cls_score) == len(self.scales) """ def __init__(self, num_classes, in_channels, feat_channels=256, stacked_convs=4, strides=(4, 8, 16, 32, 64), regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512), (512, INF)), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='IoULoss', loss_weight=1.0), loss_centerness=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), conv_cfg=None, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)): super(FCOSHead, self).__init__() self.num_classes = num_classes self.cls_out_channels = num_classes - 1 self.in_channels = in_channels self.feat_channels = feat_channels self.stacked_convs = stacked_convs self.strides = strides self.regress_ranges = regress_ranges self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) self.loss_centerness = build_loss(loss_centerness) self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.fp16_enabled = False self._init_layers() def _init_layers(self): self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.fcos_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) self.fcos_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1) self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) def init_weights(self): for m in self.cls_convs: normal_init(m.conv, std=0.01) for m in self.reg_convs: normal_init(m.conv, std=0.01) bias_cls = bias_init_with_prob(0.01) normal_init(self.fcos_cls, std=0.01, bias=bias_cls) normal_init(self.fcos_reg, std=0.01) normal_init(self.fcos_centerness, std=0.01) def forward(self, feats): return multi_apply(self.forward_single, feats, self.scales) def forward_single(self, x, scale): cls_feat = x reg_feat = x for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) cls_score = self.fcos_cls(cls_feat) centerness = self.fcos_centerness(cls_feat) for reg_layer in self.reg_convs: reg_feat = reg_layer(reg_feat) # scale the bbox_pred of different level # float to avoid overflow when enabling FP16 bbox_pred = scale(self.fcos_reg(reg_feat)).float().exp() return cls_score, bbox_pred, centerness @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses')) def loss(self, cls_scores, bbox_preds, centernesses, gt_bboxes, 
gt_labels, img_metas, cfg, gt_bboxes_ignore=None): assert len(cls_scores) == len(bbox_preds) == len(centernesses) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device) labels, bbox_targets = self.fcos_target(all_level_points, gt_bboxes, gt_labels) num_imgs = cls_scores[0].size(0) # flatten cls_scores, bbox_preds and centerness flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds ] flatten_centerness = [ centerness.permute(0, 2, 3, 1).reshape(-1) for centerness in centernesses ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) flatten_centerness = torch.cat(flatten_centerness) flatten_labels = torch.cat(labels) flatten_bbox_targets = torch.cat(bbox_targets) # repeat points to align with bbox_preds flatten_points = torch.cat( [points.repeat(num_imgs, 1) for points in all_level_points]) pos_inds = flatten_labels.nonzero().reshape(-1) num_pos = len(pos_inds) loss_cls = self.loss_cls( flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs) # avoid num_pos is 0 pos_bbox_preds = flatten_bbox_preds[pos_inds] pos_centerness = flatten_centerness[pos_inds] if num_pos > 0: pos_bbox_targets = flatten_bbox_targets[pos_inds] pos_centerness_targets = self.centerness_target(pos_bbox_targets) pos_points = flatten_points[pos_inds] pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds) pos_decoded_target_preds = distance2bbox(pos_points, pos_bbox_targets) # centerness weighted iou loss loss_bbox = self.loss_bbox( pos_decoded_bbox_preds, pos_decoded_target_preds, weight=pos_centerness_targets, avg_factor=pos_centerness_targets.sum()) loss_centerness = self.loss_centerness(pos_centerness, pos_centerness_targets) else: loss_bbox = pos_bbox_preds.sum() loss_centerness = pos_centerness.sum() return dict( loss_cls=loss_cls, loss_bbox=loss_bbox, loss_centerness=loss_centerness) @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses')) def get_bboxes(self, cls_scores, bbox_preds, centernesses, img_metas, cfg, rescale=None): assert len(cls_scores) == len(bbox_preds) num_levels = len(cls_scores) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device) result_list = [] for img_id in range(len(img_metas)): cls_score_list = [ cls_scores[i][img_id].detach() for i in range(num_levels) ] bbox_pred_list = [ bbox_preds[i][img_id].detach() for i in range(num_levels) ] centerness_pred_list = [ centernesses[i][img_id].detach() for i in range(num_levels) ] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] det_bboxes = self.get_bboxes_single(cls_score_list, bbox_pred_list, centerness_pred_list, mlvl_points, img_shape, scale_factor, cfg, rescale) result_list.append(det_bboxes) return result_list def get_bboxes_single(self, cls_scores, bbox_preds, centernesses, mlvl_points, img_shape, scale_factor, cfg, rescale=False): assert len(cls_scores) == len(bbox_preds) == len(mlvl_points) mlvl_bboxes = [] mlvl_scores = [] mlvl_centerness = [] for cls_score, bbox_pred, centerness, points in zip( cls_scores, bbox_preds, centernesses, mlvl_points): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] scores = cls_score.permute(1, 2, 0).reshape( -1, 
self.cls_out_channels).sigmoid() centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid() bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) nms_pre = cfg.get('nms_pre', -1) if nms_pre > 0 and scores.shape[0] > nms_pre: max_scores, _ = (scores * centerness[:, None]).max(dim=1) _, topk_inds = max_scores.topk(nms_pre) points = points[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] centerness = centerness[topk_inds] bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_centerness.append(centerness) mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([padding, mlvl_scores], dim=1) mlvl_centerness = torch.cat(mlvl_centerness) det_bboxes, det_labels = multiclass_nms( mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img, score_factors=mlvl_centerness) return det_bboxes, det_labels def get_points(self, featmap_sizes, dtype, device): """Get points according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. dtype (torch.dtype): Type of points. device (torch.device): Device of points. Returns: tuple: points of each image. """ mlvl_points = [] for i in range(len(featmap_sizes)): mlvl_points.append( self.get_points_single(featmap_sizes[i], self.strides[i], dtype, device)) return mlvl_points def get_points_single(self, featmap_size, stride, dtype, device): h, w = featmap_size x_range = torch.arange( 0, w * stride, stride, dtype=dtype, device=device) y_range = torch.arange( 0, h * stride, stride, dtype=dtype, device=device) y, x = torch.meshgrid(y_range, x_range) points = torch.stack( (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2 return points def fcos_target(self, points, gt_bboxes_list, gt_labels_list): assert len(points) == len(self.regress_ranges) num_levels = len(points) # expand regress ranges to align with points expanded_regress_ranges = [ points[i].new_tensor(self.regress_ranges[i])[None].expand_as( points[i]) for i in range(num_levels) ] # concat all levels points and regress ranges concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) concat_points = torch.cat(points, dim=0) # get labels and bbox_targets of each image labels_list, bbox_targets_list = multi_apply( self.fcos_target_single, gt_bboxes_list, gt_labels_list, points=concat_points, regress_ranges=concat_regress_ranges) # split to per img, per level num_points = [center.size(0) for center in points] labels_list = [labels.split(num_points, 0) for labels in labels_list] bbox_targets_list = [ bbox_targets.split(num_points, 0) for bbox_targets in bbox_targets_list ] # concat per level image concat_lvl_labels = [] concat_lvl_bbox_targets = [] for i in range(num_levels): concat_lvl_labels.append( torch.cat([labels[i] for labels in labels_list])) concat_lvl_bbox_targets.append( torch.cat( [bbox_targets[i] for bbox_targets in bbox_targets_list])) return concat_lvl_labels, concat_lvl_bbox_targets def fcos_target_single(self, gt_bboxes, gt_labels, points, regress_ranges): num_points = points.size(0) num_gts = gt_labels.size(0) if num_gts == 0: return gt_labels.new_zeros(num_points), \ gt_bboxes.new_zeros((num_points, 4)) areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * ( gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1) # TODO: figure out why these two are different # areas = areas[None].expand(num_points, num_gts) 
areas = areas[None].repeat(num_points, 1) regress_ranges = regress_ranges[:, None, :].expand( num_points, num_gts, 2) gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) xs, ys = points[:, 0], points[:, 1] xs = xs[:, None].expand(num_points, num_gts) ys = ys[:, None].expand(num_points, num_gts) left = xs - gt_bboxes[..., 0] right = gt_bboxes[..., 2] - xs top = ys - gt_bboxes[..., 1] bottom = gt_bboxes[..., 3] - ys bbox_targets = torch.stack((left, top, right, bottom), -1) # condition1: inside a gt bbox inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 # condition2: limit the regression range for each location max_regress_distance = bbox_targets.max(-1)[0] inside_regress_range = ( max_regress_distance >= regress_ranges[..., 0]) & ( max_regress_distance <= regress_ranges[..., 1]) # if there are still more than one objects for a location, # we choose the one with minimal area areas[inside_gt_bbox_mask == 0] = INF areas[inside_regress_range == 0] = INF min_area, min_area_inds = areas.min(dim=1) labels = gt_labels[min_area_inds] labels[min_area == INF] = 0 bbox_targets = bbox_targets[range(num_points), min_area_inds] return labels, bbox_targets def centerness_target(self, pos_bbox_targets): # only calculate pos centerness targets, otherwise there may be nan left_right = pos_bbox_targets[:, [0, 2]] top_bottom = pos_bbox_targets[:, [1, 3]] centerness_targets = ( left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * ( top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]) return torch.sqrt(centerness_targets)
16,509
39.366748
79
py
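A standalone illustration of FCOSHead.centerness_target above: for the (left, top, right, bottom) regression targets of positive locations, the centerness is the geometric mean of the two min/max ratios, so it is 1.0 at the box centre and decays towards 0 near the border. The two sample targets below are invented for the example.

import torch

pos_bbox_targets = torch.tensor([[10., 10., 10., 10.],   # exact centre
                                 [ 2., 10., 18., 10.]])  # off-centre in x
left_right = pos_bbox_targets[:, [0, 2]]
top_bottom = pos_bbox_targets[:, [1, 3]]
centerness = torch.sqrt(
    (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *
    (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
print(centerness)   # tensor([1.0000, 0.3333])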
AlignShift
AlignShift-master/mmdet/models/anchor_heads/free_anchor_retina_head.py
import torch import torch.nn.functional as F from mmdet.core import bbox2delta, bbox_overlaps, delta2bbox from ..registry import HEADS from .retina_head import RetinaHead @HEADS.register_module class FreeAnchorRetinaHead(RetinaHead): def __init__(self, num_classes, in_channels, stacked_convs=4, octave_base_scale=4, scales_per_octave=3, conv_cfg=None, norm_cfg=None, pre_anchor_topk=50, bbox_thr=0.6, gamma=2.0, alpha=0.5, **kwargs): super(FreeAnchorRetinaHead, self).__init__(num_classes, in_channels, stacked_convs, octave_base_scale, scales_per_octave, conv_cfg, norm_cfg, **kwargs) self.pre_anchor_topk = pre_anchor_topk self.bbox_thr = bbox_thr self.gamma = gamma self.alpha = alpha def loss(self, cls_scores, bbox_preds, gt_bboxes, gt_labels, img_metas, cfg, gt_bboxes_ignore=None): featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == len(self.anchor_generators) anchor_list, _ = self.get_anchors(featmap_sizes, img_metas) anchors = [torch.cat(anchor) for anchor in anchor_list] # concatenate each level cls_scores = [ cls.permute(0, 2, 3, 1).reshape(cls.size(0), -1, self.cls_out_channels) for cls in cls_scores ] bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(bbox_pred.size(0), -1, 4) for bbox_pred in bbox_preds ] cls_scores = torch.cat(cls_scores, dim=1) bbox_preds = torch.cat(bbox_preds, dim=1) cls_prob = torch.sigmoid(cls_scores) box_prob = [] num_pos = 0 positive_losses = [] for _, (anchors_, gt_labels_, gt_bboxes_, cls_prob_, bbox_preds_) in enumerate( zip(anchors, gt_labels, gt_bboxes, cls_prob, bbox_preds)): gt_labels_ -= 1 with torch.no_grad(): # box_localization: a_{j}^{loc}, shape: [j, 4] pred_boxes = delta2bbox(anchors_, bbox_preds_, self.target_means, self.target_stds) # object_box_iou: IoU_{ij}^{loc}, shape: [i, j] object_box_iou = bbox_overlaps(gt_bboxes_, pred_boxes) # object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j] t1 = self.bbox_thr t2 = object_box_iou.max( dim=1, keepdim=True).values.clamp(min=t1 + 1e-12) object_box_prob = ((object_box_iou - t1) / (t2 - t1)).clamp( min=0, max=1) # object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j] num_obj = gt_labels_.size(0) indices = torch.stack( [torch.arange(num_obj).type_as(gt_labels_), gt_labels_], dim=0) object_cls_box_prob = torch.sparse_coo_tensor( indices, object_box_prob) # image_box_iou: P{a_{j} \in A_{+}}, shape: [c, j] """ from "start" to "end" implement: image_box_iou = torch.sparse.max(object_cls_box_prob, dim=0).t() """ # start box_cls_prob = torch.sparse.sum( object_cls_box_prob, dim=0).to_dense() indices = torch.nonzero(box_cls_prob).t_() if indices.numel() == 0: image_box_prob = torch.zeros( anchors_.size(0), self.cls_out_channels).type_as(object_box_prob) else: nonzero_box_prob = torch.where( (gt_labels_.unsqueeze(dim=-1) == indices[0]), object_box_prob[:, indices[1]], torch.tensor( [0]).type_as(object_box_prob)).max(dim=0).values # upmap to shape [j, c] image_box_prob = torch.sparse_coo_tensor( indices.flip([0]), nonzero_box_prob, size=(anchors_.size(0), self.cls_out_channels)).to_dense() # end box_prob.append(image_box_prob) # construct bags for objects match_quality_matrix = bbox_overlaps(gt_bboxes_, anchors_) _, matched = torch.topk( match_quality_matrix, self.pre_anchor_topk, dim=1, sorted=False) del match_quality_matrix # matched_cls_prob: P_{ij}^{cls} matched_cls_prob = torch.gather( cls_prob_[matched], 2, gt_labels_.view(-1, 1, 1).repeat(1, self.pre_anchor_topk, 1)).squeeze(2) # matched_box_prob: P_{ij}^{loc} matched_anchors = anchors_[matched] 
matched_object_targets = bbox2delta( matched_anchors, gt_bboxes_.unsqueeze(dim=1).expand_as(matched_anchors), self.target_means, self.target_stds) loss_bbox = self.loss_bbox( bbox_preds_[matched], matched_object_targets, reduction_override='none').sum(-1) matched_box_prob = torch.exp(-loss_bbox) # positive_losses: {-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )} num_pos += len(gt_bboxes_) positive_losses.append( self.positive_bag_loss(matched_cls_prob, matched_box_prob)) positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos) # box_prob: P{a_{j} \in A_{+}} box_prob = torch.stack(box_prob, dim=0) # negative_loss: # \sum_{j}{ FL((1 - P{a_{j} \in A_{+}}) * (1 - P_{j}^{bg})) } / n||B|| negative_loss = self.negative_bag_loss(cls_prob, box_prob).sum() / max( 1, num_pos * self.pre_anchor_topk) losses = { 'positive_bag_loss': positive_loss, 'negative_bag_loss': negative_loss } return losses def positive_bag_loss(self, matched_cls_prob, matched_box_prob): # bag_prob = Mean-max(matched_prob) matched_prob = matched_cls_prob * matched_box_prob weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None) weight /= weight.sum(dim=1).unsqueeze(dim=-1) bag_prob = (weight * matched_prob).sum(dim=1) # positive_bag_loss = -self.alpha * log(bag_prob) return self.alpha * F.binary_cross_entropy( bag_prob, torch.ones_like(bag_prob), reduction='none') def negative_bag_loss(self, cls_prob, box_prob): prob = cls_prob * (1 - box_prob) negative_bag_loss = prob**self.gamma * F.binary_cross_entropy( prob, torch.zeros_like(prob), reduction='none') return (1 - self.alpha) * negative_bag_loss
7,396
38.137566
79
py
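FreeAnchorRetinaHead.positive_bag_loss above uses a Mean-max weighting: anchors whose combined classification-localization probability is close to 1 receive almost all of the weight, so the bag probability approaches the best anchor's probability while remaining differentiable. A standalone sketch with one object and three anchors (the probabilities are invented for the example; alpha=0.5 matches the default above):

import torch
import torch.nn.functional as F

matched_prob = torch.tensor([[0.9, 0.5, 0.1]])           # one object, 3 anchors
weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None)
weight /= weight.sum(dim=1).unsqueeze(dim=-1)
bag_prob = (weight * matched_prob).sum(dim=1)
print(bag_prob)    # ~0.77, close to the best anchor's probability
loss = 0.5 * F.binary_cross_entropy(bag_prob, torch.ones_like(bag_prob),
                                    reduction='none')    # alpha = 0.5
print(loss)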
AlignShift
AlignShift-master/mmdet/models/anchor_heads/guided_anchor_head.py
from __future__ import division import numpy as np import torch import torch.nn as nn from mmcv.cnn import normal_init from mmdet.core import (AnchorGenerator, anchor_inside_flags, anchor_target, delta2bbox, force_fp32, ga_loc_target, ga_shape_target, multi_apply, multiclass_nms) from mmdet.ops import DeformConv, MaskedConv2d from ..builder import build_loss from ..registry import HEADS from ..utils import bias_init_with_prob from .anchor_head import AnchorHead class FeatureAdaption(nn.Module): """Feature Adaption Module. Feature Adaption Module is implemented based on DCN v1. It uses anchor shape prediction rather than feature map to predict offsets of deformable conv layer. Args: in_channels (int): Number of channels in the input feature map. out_channels (int): Number of channels in the output feature map. kernel_size (int): Deformable conv kernel size. deformable_groups (int): Deformable conv group size. """ def __init__(self, in_channels, out_channels, kernel_size=3, deformable_groups=4): super(FeatureAdaption, self).__init__() offset_channels = kernel_size * kernel_size * 2 self.conv_offset = nn.Conv2d( 2, deformable_groups * offset_channels, 1, bias=False) self.conv_adaption = DeformConv( in_channels, out_channels, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, deformable_groups=deformable_groups) self.relu = nn.ReLU(inplace=True) def init_weights(self): normal_init(self.conv_offset, std=0.1) normal_init(self.conv_adaption, std=0.01) def forward(self, x, shape): offset = self.conv_offset(shape.detach()) x = self.relu(self.conv_adaption(x, offset)) return x @HEADS.register_module class GuidedAnchorHead(AnchorHead): """Guided-Anchor-based head (GA-RPN, GA-RetinaNet, etc.). This GuidedAnchorHead will predict high-quality feature guided anchors and locations where anchors will be kept in inference. There are mainly 3 categories of bounding-boxes. - Sampled (9) pairs for target assignment. (approxes) - The square boxes where the predicted anchors are based on. (squares) - Guided anchors. Please refer to https://arxiv.org/abs/1901.03278 for more details. Args: num_classes (int): Number of classes. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. octave_base_scale (int): Base octave scale of each level of feature map. scales_per_octave (int): Number of octave scales in each level of feature map octave_ratios (Iterable): octave aspect ratios. anchor_strides (Iterable): Anchor strides. anchor_base_sizes (Iterable): Anchor base sizes. anchoring_means (Iterable): Mean values of anchoring targets. anchoring_stds (Iterable): Std values of anchoring targets. target_means (Iterable): Mean values of regression targets. target_stds (Iterable): Std values of regression targets. deformable_groups: (int): Group number of DCN in FeatureAdaption module. loc_filter_thr (float): Threshold to filter out unconcerned regions. loss_loc (dict): Config of location loss. loss_shape (dict): Config of anchor shape loss. loss_cls (dict): Config of classification loss. loss_bbox (dict): Config of bbox regression loss. 
""" def __init__( self, num_classes, in_channels, feat_channels=256, octave_base_scale=8, scales_per_octave=3, octave_ratios=[0.5, 1.0, 2.0], anchor_strides=[4, 8, 16, 32, 64], anchor_base_sizes=None, anchoring_means=(.0, .0, .0, .0), anchoring_stds=(1.0, 1.0, 1.0, 1.0), target_means=(.0, .0, .0, .0), target_stds=(1.0, 1.0, 1.0, 1.0), deformable_groups=4, loc_filter_thr=0.01, loss_loc=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)): # yapf: disable super(AnchorHead, self).__init__() self.in_channels = in_channels self.num_classes = num_classes self.feat_channels = feat_channels self.octave_base_scale = octave_base_scale self.scales_per_octave = scales_per_octave self.octave_scales = octave_base_scale * np.array( [2**(i / scales_per_octave) for i in range(scales_per_octave)]) self.approxs_per_octave = len(self.octave_scales) * len(octave_ratios) self.octave_ratios = octave_ratios self.anchor_strides = anchor_strides self.anchor_base_sizes = list( anchor_strides) if anchor_base_sizes is None else anchor_base_sizes self.anchoring_means = anchoring_means self.anchoring_stds = anchoring_stds self.target_means = target_means self.target_stds = target_stds self.deformable_groups = deformable_groups self.loc_filter_thr = loc_filter_thr self.approx_generators = [] self.square_generators = [] for anchor_base in self.anchor_base_sizes: # Generators for approxs self.approx_generators.append( AnchorGenerator(anchor_base, self.octave_scales, self.octave_ratios)) # Generators for squares self.square_generators.append( AnchorGenerator(anchor_base, [self.octave_base_scale], [1.0])) # one anchor per location self.num_anchors = 1 self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) self.cls_focal_loss = loss_cls['type'] in ['FocalLoss'] self.loc_focal_loss = loss_loc['type'] in ['FocalLoss'] if self.use_sigmoid_cls: self.cls_out_channels = self.num_classes - 1 else: self.cls_out_channels = self.num_classes # build losses self.loss_loc = build_loss(loss_loc) self.loss_shape = build_loss(loss_shape) self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) self.fp16_enabled = False self._init_layers() def _init_layers(self): self.relu = nn.ReLU(inplace=True) self.conv_loc = nn.Conv2d(self.in_channels, 1, 1) self.conv_shape = nn.Conv2d(self.in_channels, self.num_anchors * 2, 1) self.feature_adaption = FeatureAdaption( self.in_channels, self.feat_channels, kernel_size=3, deformable_groups=self.deformable_groups) self.conv_cls = MaskedConv2d(self.feat_channels, self.num_anchors * self.cls_out_channels, 1) self.conv_reg = MaskedConv2d(self.feat_channels, self.num_anchors * 4, 1) def init_weights(self): normal_init(self.conv_cls, std=0.01) normal_init(self.conv_reg, std=0.01) bias_cls = bias_init_with_prob(0.01) normal_init(self.conv_loc, std=0.01, bias=bias_cls) normal_init(self.conv_shape, std=0.01) self.feature_adaption.init_weights() def forward_single(self, x): loc_pred = self.conv_loc(x) shape_pred = self.conv_shape(x) x = self.feature_adaption(x, shape_pred) # masked conv is only used during inference for speed-up if not self.training: mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr else: mask = None cls_score = self.conv_cls(x, mask) bbox_pred = self.conv_reg(x, mask) return cls_score, bbox_pred, shape_pred, loc_pred def forward(self, feats): 
return multi_apply(self.forward_single, feats) def get_sampled_approxs(self, featmap_sizes, img_metas, cfg, device='cuda'): """Get sampled approxs and inside flags according to feature map sizes. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. img_metas (list[dict]): Image meta info. device (torch.device | str): device for returned tensors Returns: tuple: approxes of each image, inside flags of each image """ num_imgs = len(img_metas) num_levels = len(featmap_sizes) # since feature map sizes of all images are the same, we only compute # approxes for one time multi_level_approxs = [] for i in range(num_levels): approxs = self.approx_generators[i].grid_anchors( featmap_sizes[i], self.anchor_strides[i], device=device) multi_level_approxs.append(approxs) approxs_list = [multi_level_approxs for _ in range(num_imgs)] # for each image, we compute inside flags of multi level approxes inside_flag_list = [] for img_id, img_meta in enumerate(img_metas): multi_level_flags = [] multi_level_approxs = approxs_list[img_id] for i in range(num_levels): approxs = multi_level_approxs[i] anchor_stride = self.anchor_strides[i] feat_h, feat_w = featmap_sizes[i] h, w, _ = img_meta['pad_shape'] valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h) valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w) flags = self.approx_generators[i].valid_flags( (feat_h, feat_w), (valid_feat_h, valid_feat_w), device=device) inside_flags_list = [] for i in range(self.approxs_per_octave): split_valid_flags = flags[i::self.approxs_per_octave] split_approxs = approxs[i::self.approxs_per_octave, :] inside_flags = anchor_inside_flags( split_approxs, split_valid_flags, img_meta['img_shape'][:2], cfg.allowed_border) inside_flags_list.append(inside_flags) # inside_flag for a position is true if any anchor in this # position is true inside_flags = ( torch.stack(inside_flags_list, 0).sum(dim=0) > 0) multi_level_flags.append(inside_flags) inside_flag_list.append(multi_level_flags) return approxs_list, inside_flag_list def get_anchors(self, featmap_sizes, shape_preds, loc_preds, img_metas, use_loc_filter=False, device='cuda'): """Get squares according to feature map sizes and guided anchors. Args: featmap_sizes (list[tuple]): Multi-level feature map sizes. shape_preds (list[tensor]): Multi-level shape predictions. loc_preds (list[tensor]): Multi-level location predictions. img_metas (list[dict]): Image meta info. use_loc_filter (bool): Use loc filter or not. 
device (torch.device | str): device for returned tensors Returns: tuple: square approxs of each image, guided anchors of each image, loc masks of each image """ num_imgs = len(img_metas) num_levels = len(featmap_sizes) # since feature map sizes of all images are the same, we only compute # squares for one time multi_level_squares = [] for i in range(num_levels): squares = self.square_generators[i].grid_anchors( featmap_sizes[i], self.anchor_strides[i], device=device) multi_level_squares.append(squares) squares_list = [multi_level_squares for _ in range(num_imgs)] # for each image, we compute multi level guided anchors guided_anchors_list = [] loc_mask_list = [] for img_id, img_meta in enumerate(img_metas): multi_level_guided_anchors = [] multi_level_loc_mask = [] for i in range(num_levels): squares = squares_list[img_id][i] shape_pred = shape_preds[i][img_id] loc_pred = loc_preds[i][img_id] guided_anchors, loc_mask = self.get_guided_anchors_single( squares, shape_pred, loc_pred, use_loc_filter=use_loc_filter) multi_level_guided_anchors.append(guided_anchors) multi_level_loc_mask.append(loc_mask) guided_anchors_list.append(multi_level_guided_anchors) loc_mask_list.append(multi_level_loc_mask) return squares_list, guided_anchors_list, loc_mask_list def get_guided_anchors_single(self, squares, shape_pred, loc_pred, use_loc_filter=False): """Get guided anchors and loc masks for a single level. Args: square (tensor): Squares of a single level. shape_pred (tensor): Shape predections of a single level. loc_pred (tensor): Loc predections of a single level. use_loc_filter (list[tensor]): Use loc filter or not. Returns: tuple: guided anchors, location masks """ # calculate location filtering mask loc_pred = loc_pred.sigmoid().detach() if use_loc_filter: loc_mask = loc_pred >= self.loc_filter_thr else: loc_mask = loc_pred >= 0.0 mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_anchors) mask = mask.contiguous().view(-1) # calculate guided anchors squares = squares[mask] anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view( -1, 2).detach()[mask] bbox_deltas = anchor_deltas.new_full(squares.size(), 0) bbox_deltas[:, 2:] = anchor_deltas guided_anchors = delta2bbox( squares, bbox_deltas, self.anchoring_means, self.anchoring_stds, wh_ratio_clip=1e-6) return guided_anchors, mask def loss_shape_single(self, shape_pred, bbox_anchors, bbox_gts, anchor_weights, anchor_total_num): shape_pred = shape_pred.permute(0, 2, 3, 1).contiguous().view(-1, 2) bbox_anchors = bbox_anchors.contiguous().view(-1, 4) bbox_gts = bbox_gts.contiguous().view(-1, 4) anchor_weights = anchor_weights.contiguous().view(-1, 4) bbox_deltas = bbox_anchors.new_full(bbox_anchors.size(), 0) bbox_deltas[:, 2:] += shape_pred # filter out negative samples to speed-up weighted_bounded_iou_loss inds = torch.nonzero(anchor_weights[:, 0] > 0).squeeze(1) bbox_deltas_ = bbox_deltas[inds] bbox_anchors_ = bbox_anchors[inds] bbox_gts_ = bbox_gts[inds] anchor_weights_ = anchor_weights[inds] pred_anchors_ = delta2bbox( bbox_anchors_, bbox_deltas_, self.anchoring_means, self.anchoring_stds, wh_ratio_clip=1e-6) loss_shape = self.loss_shape( pred_anchors_, bbox_gts_, anchor_weights_, avg_factor=anchor_total_num) return loss_shape def loss_loc_single(self, loc_pred, loc_target, loc_weight, loc_avg_factor, cfg): loss_loc = self.loss_loc( loc_pred.reshape(-1, 1), loc_target.reshape(-1, 1).long(), loc_weight.reshape(-1, 1), avg_factor=loc_avg_factor) return loss_loc @force_fp32( apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds')) 
def loss(self, cls_scores, bbox_preds, shape_preds, loc_preds, gt_bboxes, gt_labels, img_metas, cfg, gt_bboxes_ignore=None): featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] assert len(featmap_sizes) == len(self.approx_generators) device = cls_scores[0].device # get loc targets loc_targets, loc_weights, loc_avg_factor = ga_loc_target( gt_bboxes, featmap_sizes, self.octave_base_scale, self.anchor_strides, center_ratio=cfg.center_ratio, ignore_ratio=cfg.ignore_ratio) # get sampled approxes approxs_list, inside_flag_list = self.get_sampled_approxs( featmap_sizes, img_metas, cfg, device=device) # get squares and guided anchors squares_list, guided_anchors_list, _ = self.get_anchors( featmap_sizes, shape_preds, loc_preds, img_metas, device=device) # get shape targets sampling = False if not hasattr(cfg, 'ga_sampler') else True shape_targets = ga_shape_target( approxs_list, inside_flag_list, squares_list, gt_bboxes, img_metas, self.approxs_per_octave, cfg, sampling=sampling) if shape_targets is None: return None (bbox_anchors_list, bbox_gts_list, anchor_weights_list, anchor_fg_num, anchor_bg_num) = shape_targets anchor_total_num = ( anchor_fg_num if not sampling else anchor_fg_num + anchor_bg_num) # get anchor targets sampling = False if self.cls_focal_loss else True label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1 cls_reg_targets = anchor_target( guided_anchors_list, inside_flag_list, gt_bboxes, img_metas, self.target_means, self.target_stds, cfg, gt_bboxes_ignore_list=gt_bboxes_ignore, gt_labels_list=gt_labels, label_channels=label_channels, sampling=sampling) if cls_reg_targets is None: return None (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets num_total_samples = ( num_total_pos if self.cls_focal_loss else num_total_pos + num_total_neg) # get classification and bbox regression losses losses_cls, losses_bbox = multi_apply( self.loss_single, cls_scores, bbox_preds, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_samples=num_total_samples, cfg=cfg) # get anchor location loss losses_loc = [] for i in range(len(loc_preds)): loss_loc = self.loss_loc_single( loc_preds[i], loc_targets[i], loc_weights[i], loc_avg_factor=loc_avg_factor, cfg=cfg) losses_loc.append(loss_loc) # get anchor shape loss losses_shape = [] for i in range(len(shape_preds)): loss_shape = self.loss_shape_single( shape_preds[i], bbox_anchors_list[i], bbox_gts_list[i], anchor_weights_list[i], anchor_total_num=anchor_total_num) losses_shape.append(loss_shape) return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_shape=losses_shape, loss_loc=losses_loc) @force_fp32( apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds')) def get_bboxes(self, cls_scores, bbox_preds, shape_preds, loc_preds, img_metas, cfg, rescale=False): assert len(cls_scores) == len(bbox_preds) == len(shape_preds) == len( loc_preds) num_levels = len(cls_scores) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] device = cls_scores[0].device # get guided anchors _, guided_anchors, loc_masks = self.get_anchors( featmap_sizes, shape_preds, loc_preds, img_metas, use_loc_filter=not self.training, device=device) result_list = [] for img_id in range(len(img_metas)): cls_score_list = [ cls_scores[i][img_id].detach() for i in range(num_levels) ] bbox_pred_list = [ bbox_preds[i][img_id].detach() for i in range(num_levels) ] guided_anchor_list = [ guided_anchors[img_id][i].detach() for i in 
range(num_levels) ] loc_mask_list = [ loc_masks[img_id][i].detach() for i in range(num_levels) ] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] proposals = self.get_bboxes_single(cls_score_list, bbox_pred_list, guided_anchor_list, loc_mask_list, img_shape, scale_factor, cfg, rescale) result_list.append(proposals) return result_list def get_bboxes_single(self, cls_scores, bbox_preds, mlvl_anchors, mlvl_masks, img_shape, scale_factor, cfg, rescale=False): assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors) mlvl_bboxes = [] mlvl_scores = [] for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds, mlvl_anchors, mlvl_masks): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] # if no location is kept, end. if mask.sum() == 0: continue # reshape scores and bbox_pred cls_score = cls_score.permute(1, 2, 0).reshape(-1, self.cls_out_channels) if self.use_sigmoid_cls: scores = cls_score.sigmoid() else: scores = cls_score.softmax(-1) bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) # filter scores, bbox_pred w.r.t. mask. # anchors are filtered in get_anchors() beforehand. scores = scores[mask, :] bbox_pred = bbox_pred[mask, :] if scores.dim() == 0: anchors = anchors.unsqueeze(0) scores = scores.unsqueeze(0) bbox_pred = bbox_pred.unsqueeze(0) # filter anchors, bbox_pred, scores w.r.t. scores nms_pre = cfg.get('nms_pre', -1) if nms_pre > 0 and scores.shape[0] > nms_pre: if self.use_sigmoid_cls: max_scores, _ = scores.max(dim=1) else: max_scores, _ = scores[:, 1:].max(dim=1) _, topk_inds = max_scores.topk(nms_pre) anchors = anchors[topk_inds, :] bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] bboxes = delta2bbox(anchors, bbox_pred, self.target_means, self.target_stds, img_shape) mlvl_bboxes.append(bboxes) mlvl_scores.append(scores) mlvl_bboxes = torch.cat(mlvl_bboxes) if rescale: mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor) mlvl_scores = torch.cat(mlvl_scores) if self.use_sigmoid_cls: padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1) mlvl_scores = torch.cat([padding, mlvl_scores], dim=1) # multi class NMS det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.max_per_img) return det_bboxes, det_labels
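The guided-anchor head above predicts only a width/height delta per location; delta2bbox then turns each fixed square into a guided anchor with the same centre but rescaled extent. Below is a minimal, self-contained sketch of that decoding step under the default zero anchoring_means and unit anchoring_stds; the helper name decode_guided_anchors and the toy numbers are illustrative only, and the loc-mask filtering and wh_ratio_clip clamp are omitted.

# Minimal sketch (not the repo's delta2bbox): decode the (dw, dh) deltas
# predicted by the shape branch into "guided anchors" around fixed squares.
import torch

def decode_guided_anchors(squares, wh_deltas):
    # squares: (N, 4) as (x1, y1, x2, y2); wh_deltas: (N, 2) as (dw, dh)
    cx = (squares[:, 0] + squares[:, 2]) * 0.5
    cy = (squares[:, 1] + squares[:, 3]) * 0.5
    w = squares[:, 2] - squares[:, 0] + 1
    h = squares[:, 3] - squares[:, 1] + 1
    # the shape branch predicts log-scale factors, so exp() recovers the ratio
    new_w = w * wh_deltas[:, 0].exp()
    new_h = h * wh_deltas[:, 1].exp()
    return torch.stack([cx - 0.5 * (new_w - 1), cy - 0.5 * (new_h - 1),
                        cx + 0.5 * (new_w - 1), cy + 0.5 * (new_h - 1)], dim=-1)

squares = torch.tensor([[0., 0., 63., 63.]])      # one 64x64 square anchor
wh_deltas = torch.tensor([[0.7, -0.7]])           # grow width, shrink height
print(decode_guided_anchors(squares, wh_deltas))  # wide, flat guided anchor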
25,250
39.596463
79
py
AlignShift
AlignShift-master/mmdet/models/anchor_heads/fovea_head.py
import torch import torch.nn as nn from mmcv.cnn import normal_init from mmdet.core import multi_apply, multiclass_nms from mmdet.ops import DeformConv from ..builder import build_loss from ..registry import HEADS from ..utils import ConvModule, bias_init_with_prob INF = 1e8 class FeatureAlign(nn.Module): def __init__(self, in_channels, out_channels, kernel_size=3, deformable_groups=4): super(FeatureAlign, self).__init__() offset_channels = kernel_size * kernel_size * 2 self.conv_offset = nn.Conv2d( 4, deformable_groups * offset_channels, 1, bias=False) self.conv_adaption = DeformConv( in_channels, out_channels, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, deformable_groups=deformable_groups) self.relu = nn.ReLU(inplace=True) def init_weights(self): normal_init(self.conv_offset, std=0.1) normal_init(self.conv_adaption, std=0.01) def forward(self, x, shape): offset = self.conv_offset(shape) x = self.relu(self.conv_adaption(x, offset)) return x @HEADS.register_module class FoveaHead(nn.Module): """FoveaBox: Beyond Anchor-based Object Detector https://arxiv.org/abs/1904.03797 """ def __init__(self, num_classes, in_channels, feat_channels=256, stacked_convs=4, strides=(4, 8, 16, 32, 64), base_edge_list=(16, 32, 64, 128, 256), scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256), (128, 512)), sigma=0.4, with_deform=False, deformable_groups=4, loss_cls=None, loss_bbox=None, conv_cfg=None, norm_cfg=None): super(FoveaHead, self).__init__() self.num_classes = num_classes self.cls_out_channels = num_classes - 1 self.in_channels = in_channels self.feat_channels = feat_channels self.stacked_convs = stacked_convs self.strides = strides self.base_edge_list = base_edge_list self.scale_ranges = scale_ranges self.sigma = sigma self.with_deform = with_deform self.deformable_groups = deformable_groups self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self._init_layers() def _init_layers(self): self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() # box branch for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.reg_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.fovea_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) # cls branch if not self.with_deform: for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.fovea_cls = nn.Conv2d( self.feat_channels, self.cls_out_channels, 3, padding=1) else: self.cls_convs.append( ConvModule( self.feat_channels, (self.feat_channels * 4), 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.cls_convs.append( ConvModule((self.feat_channels * 4), (self.feat_channels * 4), 1, stride=1, padding=0, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.feature_adaption = FeatureAlign( self.feat_channels, self.feat_channels, kernel_size=3, deformable_groups=self.deformable_groups) self.fovea_cls = nn.Conv2d( int(self.feat_channels * 4), self.cls_out_channels, 3, padding=1) def init_weights(self): for m in self.cls_convs: normal_init(m.conv, std=0.01) for m in self.reg_convs: normal_init(m.conv, std=0.01) bias_cls = bias_init_with_prob(0.01) 
normal_init(self.fovea_cls, std=0.01, bias=bias_cls) normal_init(self.fovea_reg, std=0.01) if self.with_deform: self.feature_adaption.init_weights() def forward(self, feats): return multi_apply(self.forward_single, feats) def forward_single(self, x): cls_feat = x reg_feat = x for reg_layer in self.reg_convs: reg_feat = reg_layer(reg_feat) bbox_pred = self.fovea_reg(reg_feat) if self.with_deform: cls_feat = self.feature_adaption(cls_feat, bbox_pred.exp()) for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) cls_score = self.fovea_cls(cls_feat) return cls_score, bbox_pred def get_points(self, featmap_sizes, dtype, device, flatten=False): points = [] for featmap_size in featmap_sizes: x_range = torch.arange( featmap_size[1], dtype=dtype, device=device) + 0.5 y_range = torch.arange( featmap_size[0], dtype=dtype, device=device) + 0.5 y, x = torch.meshgrid(y_range, x_range) if flatten: points.append((y.flatten(), x.flatten())) else: points.append((y, x)) return points def loss(self, cls_scores, bbox_preds, gt_bbox_list, gt_label_list, img_metas, cfg, gt_bboxes_ignore=None): assert len(cls_scores) == len(bbox_preds) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] points = self.get_points(featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device) num_imgs = cls_scores[0].size(0) flatten_cls_scores = [ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) for cls_score in cls_scores ] flatten_bbox_preds = [ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) for bbox_pred in bbox_preds ] flatten_cls_scores = torch.cat(flatten_cls_scores) flatten_bbox_preds = torch.cat(flatten_bbox_preds) flatten_labels, flatten_bbox_targets = self.fovea_target( gt_bbox_list, gt_label_list, featmap_sizes, points) pos_inds = (flatten_labels > 0).nonzero().view(-1) num_pos = len(pos_inds) loss_cls = self.loss_cls( flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs) if num_pos > 0: pos_bbox_preds = flatten_bbox_preds[pos_inds] pos_bbox_targets = flatten_bbox_targets[pos_inds] pos_weights = pos_bbox_targets.new_zeros( pos_bbox_targets.size()) + 1.0 loss_bbox = self.loss_bbox( pos_bbox_preds, pos_bbox_targets, pos_weights, avg_factor=num_pos) else: loss_bbox = torch.tensor([0], dtype=flatten_bbox_preds.dtype, device=flatten_bbox_preds.device) return dict(loss_cls=loss_cls, loss_bbox=loss_bbox) def fovea_target(self, gt_bbox_list, gt_label_list, featmap_sizes, points): label_list, bbox_target_list = multi_apply( self.fovea_target_single, gt_bbox_list, gt_label_list, featmap_size_list=featmap_sizes, point_list=points) flatten_labels = [ torch.cat([ labels_level_img.flatten() for labels_level_img in labels_level ]) for labels_level in zip(*label_list) ] flatten_bbox_targets = [ torch.cat([ bbox_targets_level_img.reshape(-1, 4) for bbox_targets_level_img in bbox_targets_level ]) for bbox_targets_level in zip(*bbox_target_list) ] flatten_labels = torch.cat(flatten_labels) flatten_bbox_targets = torch.cat(flatten_bbox_targets) return flatten_labels, flatten_bbox_targets def fovea_target_single(self, gt_bboxes_raw, gt_labels_raw, featmap_size_list=None, point_list=None): gt_areas = torch.sqrt((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) * (gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1])) label_list = [] bbox_target_list = [] # for each pyramid, find the cls and box target for base_len, (lower_bound, upper_bound), stride, featmap_size, \ (y, x) in zip(self.base_edge_list, self.scale_ranges, self.strides, featmap_size_list, point_list): labels = gt_labels_raw.new_zeros(featmap_size) 
bbox_targets = gt_bboxes_raw.new(featmap_size[0], featmap_size[1], 4) + 1 # scale assignment hit_indices = ((gt_areas >= lower_bound) & (gt_areas <= upper_bound)).nonzero().flatten() if len(hit_indices) == 0: label_list.append(labels) bbox_target_list.append(torch.log(bbox_targets)) continue _, hit_index_order = torch.sort(-gt_areas[hit_indices]) hit_indices = hit_indices[hit_index_order] gt_bboxes = gt_bboxes_raw[hit_indices, :] / stride gt_labels = gt_labels_raw[hit_indices] half_w = 0.5 * (gt_bboxes[:, 2] - gt_bboxes[:, 0]) half_h = 0.5 * (gt_bboxes[:, 3] - gt_bboxes[:, 1]) # valid fovea area: left, right, top, down pos_left = torch.ceil( gt_bboxes[:, 0] + (1 - self.sigma) * half_w - 0.5).long().\ clamp(0, featmap_size[1] - 1) pos_right = torch.floor( gt_bboxes[:, 0] + (1 + self.sigma) * half_w - 0.5).long().\ clamp(0, featmap_size[1] - 1) pos_top = torch.ceil( gt_bboxes[:, 1] + (1 - self.sigma) * half_h - 0.5).long().\ clamp(0, featmap_size[0] - 1) pos_down = torch.floor( gt_bboxes[:, 1] + (1 + self.sigma) * half_h - 0.5).long().\ clamp(0, featmap_size[0] - 1) for px1, py1, px2, py2, label, (gt_x1, gt_y1, gt_x2, gt_y2) in \ zip(pos_left, pos_top, pos_right, pos_down, gt_labels, gt_bboxes_raw[hit_indices, :]): labels[py1:py2 + 1, px1:px2 + 1] = label bbox_targets[py1:py2 + 1, px1:px2 + 1, 0] = \ (stride * x[py1:py2 + 1, px1:px2 + 1] - gt_x1) / base_len bbox_targets[py1:py2 + 1, px1:px2 + 1, 1] = \ (stride * y[py1:py2 + 1, px1:px2 + 1] - gt_y1) / base_len bbox_targets[py1:py2 + 1, px1:px2 + 1, 2] = \ (gt_x2 - stride * x[py1:py2 + 1, px1:px2 + 1]) / base_len bbox_targets[py1:py2 + 1, px1:px2 + 1, 3] = \ (gt_y2 - stride * y[py1:py2 + 1, px1:px2 + 1]) / base_len bbox_targets = bbox_targets.clamp(min=1. / 16, max=16.) label_list.append(labels) bbox_target_list.append(torch.log(bbox_targets)) return label_list, bbox_target_list def get_bboxes(self, cls_scores, bbox_preds, img_metas, cfg, rescale=None): assert len(cls_scores) == len(bbox_preds) num_levels = len(cls_scores) featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] points = self.get_points( featmap_sizes, bbox_preds[0].dtype, bbox_preds[0].device, flatten=True) result_list = [] for img_id in range(len(img_metas)): cls_score_list = [ cls_scores[i][img_id].detach() for i in range(num_levels) ] bbox_pred_list = [ bbox_preds[i][img_id].detach() for i in range(num_levels) ] img_shape = img_metas[img_id]['img_shape'] scale_factor = img_metas[img_id]['scale_factor'] det_bboxes = self.get_bboxes_single(cls_score_list, bbox_pred_list, featmap_sizes, points, img_shape, scale_factor, cfg, rescale) result_list.append(det_bboxes) return result_list def get_bboxes_single(self, cls_scores, bbox_preds, featmap_sizes, point_list, img_shape, scale_factor, cfg, rescale=False): assert len(cls_scores) == len(bbox_preds) == len(point_list) det_bboxes = [] det_scores = [] for cls_score, bbox_pred, featmap_size, stride, base_len, (y, x) \ in zip(cls_scores, bbox_preds, featmap_sizes, self.strides, self.base_edge_list, point_list): assert cls_score.size()[-2:] == bbox_pred.size()[-2:] scores = cls_score.permute(1, 2, 0).reshape( -1, self.cls_out_channels).sigmoid() bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4).exp() nms_pre = cfg.get('nms_pre', -1) if (nms_pre > 0) and (scores.shape[0] > nms_pre): max_scores, _ = scores.max(dim=1) _, topk_inds = max_scores.topk(nms_pre) bbox_pred = bbox_pred[topk_inds, :] scores = scores[topk_inds, :] y = y[topk_inds] x = x[topk_inds] x1 = (stride * x - base_len * bbox_pred[:, 0]).\ clamp(min=0, 
max=img_shape[1] - 1) y1 = (stride * y - base_len * bbox_pred[:, 1]).\ clamp(min=0, max=img_shape[0] - 1) x2 = (stride * x + base_len * bbox_pred[:, 2]).\ clamp(min=0, max=img_shape[1] - 1) y2 = (stride * y + base_len * bbox_pred[:, 3]).\ clamp(min=0, max=img_shape[0] - 1) bboxes = torch.stack([x1, y1, x2, y2], -1) det_bboxes.append(bboxes) det_scores.append(scores) det_bboxes = torch.cat(det_bboxes) if rescale: det_bboxes /= det_bboxes.new_tensor(scale_factor) det_scores = torch.cat(det_scores) padding = det_scores.new_zeros(det_scores.shape[0], 1) det_scores = torch.cat([padding, det_scores], dim=1) det_bboxes, det_labels = multiclass_nms(det_bboxes, det_scores, cfg.score_thr, cfg.nms, cfg.max_per_img) return det_bboxes, det_labels
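FoveaHead only marks the central, sigma-shrunk part of each ground-truth box as positive. The toy arithmetic below mirrors the pos_left/right/top/down computation in fovea_target_single for a single made-up box at stride 8 and sigma 0.4; clamping to the feature-map bounds is omitted for brevity.

# Toy check of FoveaHead's positive ("fovea") area for one ground-truth box.
import math

stride, sigma = 8, 0.4
x1, y1, x2, y2 = 8., 8., 40., 40.            # gt box in image coordinates
gx1, gy1, gx2, gy2 = [v / stride for v in (x1, y1, x2, y2)]
half_w, half_h = 0.5 * (gx2 - gx1), 0.5 * (gy2 - gy1)

pos_left  = math.ceil(gx1 + (1 - sigma) * half_w - 0.5)   # -> 2
pos_right = math.floor(gx1 + (1 + sigma) * half_w - 0.5)  # -> 3
pos_top   = math.ceil(gy1 + (1 - sigma) * half_h - 0.5)   # -> 2
pos_down  = math.floor(gy1 + (1 + sigma) * half_h - 0.5)  # -> 3
print(pos_left, pos_right, pos_top, pos_down)
# Only the central cells of the projected box become positives; the rest of
# the box stays background, which is the "fovea" idea from the paper.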
16,360
41.167526
79
py
AlignShift
AlignShift-master/mmdet/models/bbox_heads/bbox_head.py
import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.modules.utils import _pair from mmdet.core import (auto_fp16, bbox_target, delta2bbox, force_fp32, multiclass_nms) from ..builder import build_loss from ..losses import accuracy from ..registry import HEADS @HEADS.register_module class BBoxHead(nn.Module): """Simplest RoI head, with only two fc layers for classification and regression respectively""" def __init__(self, with_avg_pool=False, with_cls=True, with_reg=True, roi_feat_size=7, in_channels=256, num_classes=81, target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2], reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict( type='SmoothL1Loss', beta=1.0, loss_weight=1.0)): super(BBoxHead, self).__init__() assert with_cls or with_reg self.with_avg_pool = with_avg_pool self.with_cls = with_cls self.with_reg = with_reg self.roi_feat_size = _pair(roi_feat_size) self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1] self.in_channels = in_channels self.num_classes = num_classes self.target_means = target_means self.target_stds = target_stds self.reg_class_agnostic = reg_class_agnostic self.fp16_enabled = False self.loss_cls = build_loss(loss_cls) self.loss_bbox = build_loss(loss_bbox) in_channels = self.in_channels if self.with_avg_pool: self.avg_pool = nn.AvgPool2d(self.roi_feat_size) else: in_channels *= self.roi_feat_area if self.with_cls: self.fc_cls = nn.Linear(in_channels, num_classes) if self.with_reg: out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes self.fc_reg = nn.Linear(in_channels, out_dim_reg) self.debug_imgs = None def init_weights(self): if self.with_cls: nn.init.normal_(self.fc_cls.weight, 0, 0.01) nn.init.constant_(self.fc_cls.bias, 0) if self.with_reg: nn.init.normal_(self.fc_reg.weight, 0, 0.001) nn.init.constant_(self.fc_reg.bias, 0) @auto_fp16() def forward(self, x): if self.with_avg_pool: x = self.avg_pool(x) x = x.view(x.size(0), -1) cls_score = self.fc_cls(x) if self.with_cls else None bbox_pred = self.fc_reg(x) if self.with_reg else None return cls_score, bbox_pred def get_target(self, sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg): pos_proposals = [res.pos_bboxes for res in sampling_results] neg_proposals = [res.neg_bboxes for res in sampling_results] pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results] pos_gt_labels = [res.pos_gt_labels for res in sampling_results] reg_classes = 1 if self.reg_class_agnostic else self.num_classes cls_reg_targets = bbox_target( pos_proposals, neg_proposals, pos_gt_bboxes, pos_gt_labels, rcnn_train_cfg, reg_classes, target_means=self.target_means, target_stds=self.target_stds) return cls_reg_targets @force_fp32(apply_to=('cls_score', 'bbox_pred')) def loss(self, cls_score, bbox_pred, labels, label_weights, bbox_targets, bbox_weights, reduction_override=None): losses = dict() if cls_score is not None: avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) 
losses['loss_cls'] = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor, reduction_override=reduction_override) losses['acc'] = accuracy(cls_score, labels) losses['pos_n'] = (labels==1.0).sum() losses['neg_n'] = (labels==0).sum() if bbox_pred is not None: pos_inds = labels > 0 if self.reg_class_agnostic: pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), 4)[pos_inds] else: pos_bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)[pos_inds, labels[pos_inds]] losses['loss_bbox'] = self.loss_bbox( pos_bbox_pred, bbox_targets[pos_inds], bbox_weights[pos_inds], avg_factor=bbox_targets.size(0), reduction_override=reduction_override) return losses @force_fp32(apply_to=('cls_score', 'bbox_pred')) def get_det_bboxes(self, rois, cls_score, bbox_pred, img_shape, scale_factor, rescale=False, cfg=None): if isinstance(cls_score, list): cls_score = sum(cls_score) / float(len(cls_score)) scores = F.softmax(cls_score, dim=1) if cls_score is not None else None if bbox_pred is not None: bboxes = delta2bbox(rois[:, 1:], bbox_pred, self.target_means, self.target_stds, img_shape) else: bboxes = rois[:, 1:].clone() if img_shape is not None: bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1) bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1) if rescale: if isinstance(scale_factor, float): bboxes /= scale_factor else: scale_factor = torch.from_numpy(scale_factor).to(bboxes.device) bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view(bboxes.size()[0], -1) if cfg is None: return bboxes, scores else: det_bboxes, det_labels = multiclass_nms(bboxes, scores, cfg.score_thr, cfg.nms, cfg.max_per_img) return det_bboxes, det_labels @force_fp32(apply_to=('bbox_preds', )) def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas): """Refine bboxes during training. Args: rois (Tensor): Shape (n*bs, 5), where n is image number per GPU, and bs is the sampled RoIs per image. labels (Tensor): Shape (n*bs, ). bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class). pos_is_gts (list[Tensor]): Flags indicating if each positive bbox is a gt bbox. img_metas (list[dict]): Meta info of each image. Returns: list[Tensor]: Refined bboxes of each image in a mini-batch. """ img_ids = rois[:, 0].long().unique(sorted=True) assert img_ids.numel() == len(img_metas) bboxes_list = [] for i in range(len(img_metas)): inds = torch.nonzero(rois[:, 0] == i).squeeze() num_rois = inds.numel() bboxes_ = rois[inds, 1:] label_ = labels[inds] bbox_pred_ = bbox_preds[inds] img_meta_ = img_metas[i] pos_is_gts_ = pos_is_gts[i] bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_, img_meta_) # filter gt bboxes pos_keep = 1 - pos_is_gts_ keep_inds = pos_is_gts_.new_ones(num_rois) keep_inds[:len(pos_is_gts_)] = pos_keep bboxes_list.append(bboxes[keep_inds]) return bboxes_list @force_fp32(apply_to=('bbox_pred', )) def regress_by_class(self, rois, label, bbox_pred, img_meta): """Regress the bbox for the predicted class. Used in Cascade R-CNN. Args: rois (Tensor): shape (n, 4) or (n, 5) label (Tensor): shape (n, ) bbox_pred (Tensor): shape (n, 4*(#class+1)) or (n, 4) img_meta (dict): Image meta info. Returns: Tensor: Regressed bboxes, the same shape as input rois. 
""" assert rois.size(1) == 4 or rois.size(1) == 5 if not self.reg_class_agnostic: label = label * 4 inds = torch.stack((label, label + 1, label + 2, label + 3), 1) bbox_pred = torch.gather(bbox_pred, 1, inds) assert bbox_pred.size(1) == 4 if rois.size(1) == 4: new_rois = delta2bbox(rois, bbox_pred, self.target_means, self.target_stds, img_meta['img_shape']) else: bboxes = delta2bbox(rois[:, 1:], bbox_pred, self.target_means, self.target_stds, img_meta['img_shape']) new_rois = torch.cat((rois[:, [0]], bboxes), dim=1) return new_rois
9,344
37.29918
79
py
AlignShift
AlignShift-master/mmdet/models/bbox_heads/convfc_bbox_head.py
import torch.nn as nn from ..registry import HEADS from ..utils import ConvModule from .bbox_head import BBoxHead @HEADS.register_module class ConvFCBBoxHead(BBoxHead): r"""More general bbox head, with shared conv and fc layers and two optional separated branches. /-> cls convs -> cls fcs -> cls shared convs -> shared fcs \-> reg convs -> reg fcs -> reg """ # noqa: W605 def __init__(self, num_shared_convs=0, num_shared_fcs=0, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, conv_out_channels=256, fc_out_channels=1024, conv_cfg=None, norm_cfg=None, fc_dropout=False, *args, **kwargs): super(ConvFCBBoxHead, self).__init__(*args, **kwargs) assert (num_shared_convs + num_shared_fcs + num_cls_convs + num_cls_fcs + num_reg_convs + num_reg_fcs > 0) if num_cls_convs > 0 or num_reg_convs > 0: assert num_shared_fcs == 0 if not self.with_cls: assert num_cls_convs == 0 and num_cls_fcs == 0 if not self.with_reg: assert num_reg_convs == 0 and num_reg_fcs == 0 self.num_shared_convs = num_shared_convs self.num_shared_fcs = num_shared_fcs self.num_cls_convs = num_cls_convs self.num_cls_fcs = num_cls_fcs self.num_reg_convs = num_reg_convs self.num_reg_fcs = num_reg_fcs self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg # add shared convs and fcs self.shared_convs, self.shared_fcs, last_layer_dim = \ self._add_conv_fc_branch( self.num_shared_convs, self.num_shared_fcs, self.in_channels, True, True) self.shared_out_channels = last_layer_dim # add cls specific branch self.cls_convs, self.cls_fcs, self.cls_last_dim = \ self._add_conv_fc_branch( self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) # add reg specific branch self.reg_convs, self.reg_fcs, self.reg_last_dim = \ self._add_conv_fc_branch( self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels) if self.num_shared_fcs == 0 and not self.with_avg_pool: if self.num_cls_fcs == 0: self.cls_last_dim *= self.roi_feat_area if self.num_reg_fcs == 0: self.reg_last_dim *= self.roi_feat_area self.relu = nn.ReLU(inplace=True) # reconstruct fc_cls and fc_reg since input channels are changed if self.with_cls: self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes) if self.with_reg: out_dim_reg = (4 if self.reg_class_agnostic else 4 * self.num_classes) self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg) def _add_conv_fc_branch(self, num_branch_convs, num_branch_fcs, in_channels, fc_dropout=False, is_shared=False): """Add shared or separable branch convs -> avg pool (optional) -> fcs """ last_layer_dim = in_channels # add branch specific conv layers branch_convs = nn.ModuleList() if num_branch_convs > 0: for i in range(num_branch_convs): conv_in_channels = ( last_layer_dim if i == 0 else self.conv_out_channels) branch_convs.append( ConvModule( conv_in_channels, self.conv_out_channels, 3, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) last_layer_dim = self.conv_out_channels # add branch specific fc layers branch_fcs = nn.ModuleList() if num_branch_fcs > 0: # for shared branch, only consider self.with_avg_pool # for separated branches, also consider self.num_shared_fcs if (is_shared or self.num_shared_fcs == 0) and not self.with_avg_pool: last_layer_dim *= self.roi_feat_area for i in range(num_branch_fcs): fc_in_channels = ( last_layer_dim if i == 0 else self.fc_out_channels) # branch_fcs.append(nn.Sequential( # nn.Linear(fc_in_channels, self.fc_out_channels), # nn.Dropout(0.2, inplace=False)))#hyadd 
branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels)) # if fc_dropout: # branch_fcs.append(nn.Dropout(0.5, inplace=False)) last_layer_dim = self.fc_out_channels return branch_convs, branch_fcs, last_layer_dim def init_weights(self): super(ConvFCBBoxHead, self).init_weights() for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]: for m in module_list.modules(): if isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight) nn.init.constant_(m.bias, 0) def forward(self, x): # shared part if self.num_shared_convs > 0: for conv in self.shared_convs: x = conv(x) if self.num_shared_fcs > 0: if self.with_avg_pool: x = self.avg_pool(x) x = x.view(x.size(0), -1) for fc in self.shared_fcs: x = self.relu(fc(x)) # separate branches x_cls = x x_reg = x for conv in self.cls_convs: x_cls = conv(x_cls) if x_cls.dim() > 2: if self.with_avg_pool: x_cls = self.avg_pool(x_cls) x_cls = x_cls.view(x_cls.size(0), -1) for fc in self.cls_fcs: x_cls = self.relu(fc(x_cls)) for conv in self.reg_convs: x_reg = conv(x_reg) if x_reg.dim() > 2: if self.with_avg_pool: x_reg = self.avg_pool(x_reg) x_reg = x_reg.view(x_reg.size(0), -1) for fc in self.reg_fcs: x_reg = self.relu(fc(x_reg)) cls_score = self.fc_cls(x_cls) if self.with_cls else None bbox_pred = self.fc_reg(x_reg) if self.with_reg else None return cls_score, bbox_pred @HEADS.register_module class SharedFCBBoxHead(ConvFCBBoxHead): def __init__(self, num_fcs=2, fc_out_channels=1024, *args, **kwargs): assert num_fcs >= 1 super(SharedFCBBoxHead, self).__init__( #num_shared_convs=0,#hyadd num_shared_fcs=num_fcs, num_cls_convs=0, num_cls_fcs=0, num_reg_convs=0, num_reg_fcs=0, fc_out_channels=fc_out_channels, *args, **kwargs)
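SharedFCBBoxHead below is simply ConvFCBBoxHead with all branch convs/fcs set to zero and num_shared_fcs=num_fcs. A hedged sketch of how such a head is usually declared in an mmdet-1.x style config; the channel counts, class number and loss settings are illustrative defaults, not values taken from this repository's configs.

# Illustrative config fragment for a shared two-fc RoI head.
bbox_head = dict(
    type='SharedFCBBoxHead',
    num_fcs=2,
    in_channels=256,
    fc_out_channels=1024,
    roi_feat_size=7,
    num_classes=2,                 # e.g. background + lesion (illustrative)
    target_means=[0., 0., 0., 0.],
    target_stds=[0.1, 0.1, 0.2, 0.2],
    reg_class_agnostic=False,
    loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
    loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))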
7,308
37.067708
82
py
AlignShift
AlignShift-master/mmdet/models/bbox_heads/double_bbox_head.py
import torch.nn as nn from mmcv.cnn.weight_init import normal_init, xavier_init from ..backbones.resnet import Bottleneck from ..registry import HEADS from ..utils import ConvModule from .bbox_head import BBoxHead class BasicResBlock(nn.Module): """Basic residual block. This block is a little different from the block in the ResNet backbone. The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock. Args: in_channels (int): Channels of the input feature map. out_channels (int): Channels of the output feature map. conv_cfg (dict): The config dict for convolution layers. norm_cfg (dict): The config dict for normalization layers. """ def __init__(self, in_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN')): super(BasicResBlock, self).__init__() # main path self.conv1 = ConvModule( in_channels, in_channels, kernel_size=3, padding=1, bias=False, conv_cfg=conv_cfg, norm_cfg=norm_cfg) self.conv2 = ConvModule( in_channels, out_channels, kernel_size=1, bias=False, activation=None, conv_cfg=conv_cfg, norm_cfg=norm_cfg) # identity path self.conv_identity = ConvModule( in_channels, out_channels, kernel_size=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, activation=None) self.relu = nn.ReLU(inplace=True) def forward(self, x): identity = x x = self.conv1(x) x = self.conv2(x) identity = self.conv_identity(identity) out = x + identity out = self.relu(out) return out @HEADS.register_module class DoubleConvFCBBoxHead(BBoxHead): r"""Bbox head used in Double-Head R-CNN /-> cls /-> shared convs -> \-> reg roi features /-> cls \-> shared fc -> \-> reg """ # noqa: W605 def __init__(self, num_convs=0, num_fcs=0, conv_out_channels=1024, fc_out_channels=1024, conv_cfg=None, norm_cfg=dict(type='BN'), **kwargs): kwargs.setdefault('with_avg_pool', True) super(DoubleConvFCBBoxHead, self).__init__(**kwargs) assert self.with_avg_pool assert num_convs > 0 assert num_fcs > 0 self.num_convs = num_convs self.num_fcs = num_fcs self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg # increase the channel of input features self.res_block = BasicResBlock(self.in_channels, self.conv_out_channels) # add conv heads self.conv_branch = self._add_conv_branch() # add fc heads self.fc_branch = self._add_fc_branch() out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg) self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes) self.relu = nn.ReLU(inplace=True) def _add_conv_branch(self): """Add the fc branch which consists of a sequential of conv layers""" branch_convs = nn.ModuleList() for i in range(self.num_convs): branch_convs.append( Bottleneck( inplanes=self.conv_out_channels, planes=self.conv_out_channels // 4, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) return branch_convs def _add_fc_branch(self): """Add the fc branch which consists of a sequential of fc layers""" branch_fcs = nn.ModuleList() for i in range(self.num_fcs): fc_in_channels = ( self.in_channels * self.roi_feat_area if i == 0 else self.fc_out_channels) branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels)) return branch_fcs def init_weights(self): normal_init(self.fc_cls, std=0.01) normal_init(self.fc_reg, std=0.001) for m in self.fc_branch.modules(): if isinstance(m, nn.Linear): xavier_init(m, distribution='uniform') def forward(self, x_cls, x_reg): # conv head x_conv = self.res_block(x_reg) for conv in self.conv_branch: x_conv = conv(x_conv) if self.with_avg_pool: x_conv = 
self.avg_pool(x_conv) x_conv = x_conv.view(x_conv.size(0), -1) bbox_pred = self.fc_reg(x_conv) # fc head x_fc = x_cls.view(x_cls.size(0), -1) for fc in self.fc_branch: x_fc = self.relu(fc(x_fc)) cls_score = self.fc_cls(x_fc) return cls_score, bbox_pred
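A hedged usage sketch for DoubleConvFCBBoxHead: the conv branch consumes the regression RoI features and the fc branch the classification features, matching the diagram in the docstring. It assumes the repo's mmdet package (including its registered default losses and compiled ops) is importable; the shapes and argument values are illustrative.

import torch
from mmdet.models.bbox_heads.double_bbox_head import DoubleConvFCBBoxHead

head = DoubleConvFCBBoxHead(
    num_convs=4, num_fcs=2,
    in_channels=256, conv_out_channels=1024, fc_out_channels=1024,
    roi_feat_size=7, num_classes=81)
head.init_weights()
rois = torch.rand(8, 256, 7, 7)             # pooled RoI features
cls_score, bbox_pred = head(x_cls=rois, x_reg=rois)
print(cls_score.shape, bbox_pred.shape)     # (8, 81) and (8, 4 * 81)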
5,274
29.847953
78
py
AlignShift
AlignShift-master/mmdet/models/shared_heads/res_layer.py
import logging import torch.nn as nn from mmcv.cnn import constant_init, kaiming_init from mmcv.runner import load_checkpoint from mmdet.core import auto_fp16 from ..backbones import ResNet, make_res_layer from ..registry import SHARED_HEADS @SHARED_HEADS.register_module class ResLayer(nn.Module): def __init__(self, depth, stage=3, stride=2, dilation=1, style='pytorch', norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, with_cp=False, dcn=None): super(ResLayer, self).__init__() self.norm_eval = norm_eval self.norm_cfg = norm_cfg self.stage = stage self.fp16_enabled = False block, stage_blocks = ResNet.arch_settings[depth] stage_block = stage_blocks[stage] planes = 64 * 2**stage inplanes = 64 * 2**(stage - 1) * block.expansion res_layer = make_res_layer( block, inplanes, planes, stage_block, stride=stride, dilation=dilation, style=style, with_cp=with_cp, norm_cfg=self.norm_cfg, dcn=dcn) self.add_module('layer{}'.format(stage + 1), res_layer) def init_weights(self, pretrained=None): if isinstance(pretrained, str): logger = logging.getLogger() load_checkpoint(self, pretrained, strict=False, logger=logger) elif pretrained is None: for m in self.modules(): if isinstance(m, nn.Conv2d): kaiming_init(m) elif isinstance(m, nn.BatchNorm2d): constant_init(m, 1) else: raise TypeError('pretrained must be a str or None') @auto_fp16() def forward(self, x): res_layer = getattr(self, 'layer{}'.format(self.stage + 1)) out = res_layer(x) return out def train(self, mode=True): super(ResLayer, self).train(mode) if self.norm_eval: for m in self.modules(): if isinstance(m, nn.BatchNorm2d): m.eval()
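A worked example of the channel arithmetic ResLayer relies on, for a ResNet-50 shared head (depth=50, stage=3, Bottleneck.expansion == 4):

# Channels into and out of the extra res layer built by ResLayer.
expansion = 4
stage = 3
inplanes = 64 * 2 ** (stage - 1) * expansion   # 1024 channels coming in
planes = 64 * 2 ** stage                       # 512 bottleneck width
out_channels = planes * expansion              # 2048 channels going out
stage_blocks = (3, 4, 6, 3)[stage]             # 3 residual blocks in layer4
print(inplanes, planes, out_channels, stage_blocks)   # 1024 512 2048 3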
2,236
29.643836
74
py
AlignShift
AlignShift-master/mmdet/models/utils/weight_init.py
import numpy as np import torch.nn as nn def xavier_init(module, gain=1, bias=0, distribution='normal'): assert distribution in ['uniform', 'normal'] if distribution == 'uniform': nn.init.xavier_uniform_(module.weight, gain=gain) else: nn.init.xavier_normal_(module.weight, gain=gain) if hasattr(module, 'bias'): nn.init.constant_(module.bias, bias) def normal_init(module, mean=0, std=1, bias=0): nn.init.normal_(module.weight, mean, std) if hasattr(module, 'bias'): nn.init.constant_(module.bias, bias) def uniform_init(module, a=0, b=1, bias=0): nn.init.uniform_(module.weight, a, b) if hasattr(module, 'bias'): nn.init.constant_(module.bias, bias) def kaiming_init(module, mode='fan_out', nonlinearity='relu', bias=0, distribution='normal'): assert distribution in ['uniform', 'normal'] if distribution == 'uniform': nn.init.kaiming_uniform_( module.weight, mode=mode, nonlinearity=nonlinearity) else: nn.init.kaiming_normal_( module.weight, mode=mode, nonlinearity=nonlinearity) if hasattr(module, 'bias'): nn.init.constant_(module.bias, bias) def bias_init_with_prob(prior_prob): """ initialize conv/fc bias value according to giving probablity""" bias_init = float(-np.log((1 - prior_prob) / prior_prob)) return bias_init
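bias_init_with_prob sets the classification bias to -log((1 - p) / p) so that the initial sigmoid output is roughly the prior probability p, which keeps focal-loss heads stable early in training. A quick numeric check:

import math

p = 0.01
bias = -math.log((1 - p) / p)
print(bias)                          # ~ -4.595
print(1 / (1 + math.exp(-bias)))     # ~ 0.01, the intended prior probability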
1,455
29.978723
71
py
AlignShift
AlignShift-master/mmdet/models/utils/norm.py
import torch.nn as nn norm_cfg = { # format: layer_type: (abbreviation, module) 'BN': ('bn', nn.BatchNorm2d), 'SyncBN': ('bn', nn.SyncBatchNorm), 'GN': ('gn', nn.GroupNorm), # and potentially 'SN' } def build_norm_layer(cfg, num_features, postfix=''): """ Build normalization layer Args: cfg (dict): cfg should contain: type (str): identify norm layer type. layer args: args needed to instantiate a norm layer. requires_grad (bool): [optional] whether stop gradient updates num_features (int): number of channels from input. postfix (int, str): appended into norm abbreviation to create named layer. Returns: name (str): abbreviation + postfix layer (nn.Module): created norm layer """ assert isinstance(cfg, dict) and 'type' in cfg cfg_ = cfg.copy() layer_type = cfg_.pop('type') if layer_type not in norm_cfg: raise KeyError('Unrecognized norm type {}'.format(layer_type)) else: abbr, norm_layer = norm_cfg[layer_type] if norm_layer is None: raise NotImplementedError assert isinstance(postfix, (int, str)) name = abbr + str(postfix) requires_grad = cfg_.pop('requires_grad', True) cfg_.setdefault('eps', 1e-5) if layer_type != 'GN': layer = norm_layer(num_features, **cfg_) if layer_type == 'SyncBN': layer._specify_ddp_gpu_num(1) else: assert 'num_groups' in cfg_ layer = norm_layer(num_channels=num_features, **cfg_) for param in layer.parameters(): param.requires_grad = requires_grad return name, layer
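build_norm_layer returns a (name, module) pair so callers can register the layer under a readable attribute name, as ConvModule does. A hedged usage sketch, assuming the repo's mmdet package is importable:

import torch.nn as nn
from mmdet.models.utils.norm import build_norm_layer

name, gn = build_norm_layer(dict(type='GN', num_groups=32), num_features=256)
print(name, gn)      # 'gn' GroupNorm(32, 256, eps=1e-05, affine=True)

name, bn = build_norm_layer(dict(type='BN', requires_grad=False), 64, postfix=2)
print(name, isinstance(bn, nn.BatchNorm2d))   # 'bn2' True, params frozen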
1,684
29.089286
74
py
AlignShift
AlignShift-master/mmdet/models/utils/scale.py
import torch
import torch.nn as nn


class Scale(nn.Module):
    """
    A learnable scale parameter
    """

    def __init__(self, scale=1.0):
        super(Scale, self).__init__()
        self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))

    def forward(self, x):
        return x * self.scale
314
18.6875
73
py
AlignShift
AlignShift-master/mmdet/models/utils/conv_ws.py
import torch.nn as nn import torch.nn.functional as F def conv_ws_2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, eps=1e-5): c_in = weight.size(0) weight_flat = weight.view(c_in, -1) mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1) std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1) weight = (weight - mean) / (std + eps) return F.conv2d(input, weight, bias, stride, padding, dilation, groups) class ConvWS2d(nn.Conv2d): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, eps=1e-5): super(ConvWS2d, self).__init__( in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) self.eps = eps def forward(self, x): return conv_ws_2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, self.eps)
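conv_ws_2d standardises each output filter to zero mean and unit standard deviation before the convolution (weight standardization). A small standalone check of that per-filter normalisation with made-up weights:

import torch

weight = torch.randn(8, 3, 3, 3) * 5 + 2     # 8 filters, deliberately off-centre
eps = 1e-5
flat = weight.view(weight.size(0), -1)
mean = flat.mean(dim=1, keepdim=True).view(-1, 1, 1, 1)
std = flat.std(dim=1, keepdim=True).view(-1, 1, 1, 1)
ws = (weight - mean) / (std + eps)
print(ws.view(8, -1).mean(dim=1))   # ~0 for every filter
print(ws.view(8, -1).std(dim=1))    # ~1 for every filter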
1,335
27.425532
79
py
AlignShift
AlignShift-master/mmdet/models/utils/conv_module.py
import warnings import torch.nn as nn from mmcv.cnn import constant_init, kaiming_init from .conv_ws import ConvWS2d from .norm import build_norm_layer conv_cfg = { 'Conv': nn.Conv2d, 'ConvWS': ConvWS2d, # TODO: octave conv } def build_conv_layer(cfg, *args, **kwargs): """ Build convolution layer Args: cfg (None or dict): cfg should contain: type (str): identify conv layer type. layer args: args needed to instantiate a conv layer. Returns: layer (nn.Module): created conv layer """ if cfg is None: cfg_ = dict(type='Conv') else: assert isinstance(cfg, dict) and 'type' in cfg cfg_ = cfg.copy() layer_type = cfg_.pop('type') if layer_type not in conv_cfg: raise KeyError('Unrecognized norm type {}'.format(layer_type)) else: conv_layer = conv_cfg[layer_type] layer = conv_layer(*args, **kwargs, **cfg_) return layer class ConvModule(nn.Module): """A conv block that contains conv/norm/activation layers. Args: in_channels (int): Same as nn.Conv2d. out_channels (int): Same as nn.Conv2d. kernel_size (int or tuple[int]): Same as nn.Conv2d. stride (int or tuple[int]): Same as nn.Conv2d. padding (int or tuple[int]): Same as nn.Conv2d. dilation (int or tuple[int]): Same as nn.Conv2d. groups (int): Same as nn.Conv2d. bias (bool or str): If specified as `auto`, it will be decided by the norm_cfg. Bias will be set as True if norm_cfg is None, otherwise False. conv_cfg (dict): Config dict for convolution layer. norm_cfg (dict): Config dict for normalization layer. activation (str or None): Activation type, "ReLU" by default. inplace (bool): Whether to use inplace mode for activation. order (tuple[str]): The order of conv/norm/activation layers. It is a sequence of "conv", "norm" and "act". Examples are ("conv", "norm", "act") and ("act", "conv", "norm"). """ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias='auto', conv_cfg=None, norm_cfg=None, activation='relu', inplace=True, order=('conv', 'norm', 'act')): super(ConvModule, self).__init__() assert conv_cfg is None or isinstance(conv_cfg, dict) assert norm_cfg is None or isinstance(norm_cfg, dict) self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.activation = activation self.inplace = inplace self.order = order assert isinstance(self.order, tuple) and len(self.order) == 3 assert set(order) == set(['conv', 'norm', 'act']) self.with_norm = norm_cfg is not None self.with_activatation = activation is not None # if the conv layer is before a norm layer, bias is unnecessary. 
if bias == 'auto': bias = False if self.with_norm else True self.with_bias = bias if self.with_norm and self.with_bias: warnings.warn('ConvModule has norm and bias at the same time') # build convolution layer self.conv = build_conv_layer( conv_cfg, in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias) # export the attributes of self.conv to a higher level for convenience self.in_channels = self.conv.in_channels self.out_channels = self.conv.out_channels self.kernel_size = self.conv.kernel_size self.stride = self.conv.stride self.padding = self.conv.padding self.dilation = self.conv.dilation self.transposed = self.conv.transposed self.output_padding = self.conv.output_padding self.groups = self.conv.groups # build normalization layers if self.with_norm: # norm layer is after conv layer if order.index('norm') > order.index('conv'): norm_channels = out_channels else: norm_channels = in_channels self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels) self.add_module(self.norm_name, norm) # build activation layer if self.with_activatation: # TODO: introduce `act_cfg` and supports more activation layers if self.activation not in ['relu']: raise ValueError('{} is currently not supported.'.format( self.activation)) if self.activation == 'relu': self.activate = nn.ReLU(inplace=inplace) # Use msra init by default self.init_weights() @property def norm(self): return getattr(self, self.norm_name) def init_weights(self): nonlinearity = 'relu' if self.activation is None else self.activation kaiming_init(self.conv, nonlinearity=nonlinearity) if self.with_norm: constant_init(self.norm, 1, bias=0) def forward(self, x, activate=True, norm=True): for layer in self.order: if layer == 'conv': x = self.conv(x) elif layer == 'norm' and norm and self.with_norm: x = self.norm(x) elif layer == 'act' and activate and self.with_activatation: x = self.activate(x) return x
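A hedged usage sketch for ConvModule: with a norm_cfg the conv bias defaults to False under bias='auto', and the default order runs conv, then norm, then ReLU. It assumes the repo's mmdet package (and its compiled ops) is importable; the tensor sizes are arbitrary.

import torch
from mmdet.models.utils import ConvModule

block = ConvModule(16, 32, 3, padding=1, norm_cfg=dict(type='BN'))
print(block.conv.bias is None)       # True: bias dropped because BN follows
print(block.norm)                    # BatchNorm2d(32, ...)

x = torch.rand(2, 16, 24, 24)
print(block(x).shape)                # torch.Size([2, 32, 24, 24])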
5,745
33.824242
78
py
AlignShift
AlignShift-master/mmdet/models/losses/ghm_loss.py
import torch import torch.nn as nn import torch.nn.functional as F from ..registry import LOSSES def _expand_binary_labels(labels, label_weights, label_channels): bin_labels = labels.new_full((labels.size(0), label_channels), 0) inds = torch.nonzero(labels >= 1).squeeze() if inds.numel() > 0: bin_labels[inds, labels[inds] - 1] = 1 bin_label_weights = label_weights.view(-1, 1).expand( label_weights.size(0), label_channels) return bin_labels, bin_label_weights # TODO: code refactoring to make it consistent with other losses @LOSSES.register_module class GHMC(nn.Module): """GHM Classification Loss. Details of the theorem can be viewed in the paper "Gradient Harmonized Single-stage Detector". https://arxiv.org/abs/1811.05181 Args: bins (int): Number of the unit regions for distribution calculation. momentum (float): The parameter for moving average. use_sigmoid (bool): Can only be true for BCE based loss now. loss_weight (float): The weight of the total GHM-C loss. """ def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0): super(GHMC, self).__init__() self.bins = bins self.momentum = momentum edges = torch.arange(bins + 1).float() / bins self.register_buffer('edges', edges) self.edges[-1] += 1e-6 if momentum > 0: acc_sum = torch.zeros(bins) self.register_buffer('acc_sum', acc_sum) self.use_sigmoid = use_sigmoid if not self.use_sigmoid: raise NotImplementedError self.loss_weight = loss_weight def forward(self, pred, target, label_weight, *args, **kwargs): """Calculate the GHM-C loss. Args: pred (float tensor of size [batch_num, class_num]): The direct prediction of classification fc layer. target (float tensor of size [batch_num, class_num]): Binary class target for each sample. label_weight (float tensor of size [batch_num, class_num]): the value is 1 if the sample is valid and 0 if ignored. Returns: The gradient harmonized loss. """ # the target should be binary class label if pred.dim() != target.dim(): target, label_weight = _expand_binary_labels( target, label_weight, pred.size(-1)) target, label_weight = target.float(), label_weight.float() edges = self.edges mmt = self.momentum weights = torch.zeros_like(pred) # gradient length g = torch.abs(pred.sigmoid().detach() - target) valid = label_weight > 0 tot = max(valid.float().sum().item(), 1.0) n = 0 # n valid bins for i in range(self.bins): inds = (g >= edges[i]) & (g < edges[i + 1]) & valid num_in_bin = inds.sum().item() if num_in_bin > 0: if mmt > 0: self.acc_sum[i] = mmt * self.acc_sum[i] \ + (1 - mmt) * num_in_bin weights[inds] = tot / self.acc_sum[i] else: weights[inds] = tot / num_in_bin n += 1 if n > 0: weights = weights / n loss = F.binary_cross_entropy_with_logits( pred, target, weights, reduction='sum') / tot return loss * self.loss_weight # TODO: code refactoring to make it consistent with other losses @LOSSES.register_module class GHMR(nn.Module): """GHM Regression Loss. Details of the theorem can be viewed in the paper "Gradient Harmonized Single-stage Detector" https://arxiv.org/abs/1811.05181 Args: mu (float): The parameter for the Authentic Smooth L1 loss. bins (int): Number of the unit regions for distribution calculation. momentum (float): The parameter for moving average. loss_weight (float): The weight of the total GHM-R loss. 
""" def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0): super(GHMR, self).__init__() self.mu = mu self.bins = bins edges = torch.arange(bins + 1).float() / bins self.register_buffer('edges', edges) self.edges[-1] = 1e3 self.momentum = momentum if momentum > 0: acc_sum = torch.zeros(bins) self.register_buffer('acc_sum', acc_sum) self.loss_weight = loss_weight # TODO: support reduction parameter def forward(self, pred, target, label_weight, avg_factor=None): """Calculate the GHM-R loss. Args: pred (float tensor of size [batch_num, 4 (* class_num)]): The prediction of box regression layer. Channel number can be 4 or 4 * class_num depending on whether it is class-agnostic. target (float tensor of size [batch_num, 4 (* class_num)]): The target regression values with the same size of pred. label_weight (float tensor of size [batch_num, 4 (* class_num)]): The weight of each sample, 0 if ignored. Returns: The gradient harmonized loss. """ mu = self.mu edges = self.edges mmt = self.momentum # ASL1 loss diff = pred - target loss = torch.sqrt(diff * diff + mu * mu) - mu # gradient length g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach() weights = torch.zeros_like(g) valid = label_weight > 0 tot = max(label_weight.float().sum().item(), 1.0) n = 0 # n: valid bins for i in range(self.bins): inds = (g >= edges[i]) & (g < edges[i + 1]) & valid num_in_bin = inds.sum().item() if num_in_bin > 0: n += 1 if mmt > 0: self.acc_sum[i] = mmt * self.acc_sum[i] \ + (1 - mmt) * num_in_bin weights[inds] = tot / self.acc_sum[i] else: weights[inds] = tot / num_in_bin if n > 0: weights /= n loss = loss * weights loss = loss.sum() / tot return loss * self.loss_weight
6,304
35.656977
79
py
AlignShift
AlignShift-master/mmdet/models/losses/mse_loss.py
import torch.nn as nn import torch.nn.functional as F from ..registry import LOSSES from .utils import weighted_loss mse_loss = weighted_loss(F.mse_loss) @LOSSES.register_module class MSELoss(nn.Module): def __init__(self, reduction='mean', loss_weight=1.0): super().__init__() self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None): loss = self.loss_weight * mse_loss( pred, target, weight, reduction=self.reduction, avg_factor=avg_factor) return loss
632
23.346154
66
py
AlignShift
AlignShift-master/mmdet/models/losses/balanced_l1_loss.py
import numpy as np import torch import torch.nn as nn from ..registry import LOSSES from .utils import weighted_loss @weighted_loss def balanced_l1_loss(pred, target, beta=1.0, alpha=0.5, gamma=1.5, reduction='mean'): assert beta > 0 assert pred.size() == target.size() and target.numel() > 0 diff = torch.abs(pred - target) b = np.e**(gamma / alpha) - 1 loss = torch.where( diff < beta, alpha / b * (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff, gamma * diff + gamma / b - alpha * beta) return loss @LOSSES.register_module class BalancedL1Loss(nn.Module): """Balanced L1 Loss arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019) """ def __init__(self, alpha=0.5, gamma=1.5, beta=1.0, reduction='mean', loss_weight=1.0): super(BalancedL1Loss, self).__init__() self.alpha = alpha self.gamma = gamma self.beta = beta self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss_bbox = self.loss_weight * balanced_l1_loss( pred, target, weight, alpha=self.alpha, gamma=self.gamma, beta=self.beta, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss_bbox
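In balanced_l1_loss, b = e^(gamma/alpha) - 1 is chosen so the log-shaped inner branch and the linear outer branch meet exactly at diff = beta. A short numeric check with the default hyper-parameters:

import numpy as np

alpha, gamma, beta = 0.5, 1.5, 1.0
b = np.e ** (gamma / alpha) - 1

def inner(d):   # used when |pred - target| < beta
    return alpha / b * (b * d + 1) * np.log(b * d / beta + 1) - alpha * d

def outer(d):   # used otherwise
    return gamma * d + gamma / b - alpha * beta

print(inner(beta), outer(beta))   # identical values at the switch point
print(inner(0.2), outer(5.0))     # small errors: smooth; big errors: linear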
1,884
25.928571
73
py
AlignShift
AlignShift-master/mmdet/models/losses/iou_loss.py
import torch import torch.nn as nn from mmdet.core import bbox_overlaps from ..registry import LOSSES from .utils import weighted_loss @weighted_loss def iou_loss(pred, target, eps=1e-6): """IoU loss. Computing the IoU loss between a set of predicted bboxes and target bboxes. The loss is calculated as negative log of IoU. Args: pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2), shape (n, 4). target (Tensor): Corresponding gt bboxes, shape (n, 4). eps (float): Eps to avoid log(0). Return: Tensor: Loss tensor. """ ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps) loss = -ious.log() return loss @weighted_loss def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3): """Improving Object Localization with Fitness NMS and Bounded IoU Loss, https://arxiv.org/abs/1711.00164. Args: pred (tensor): Predicted bboxes. target (tensor): Target bboxes. beta (float): beta parameter in smoothl1. eps (float): eps to avoid NaN. """ pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5 pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5 pred_w = pred[:, 2] - pred[:, 0] + 1 pred_h = pred[:, 3] - pred[:, 1] + 1 with torch.no_grad(): target_ctrx = (target[:, 0] + target[:, 2]) * 0.5 target_ctry = (target[:, 1] + target[:, 3]) * 0.5 target_w = target[:, 2] - target[:, 0] + 1 target_h = target[:, 3] - target[:, 1] + 1 dx = target_ctrx - pred_ctrx dy = target_ctry - pred_ctry loss_dx = 1 - torch.max( (target_w - 2 * dx.abs()) / (target_w + 2 * dx.abs() + eps), torch.zeros_like(dx)) loss_dy = 1 - torch.max( (target_h - 2 * dy.abs()) / (target_h + 2 * dy.abs() + eps), torch.zeros_like(dy)) loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w / (target_w + eps)) loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h / (target_h + eps)) loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh], dim=-1).view(loss_dx.size(0), -1) loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta, loss_comb - 0.5 * beta) return loss @LOSSES.register_module class IoULoss(nn.Module): def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0): super(IoULoss, self).__init__() self.eps = eps self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): if weight is not None and not torch.any(weight > 0): return (pred * weight).sum() # 0 assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss = self.loss_weight * iou_loss( pred, target, weight, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss @LOSSES.register_module class BoundedIoULoss(nn.Module): def __init__(self, beta=0.2, eps=1e-3, reduction='mean', loss_weight=1.0): super(BoundedIoULoss, self).__init__() self.beta = beta self.eps = eps self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None, **kwargs): if weight is not None and not torch.any(weight > 0): return (pred * weight).sum() # 0 assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss = self.loss_weight * bounded_iou_loss( pred, target, weight, beta=self.beta, eps=self.eps, reduction=reduction, avg_factor=avg_factor, **kwargs) return loss
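A toy example of the -log(IoU) loss on two hand-made boxes; the overlap is computed inline so the snippet does not depend on mmdet.core.bbox_overlaps (and therefore ignores its +1 pixel convention):

import torch

pred = torch.tensor([[0., 0., 10., 10.]])
target = torch.tensor([[0., 0., 10., 20.]])

lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
inter = wh[:, 0] * wh[:, 1]
area_p = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
area_t = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
ious = inter / (area_p + area_t - inter)
print(ious, -ious.clamp(min=1e-6).log())   # IoU = 0.5 -> loss ~ 0.693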
4,339
30.911765
79
py
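Not from the repository: a self-contained illustration of the -log(IoU) objective used by iou_loss above, with the aligned overlap computed inline instead of via mmdet.core.bbox_overlaps (which uses the +1 pixel convention, so its exact numbers differ slightly).

# Editorial sketch of the negative-log-IoU objective on two toy box pairs.
import torch

pred = torch.tensor([[0., 0., 10., 10.], [0., 0., 10., 10.]])
target = torch.tensor([[0., 0., 10., 10.], [5., 0., 15., 10.]])

lt = torch.max(pred[:, :2], target[:, :2])        # top-left of the intersection
rb = torch.min(pred[:, 2:], target[:, 2:])        # bottom-right of the intersection
wh = (rb - lt).clamp(min=0)
inter = wh[:, 0] * wh[:, 1]
area_p = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
area_t = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
ious = (inter / (area_p + area_t - inter)).clamp(min=1e-6)
print(-ious.log())  # ~0 for the perfect match, ~1.10 for the half-overlapping pair (IoU 1/3)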
AlignShift
AlignShift-master/mmdet/models/losses/smooth_l1_loss.py
import torch
import torch.nn as nn

from ..registry import LOSSES
from .utils import weighted_loss


@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
    assert beta > 0
    assert pred.size() == target.size() and target.numel() > 0
    diff = torch.abs(pred - target)
    loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
                       diff - 0.5 * beta)
    return loss


@LOSSES.register_module
class SmoothL1Loss(nn.Module):

    def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
        super(SmoothL1Loss, self).__init__()
        self.beta = beta
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        loss_bbox = self.loss_weight * smooth_l1_loss(
            pred,
            target,
            weight,
            beta=self.beta,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return loss_bbox
1,288
27.021739
73
py
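Not from the repository: evaluating the smooth L1 formula above by hand on the same toy tensors used in the weighted_loss docstring, with beta=1.0.

# Editorial sketch: quadratic region for small errors, linear region otherwise.
import torch

pred = torch.tensor([0., 2., 3.])
target = torch.tensor([1., 1., 1.])
diff = (pred - target).abs()                                  # [1., 1., 2.]
loss = torch.where(diff < 1.0, 0.5 * diff * diff, diff - 0.5)
print(loss)  # tensor([0.5000, 0.5000, 1.5000]) -- all elements fall in the linear region since diff >= beta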
AlignShift
AlignShift-master/mmdet/models/losses/utils.py
import functools

import torch.nn.functional as F


def reduce_loss(loss, reduction):
    """Reduce loss as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Return:
        Tensor: Reduced loss tensor.
    """
    reduction_enum = F._Reduction.get_enum(reduction)
    # none: 0, elementwise_mean:1, sum: 2
    if reduction_enum == 0:
        return loss
    elif reduction_enum == 1:
        return loss.mean()
    elif reduction_enum == 2:
        return loss.sum()


def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Avarage factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.
    """
    # if weight is specified, apply element-wise weight
    if weight is not None:
        loss = loss * weight

    # if avg_factor is not specified, just reduce the loss
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    else:
        # if reduction is mean, then average the loss by avg_factor
        if reduction == 'mean':
            loss = loss.sum() / avg_factor
        # if reduction is 'none', then do nothing, otherwise raise an error
        elif reduction != 'none':
            raise ValueError(
                'avg_factor can not be used with reduction="sum"')
    return loss


def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    To use this decorator, the loss function must have the signature like
    `loss_func(pred, target, **kwargs)`. The function only needs to compute
    element-wise loss without any reduction. This decorator will add weight
    and reduction arguments to the function. The decorated function will have
    the signature like `loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)`.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred,
                target,
                weight=None,
                reduction='mean',
                avg_factor=None,
                **kwargs):
        # get element-wise loss
        loss = loss_func(pred, target, **kwargs)
        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
        return loss

    return wrapper
3,003
29.343434
79
py
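Not from the repository: tracing weight_reduce_loss by hand for the last case in the docstring above (weight=[1, 0, 1], avg_factor=2, reduction='mean'), to make the averaging explicit.

# Editorial sketch of the weight-then-average path of weight_reduce_loss.
import torch

loss = torch.tensor([1., 1., 2.])   # element-wise |pred - target| from the docstring example
weight = torch.tensor([1., 0., 1.])
loss = loss * weight                # [1., 0., 2.]
avg_factor = 2
print(loss.sum() / avg_factor)      # tensor(1.5000), matching l1_loss(pred, target, weight, avg_factor=2)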
AlignShift
AlignShift-master/mmdet/models/losses/accuracy.py
import torch.nn as nn


def accuracy(pred, target, topk=1):
    assert isinstance(topk, (int, tuple))
    if isinstance(topk, int):
        topk = (topk, )
        return_single = True
    else:
        return_single = False

    maxk = max(topk)
    _, pred_label = pred.topk(maxk, dim=1)
    pred_label = pred_label.t()
    correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))

    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / pred.size(0)))
    return res[0] if return_single else res


class Accuracy(nn.Module):

    def __init__(self, topk=(1, )):
        super().__init__()
        self.topk = topk

    def forward(self, pred, target):
        return accuracy(pred, target, self.topk)
801
24.0625
69
py
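Not from the repository: a quick check of the top-k logic in accuracy() above, replicated inline with pure torch so it runs without importing mmdet.

# Editorial sketch of top-1 vs. top-2 accuracy on two samples.
import torch

pred = torch.tensor([[0.1, 0.7, 0.2],    # argmax is class 1, true class is 2
                     [0.6, 0.3, 0.1]])   # argmax is class 0, true class is 0
target = torch.tensor([2, 0])

_, pred_label = pred.topk(2, dim=1)
correct = pred_label.t().eq(target.view(1, -1).expand_as(pred_label.t()))
for k in (1, 2):
    print(correct[:k].reshape(-1).float().sum() * 100.0 / pred.size(0))
# 50.0 for top-1 (only the second sample is right), 100.0 for top-2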
AlignShift
AlignShift-master/mmdet/models/losses/focal_loss.py
import torch.nn as nn
import torch.nn.functional as F

from mmdet.ops import sigmoid_focal_loss as _sigmoid_focal_loss
from ..registry import LOSSES
from .utils import weight_reduce_loss


# This method is only for debugging
def py_sigmoid_focal_loss(pred,
                          target,
                          weight=None,
                          gamma=2.0,
                          alpha=0.25,
                          reduction='mean',
                          avg_factor=None):
    pred_sigmoid = pred.sigmoid()
    target = target.type_as(pred)
    pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
    focal_weight = (alpha * target + (1 - alpha) *
                    (1 - target)) * pt.pow(gamma)
    loss = F.binary_cross_entropy_with_logits(
        pred, target, reduction='none') * focal_weight
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss


def sigmoid_focal_loss(pred,
                       target,
                       weight=None,
                       gamma=2.0,
                       alpha=0.25,
                       reduction='mean',
                       avg_factor=None):
    # Function.apply does not accept keyword arguments, so the decorator
    # "weighted_loss" is not applicable
    loss = _sigmoid_focal_loss(pred, target, gamma, alpha)
    # TODO: find a proper way to handle the shape of weight
    if weight is not None:
        weight = weight.view(-1, 1)
    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
    return loss


@LOSSES.register_module
class FocalLoss(nn.Module):

    def __init__(self,
                 use_sigmoid=True,
                 gamma=2.0,
                 alpha=0.25,
                 reduction='mean',
                 loss_weight=1.0):
        super(FocalLoss, self).__init__()
        assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
        self.use_sigmoid = use_sigmoid
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        if self.use_sigmoid:
            loss_cls = self.loss_weight * sigmoid_focal_loss(
                pred,
                target,
                weight,
                gamma=self.gamma,
                alpha=self.alpha,
                reduction=reduction,
                avg_factor=avg_factor)
        else:
            raise NotImplementedError
        return loss_cls
2,784
32.554217
76
py
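Not from the repository: reproducing the focal weighting from py_sigmoid_focal_loss above on one easy and one hard positive, to show how gamma down-weights well-classified examples; the default gamma=2.0 and alpha=0.25 are assumed.

# Editorial sketch of the focal re-weighting of binary cross-entropy.
import torch
import torch.nn.functional as F

pred = torch.tensor([3.0, -1.0])   # logits: a confident positive vs. a misclassified positive
target = torch.tensor([1.0, 1.0])
p = pred.sigmoid()
pt = (1 - p) * target + p * (1 - target)                     # probability of the wrong outcome
focal_weight = (0.25 * target + 0.75 * (1 - target)) * pt.pow(2.0)
ce = F.binary_cross_entropy_with_logits(pred, target, reduction='none')
print(ce)                  # ~[0.049, 1.313]
print(ce * focal_weight)   # ~[0.00003, 0.175]: the easy example is suppressed far more strongly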
AlignShift
AlignShift-master/mmdet/models/losses/cross_entropy_loss.py
import torch
import torch.nn as nn
import torch.nn.functional as F

from ..registry import LOSSES
from .utils import weight_reduce_loss


def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None):
    # element-wise losses
    loss = F.cross_entropy(pred, label, reduction='none')

    # apply weights and do the reduction
    if weight is not None:
        weight = weight.float()
    loss = weight_reduce_loss(
        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)

    return loss


def _expand_binary_labels(labels, label_weights, label_channels):
    bin_labels = labels.new_full((labels.size(0), label_channels), 0)
    inds = torch.nonzero(labels >= 1).squeeze()
    if inds.numel() > 0:
        bin_labels[inds, labels[inds] - 1] = 1
    if label_weights is None:
        bin_label_weights = None
    else:
        bin_label_weights = label_weights.view(-1, 1).expand(
            label_weights.size(0), label_channels)
    return bin_labels, bin_label_weights


def binary_cross_entropy(pred,
                         label,
                         weight=None,
                         reduction='mean',
                         avg_factor=None):
    if pred.dim() != label.dim():
        label, weight = _expand_binary_labels(label, weight, pred.size(-1))

    # weighted element-wise losses
    if weight is not None:
        weight = weight.float()
    loss = F.binary_cross_entropy_with_logits(
        pred, label.float(), weight, reduction='none')
    # do the reduction for the weighted loss
    loss = weight_reduce_loss(loss, reduction=reduction, avg_factor=avg_factor)

    return loss


def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None):
    # TODO: handle these two reserved arguments
    assert reduction == 'mean' and avg_factor is None
    num_rois = pred.size()[0]
    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
    pred_slice = pred[inds, label].squeeze(1)
    return F.binary_cross_entropy_with_logits(
        pred_slice, target, reduction='mean')[None]


@LOSSES.register_module
class CrossEntropyLoss(nn.Module):

    def __init__(self,
                 use_sigmoid=False,
                 use_mask=False,
                 reduction='mean',
                 loss_weight=1.0):
        super(CrossEntropyLoss, self).__init__()
        assert (use_sigmoid is False) or (use_mask is False)
        self.use_sigmoid = use_sigmoid
        self.use_mask = use_mask
        self.reduction = reduction
        self.loss_weight = loss_weight

        if self.use_sigmoid:
            self.cls_criterion = binary_cross_entropy
        elif self.use_mask:
            self.cls_criterion = mask_cross_entropy
        else:
            self.cls_criterion = cross_entropy

    def forward(self,
                cls_score,
                label,
                weight=None,
                avg_factor=None,
                reduction_override=None,
                **kwargs):
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        loss_cls = self.loss_weight * self.cls_criterion(
            cls_score,
            label,
            weight,
            reduction=reduction,
            avg_factor=avg_factor,
            **kwargs)
        return loss_cls
3,386
31.567308
79
py
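Not from the repository: a worked example of the label expansion performed by _expand_binary_labels above, where label 0 is background and positive labels start from 1.

# Editorial sketch: turning integer labels into one-hot rows for the sigmoid branch.
import torch

labels = torch.tensor([0, 2, 1])
label_channels = 3
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(labels >= 1).squeeze()
bin_labels[inds, labels[inds] - 1] = 1
print(bin_labels)
# tensor([[0, 0, 0],      background row stays all-zero
#         [0, 1, 0],      label 2 -> channel index 1
#         [1, 0, 0]])     label 1 -> channel index 0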
AlignShift
AlignShift-master/mmdet/models/backbones/hrnet.py
import logging import torch.nn as nn from mmcv.cnn import constant_init, kaiming_init from mmcv.runner import load_checkpoint from torch.nn.modules.batchnorm import _BatchNorm from ..registry import BACKBONES from ..utils import build_conv_layer, build_norm_layer from .resnet import BasicBlock, Bottleneck class HRModule(nn.Module): """ High-Resolution Module for HRNet. In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange is in this module. """ def __init__(self, num_branches, blocks, num_blocks, in_channels, num_channels, multiscale_output=True, with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN')): super(HRModule, self).__init__() self._check_branches(num_branches, num_blocks, in_channels, num_channels) self.in_channels = in_channels self.num_branches = num_branches self.multiscale_output = multiscale_output self.norm_cfg = norm_cfg self.conv_cfg = conv_cfg self.with_cp = with_cp self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels) self.fuse_layers = self._make_fuse_layers() self.relu = nn.ReLU(inplace=False) def _check_branches(self, num_branches, num_blocks, in_channels, num_channels): if num_branches != len(num_blocks): error_msg = 'NUM_BRANCHES({}) <> NUM_BLOCKS({})'.format( num_branches, len(num_blocks)) raise ValueError(error_msg) if num_branches != len(num_channels): error_msg = 'NUM_BRANCHES({}) <> NUM_CHANNELS({})'.format( num_branches, len(num_channels)) raise ValueError(error_msg) if num_branches != len(in_channels): error_msg = 'NUM_BRANCHES({}) <> NUM_INCHANNELS({})'.format( num_branches, len(in_channels)) raise ValueError(error_msg) def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1): downsample = None if stride != 1 or \ self.in_channels[branch_index] != \ num_channels[branch_index] * block.expansion: downsample = nn.Sequential( build_conv_layer( self.conv_cfg, self.in_channels[branch_index], num_channels[branch_index] * block.expansion, kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, num_channels[branch_index] * block.expansion)[1]) layers = [] layers.append( block( self.in_channels[branch_index], num_channels[branch_index], stride, downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg)) self.in_channels[branch_index] = \ num_channels[branch_index] * block.expansion for i in range(1, num_blocks[branch_index]): layers.append( block( self.in_channels[branch_index], num_channels[branch_index], with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg)) return nn.Sequential(*layers) def _make_branches(self, num_branches, block, num_blocks, num_channels): branches = [] for i in range(num_branches): branches.append( self._make_one_branch(i, block, num_blocks, num_channels)) return nn.ModuleList(branches) def _make_fuse_layers(self): if self.num_branches == 1: return None num_branches = self.num_branches in_channels = self.in_channels fuse_layers = [] num_out_branches = num_branches if self.multiscale_output else 1 for i in range(num_out_branches): fuse_layer = [] for j in range(num_branches): if j > i: fuse_layer.append( nn.Sequential( build_conv_layer( self.conv_cfg, in_channels[j], in_channels[i], kernel_size=1, stride=1, padding=0, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1], nn.Upsample( scale_factor=2**(j - i), mode='nearest'))) elif j == i: fuse_layer.append(None) else: conv_downsamples = [] for k in range(i - j): if k == i - j - 1: conv_downsamples.append( nn.Sequential( build_conv_layer( 
self.conv_cfg, in_channels[j], in_channels[i], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[i])[1])) else: conv_downsamples.append( nn.Sequential( build_conv_layer( self.conv_cfg, in_channels[j], in_channels[j], kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, in_channels[j])[1], nn.ReLU(inplace=False))) fuse_layer.append(nn.Sequential(*conv_downsamples)) fuse_layers.append(nn.ModuleList(fuse_layer)) return nn.ModuleList(fuse_layers) def forward(self, x): if self.num_branches == 1: return [self.branches[0](x[0])] for i in range(self.num_branches): x[i] = self.branches[i](x[i]) x_fuse = [] for i in range(len(self.fuse_layers)): y = 0 for j in range(self.num_branches): if i == j: y += x[j] else: y += self.fuse_layers[i][j](x[j]) x_fuse.append(self.relu(y)) return x_fuse @BACKBONES.register_module class HRNet(nn.Module): """HRNet backbone. High-Resolution Representations for Labeling Pixels and Regions arXiv: https://arxiv.org/abs/1904.04514 Args: extra (dict): detailed configuration for each stage of HRNet. in_channels (int): Number of input image channels. Normally 3. conv_cfg (dict): dictionary to construct and config conv layer. norm_cfg (dict): dictionary to construct and config norm layer. norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. zero_init_residual (bool): whether to use zero init for last norm layer in resblocks to let them behave as identity. Example: >>> from mmdet.models import HRNet >>> import torch >>> extra = dict( >>> stage1=dict( >>> num_modules=1, >>> num_branches=1, >>> block='BOTTLENECK', >>> num_blocks=(4, ), >>> num_channels=(64, )), >>> stage2=dict( >>> num_modules=1, >>> num_branches=2, >>> block='BASIC', >>> num_blocks=(4, 4), >>> num_channels=(32, 64)), >>> stage3=dict( >>> num_modules=4, >>> num_branches=3, >>> block='BASIC', >>> num_blocks=(4, 4, 4), >>> num_channels=(32, 64, 128)), >>> stage4=dict( >>> num_modules=3, >>> num_branches=4, >>> block='BASIC', >>> num_blocks=(4, 4, 4, 4), >>> num_channels=(32, 64, 128, 256))) >>> self = HRNet(extra, in_channels=1) >>> self.eval() >>> inputs = torch.rand(1, 1, 32, 32) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... 
print(tuple(level_out.shape)) (1, 32, 8, 8) (1, 64, 4, 4) (1, 128, 2, 2) (1, 256, 1, 1) """ blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} def __init__(self, extra, in_channels=3, conv_cfg=None, norm_cfg=dict(type='BN'), norm_eval=True, with_cp=False, zero_init_residual=False): super(HRNet, self).__init__() self.extra = extra self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.norm_eval = norm_eval self.with_cp = with_cp self.zero_init_residual = zero_init_residual # stem net self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2) self.conv1 = build_conv_layer( self.conv_cfg, in_channels, 64, kernel_size=3, stride=2, padding=1, bias=False) self.add_module(self.norm1_name, norm1) self.conv2 = build_conv_layer( self.conv_cfg, 64, 64, kernel_size=3, stride=2, padding=1, bias=False) self.add_module(self.norm2_name, norm2) self.relu = nn.ReLU(inplace=True) # stage 1 self.stage1_cfg = self.extra['stage1'] num_channels = self.stage1_cfg['num_channels'][0] block_type = self.stage1_cfg['block'] num_blocks = self.stage1_cfg['num_blocks'][0] block = self.blocks_dict[block_type] stage1_out_channels = num_channels * block.expansion self.layer1 = self._make_layer(block, 64, num_channels, num_blocks) # stage 2 self.stage2_cfg = self.extra['stage2'] num_channels = self.stage2_cfg['num_channels'] block_type = self.stage2_cfg['block'] block = self.blocks_dict[block_type] num_channels = [channel * block.expansion for channel in num_channels] self.transition1 = self._make_transition_layer([stage1_out_channels], num_channels) self.stage2, pre_stage_channels = self._make_stage( self.stage2_cfg, num_channels) # stage 3 self.stage3_cfg = self.extra['stage3'] num_channels = self.stage3_cfg['num_channels'] block_type = self.stage3_cfg['block'] block = self.blocks_dict[block_type] num_channels = [channel * block.expansion for channel in num_channels] self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels) self.stage3, pre_stage_channels = self._make_stage( self.stage3_cfg, num_channels) # stage 4 self.stage4_cfg = self.extra['stage4'] num_channels = self.stage4_cfg['num_channels'] block_type = self.stage4_cfg['block'] block = self.blocks_dict[block_type] num_channels = [channel * block.expansion for channel in num_channels] self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels) self.stage4, pre_stage_channels = self._make_stage( self.stage4_cfg, num_channels) @property def norm1(self): return getattr(self, self.norm1_name) @property def norm2(self): return getattr(self, self.norm2_name) def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer): num_branches_cur = len(num_channels_cur_layer) num_branches_pre = len(num_channels_pre_layer) transition_layers = [] for i in range(num_branches_cur): if i < num_branches_pre: if num_channels_cur_layer[i] != num_channels_pre_layer[i]: transition_layers.append( nn.Sequential( build_conv_layer( self.conv_cfg, num_channels_pre_layer[i], num_channels_cur_layer[i], kernel_size=3, stride=1, padding=1, bias=False), build_norm_layer(self.norm_cfg, num_channels_cur_layer[i])[1], nn.ReLU(inplace=True))) else: transition_layers.append(None) else: conv_downsamples = [] for j in range(i + 1 - num_branches_pre): in_channels = num_channels_pre_layer[-1] out_channels = num_channels_cur_layer[i] \ if j == i - num_branches_pre else in_channels conv_downsamples.append( nn.Sequential( build_conv_layer( self.conv_cfg, 
in_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=False), build_norm_layer(self.norm_cfg, out_channels)[1], nn.ReLU(inplace=True))) transition_layers.append(nn.Sequential(*conv_downsamples)) return nn.ModuleList(transition_layers) def _make_layer(self, block, inplanes, planes, blocks, stride=1): downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = nn.Sequential( build_conv_layer( self.conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), build_norm_layer(self.norm_cfg, planes * block.expansion)[1]) layers = [] layers.append( block( inplanes, planes, stride, downsample=downsample, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg)) inplanes = planes * block.expansion for i in range(1, blocks): layers.append( block( inplanes, planes, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg)) return nn.Sequential(*layers) def _make_stage(self, layer_config, in_channels, multiscale_output=True): num_modules = layer_config['num_modules'] num_branches = layer_config['num_branches'] num_blocks = layer_config['num_blocks'] num_channels = layer_config['num_channels'] block = self.blocks_dict[layer_config['block']] hr_modules = [] for i in range(num_modules): # multi_scale_output is only used for the last module if not multiscale_output and i == num_modules - 1: reset_multiscale_output = False else: reset_multiscale_output = True hr_modules.append( HRModule( num_branches, block, num_blocks, in_channels, num_channels, reset_multiscale_output, with_cp=self.with_cp, norm_cfg=self.norm_cfg, conv_cfg=self.conv_cfg)) return nn.Sequential(*hr_modules), in_channels def init_weights(self, pretrained=None): if isinstance(pretrained, str): logger = logging.getLogger() load_checkpoint(self, pretrained, strict=False, logger=logger) elif pretrained is None: for m in self.modules(): if isinstance(m, nn.Conv2d): kaiming_init(m) elif isinstance(m, (_BatchNorm, nn.GroupNorm)): constant_init(m, 1) if self.zero_init_residual: for m in self.modules(): if isinstance(m, Bottleneck): constant_init(m.norm3, 0) elif isinstance(m, BasicBlock): constant_init(m.norm2, 0) else: raise TypeError('pretrained must be a str or None') def forward(self, x): x = self.conv1(x) x = self.norm1(x) x = self.relu(x) x = self.conv2(x) x = self.norm2(x) x = self.relu(x) x = self.layer1(x) x_list = [] for i in range(self.stage2_cfg['num_branches']): if self.transition1[i] is not None: x_list.append(self.transition1[i](x)) else: x_list.append(x) y_list = self.stage2(x_list) x_list = [] for i in range(self.stage3_cfg['num_branches']): if self.transition2[i] is not None: x_list.append(self.transition2[i](y_list[-1])) else: x_list.append(y_list[i]) y_list = self.stage3(x_list) x_list = [] for i in range(self.stage4_cfg['num_branches']): if self.transition3[i] is not None: x_list.append(self.transition3[i](y_list[-1])) else: x_list.append(y_list[i]) y_list = self.stage4(x_list) return y_list def train(self, mode=True): super(HRNet, self).train(mode) if mode and self.norm_eval: for m in self.modules(): # trick: eval have effect on BatchNorm only if isinstance(m, _BatchNorm): m.eval()
19,868
36.773764
79
py
AlignShift
AlignShift-master/mmdet/models/backbones/resnet.py
import logging import torch.nn as nn import torch.utils.checkpoint as cp from mmcv.cnn import constant_init, kaiming_init from mmcv.runner import load_checkpoint from torch.nn.modules.batchnorm import _BatchNorm from mmdet.models.plugins import GeneralizedAttention from mmdet.ops import ContextBlock, DeformConv, ModulatedDeformConv from ..registry import BACKBONES from ..utils import build_conv_layer, build_norm_layer class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, gcb=None, gen_attention=None): super(BasicBlock, self).__init__() assert dcn is None, "Not implemented yet." assert gen_attention is None, "Not implemented yet." assert gcb is None, "Not implemented yet." self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) self.conv1 = build_conv_layer( conv_cfg, inplanes, planes, 3, stride=stride, padding=dilation, dilation=dilation, bias=False) self.add_module(self.norm1_name, norm1) self.conv2 = build_conv_layer( conv_cfg, planes, planes, 3, padding=1, bias=False) self.add_module(self.norm2_name, norm2) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride self.dilation = dilation assert not with_cp @property def norm1(self): return getattr(self, self.norm1_name) @property def norm2(self): return getattr(self, self.norm2_name) def forward(self, x): identity = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) out = self.conv2(out) out = self.norm2(out) if self.downsample is not None: identity = self.downsample(x) out += identity out = self.relu(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, gcb=None, gen_attention=None): """Bottleneck block for ResNet. If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is "caffe", the stride-two layer is the first 1x1 conv layer. 
""" super(Bottleneck, self).__init__() assert style in ['pytorch', 'caffe'] assert dcn is None or isinstance(dcn, dict) assert gcb is None or isinstance(gcb, dict) assert gen_attention is None or isinstance(gen_attention, dict) self.inplanes = inplanes self.planes = planes self.stride = stride self.dilation = dilation self.style = style self.with_cp = with_cp self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.dcn = dcn self.with_dcn = dcn is not None self.gcb = gcb self.with_gcb = gcb is not None self.gen_attention = gen_attention self.with_gen_attention = gen_attention is not None if self.style == 'pytorch': self.conv1_stride = 1 self.conv2_stride = stride else: self.conv1_stride = stride self.conv2_stride = 1 self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) self.norm3_name, norm3 = build_norm_layer( norm_cfg, planes * self.expansion, postfix=3) self.conv1 = build_conv_layer( conv_cfg, inplanes, planes, kernel_size=1, stride=self.conv1_stride, bias=False) self.add_module(self.norm1_name, norm1) fallback_on_stride = False self.with_modulated_dcn = False if self.with_dcn: fallback_on_stride = dcn.get('fallback_on_stride', False) self.with_modulated_dcn = dcn.get('modulated', False) if not self.with_dcn or fallback_on_stride: self.conv2 = build_conv_layer( conv_cfg, planes, planes, kernel_size=3, stride=self.conv2_stride, padding=dilation, dilation=dilation, bias=False) else: assert conv_cfg is None, 'conv_cfg must be None for DCN' self.deformable_groups = dcn.get('deformable_groups', 1) if not self.with_modulated_dcn: conv_op = DeformConv offset_channels = 18 else: conv_op = ModulatedDeformConv offset_channels = 27 self.conv2_offset = nn.Conv2d( planes, self.deformable_groups * offset_channels, kernel_size=3, stride=self.conv2_stride, padding=dilation, dilation=dilation) self.conv2 = conv_op( planes, planes, kernel_size=3, stride=self.conv2_stride, padding=dilation, dilation=dilation, deformable_groups=self.deformable_groups, bias=False) self.add_module(self.norm2_name, norm2) self.conv3 = build_conv_layer( conv_cfg, planes, planes * self.expansion, kernel_size=1, bias=False) self.add_module(self.norm3_name, norm3) self.relu = nn.ReLU(inplace=True) self.downsample = downsample if self.with_gcb: gcb_inplanes = planes * self.expansion self.context_block = ContextBlock(inplanes=gcb_inplanes, **gcb) # gen_attention if self.with_gen_attention: self.gen_attention_block = GeneralizedAttention( planes, **gen_attention) @property def norm1(self): return getattr(self, self.norm1_name) @property def norm2(self): return getattr(self, self.norm2_name) @property def norm3(self): return getattr(self, self.norm3_name) def forward(self, x): def _inner_forward(x): identity = x out = self.conv1(x) out = self.norm1(out) out = self.relu(out) if not self.with_dcn: out = self.conv2(out) elif self.with_modulated_dcn: offset_mask = self.conv2_offset(out) offset = offset_mask[:, :18 * self.deformable_groups, :, :] mask = offset_mask[:, -9 * self.deformable_groups:, :, :] mask = mask.sigmoid() out = self.conv2(out, offset, mask) else: offset = self.conv2_offset(out) out = self.conv2(out, offset) out = self.norm2(out) out = self.relu(out) if self.with_gen_attention: out = self.gen_attention_block(out) out = self.conv3(out) out = self.norm3(out) if self.with_gcb: out = self.context_block(out) if self.downsample is not None: identity = self.downsample(x) out += identity return out if self.with_cp and x.requires_grad: out = 
cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = self.relu(out) return out def make_res_layer(block, inplanes, planes, blocks, stride=1, dilation=1, style='pytorch', with_cp=False, conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, gcb=None, gen_attention=None, gen_attention_blocks=[]): downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = nn.Sequential( build_conv_layer( conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), build_norm_layer(norm_cfg, planes * block.expansion)[1], ) layers = [] layers.append( block( inplanes=inplanes, planes=planes, stride=stride, dilation=dilation, downsample=downsample, style=style, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, dcn=dcn, gcb=gcb, gen_attention=gen_attention if (0 in gen_attention_blocks) else None)) inplanes = planes * block.expansion for i in range(1, blocks): layers.append( block( inplanes=inplanes, planes=planes, stride=1, dilation=dilation, style=style, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, dcn=dcn, gcb=gcb, gen_attention=gen_attention if (i in gen_attention_blocks) else None)) return nn.Sequential(*layers) @BACKBONES.register_module class ResNet(nn.Module): """ResNet backbone. Args: depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. in_channels (int): Number of input image channels. Normally 3. num_stages (int): Resnet stages, normally 4. strides (Sequence[int]): Strides of the first block of each stage. dilations (Sequence[int]): Dilation of each stage. out_indices (Sequence[int]): Output from which stages. style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two layer is the 3x3 conv layer, otherwise the stride-two layer is the first 1x1 conv layer. frozen_stages (int): Stages to be frozen (stop grad and set eval mode). -1 means not freezing any parameters. norm_cfg (dict): dictionary to construct and config norm layer. norm_eval (bool): Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch Norm and its variants only. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. zero_init_residual (bool): whether to use zero init for last norm layer in resblocks to let them behave as identity. Example: >>> from mmdet.models import ResNet >>> import torch >>> self = ResNet(depth=18) >>> self.eval() >>> inputs = torch.rand(1, 3, 32, 32) >>> level_outputs = self.forward(inputs) >>> for level_out in level_outputs: ... 
print(tuple(level_out.shape)) (1, 64, 8, 8) (1, 128, 4, 4) (1, 256, 2, 2) (1, 512, 1, 1) """ arch_settings = { 18: (BasicBlock, (2, 2, 2, 2)), 34: (BasicBlock, (3, 4, 6, 3)), 50: (Bottleneck, (3, 4, 6, 3)), 101: (Bottleneck, (3, 4, 23, 3)), 152: (Bottleneck, (3, 8, 36, 3)) } def __init__(self, depth, in_channels=3, num_stages=4, strides=(1, 2, 2, 2), dilations=(1, 1, 1, 1), out_indices=(0, 1, 2, 3), style='pytorch', frozen_stages=-1, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, dcn=None, stage_with_dcn=(False, False, False, False), gcb=None, stage_with_gcb=(False, False, False, False), gen_attention=None, stage_with_gen_attention=((), (), (), ()), with_cp=False, zero_init_residual=True): super(ResNet, self).__init__() if depth not in self.arch_settings: raise KeyError('invalid depth {} for resnet'.format(depth)) self.depth = depth self.num_stages = num_stages assert num_stages >= 1 and num_stages <= 4 self.strides = strides self.dilations = dilations assert len(strides) == len(dilations) == num_stages self.out_indices = out_indices assert max(out_indices) < num_stages self.style = style self.frozen_stages = frozen_stages self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.with_cp = with_cp self.norm_eval = norm_eval self.dcn = dcn self.stage_with_dcn = stage_with_dcn if dcn is not None: assert len(stage_with_dcn) == num_stages self.gen_attention = gen_attention self.gcb = gcb self.stage_with_gcb = stage_with_gcb if gcb is not None: assert len(stage_with_gcb) == num_stages self.zero_init_residual = zero_init_residual self.block, stage_blocks = self.arch_settings[depth] self.stage_blocks = stage_blocks[:num_stages] self.inplanes = 64 self._make_stem_layer(in_channels) self.res_layers = [] for i, num_blocks in enumerate(self.stage_blocks): stride = strides[i] dilation = dilations[i] dcn = self.dcn if self.stage_with_dcn[i] else None gcb = self.gcb if self.stage_with_gcb[i] else None planes = 64 * 2**i res_layer = make_res_layer( self.block, self.inplanes, planes, num_blocks, stride=stride, dilation=dilation, style=self.style, with_cp=with_cp, conv_cfg=conv_cfg, norm_cfg=norm_cfg, dcn=dcn, gcb=gcb, gen_attention=gen_attention, gen_attention_blocks=stage_with_gen_attention[i]) self.inplanes = planes * self.block.expansion layer_name = 'layer{}'.format(i + 1) self.add_module(layer_name, res_layer) self.res_layers.append(layer_name) self._freeze_stages() self.feat_dim = self.block.expansion * 64 * 2**( len(self.stage_blocks) - 1) @property def norm1(self): return getattr(self, self.norm1_name) def _make_stem_layer(self, in_channels): self.conv1 = build_conv_layer( self.conv_cfg, in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False) self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) self.add_module(self.norm1_name, norm1) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) def _freeze_stages(self): if self.frozen_stages >= 0: self.norm1.eval() for m in [self.conv1, self.norm1]: for param in m.parameters(): param.requires_grad = False for i in range(1, self.frozen_stages + 1): m = getattr(self, 'layer{}'.format(i)) m.eval() for param in m.parameters(): param.requires_grad = False def init_weights(self, pretrained=None): if isinstance(pretrained, str): logger = logging.getLogger() load_checkpoint(self, pretrained, strict=False, logger=logger) elif pretrained is None: for m in self.modules(): if isinstance(m, nn.Conv2d): kaiming_init(m) elif isinstance(m, (_BatchNorm, nn.GroupNorm)): 
constant_init(m, 1) if self.dcn is not None: for m in self.modules(): if isinstance(m, Bottleneck) and hasattr( m, 'conv2_offset'): constant_init(m.conv2_offset, 0) if self.zero_init_residual: for m in self.modules(): if isinstance(m, Bottleneck): constant_init(m.norm3, 0) elif isinstance(m, BasicBlock): constant_init(m.norm2, 0) else: raise TypeError('pretrained must be a str or None') def forward(self, x): x = self.conv1(x) x = self.norm1(x) x = self.relu(x) x = self.maxpool(x) outs = [] for i, layer_name in enumerate(self.res_layers): res_layer = getattr(self, layer_name) x = res_layer(x) if i in self.out_indices: outs.append(x) return tuple(outs) def train(self, mode=True): super(ResNet, self).train(mode) self._freeze_stages() if mode and self.norm_eval: for m in self.modules(): # trick: eval have effect on BatchNorm only if isinstance(m, _BatchNorm): m.eval()
18,099
32.272059
79
py
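Not from the repository: a short sketch of the stage-freezing behaviour implemented by _freeze_stages in the ResNet backbone above; it assumes this mmdet fork and its compiled ops are installed, and uses the same import path as the class docstring.

# Editorial sketch: frozen_stages=1 keeps the stem and layer1 out of training.
import torch
from mmdet.models import ResNet

backbone = ResNet(depth=50, frozen_stages=1, norm_eval=True)
backbone.init_weights()
backbone.train()  # train() re-applies _freeze_stages(), so the frozen parts stay in eval mode
print(backbone.conv1.weight.requires_grad)                              # False
print(all(not p.requires_grad for p in backbone.layer1.parameters()))   # True
print(next(backbone.layer2.parameters()).requires_grad)                 # True
feats = backbone(torch.rand(1, 3, 64, 64))
print([tuple(f.shape) for f in feats])  # four feature levels at strides 4, 8, 16, 32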
AlignShift
AlignShift-master/mmdet/models/backbones/ssd_vgg.py
import logging

import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import (VGG, constant_init, kaiming_init, normal_init,
                      xavier_init)
from mmcv.runner import load_checkpoint

from ..registry import BACKBONES


@BACKBONES.register_module
class SSDVGG(VGG):
    """VGG Backbone network for single-shot-detection

    Args:
        input_size (int): width and height of input, from {300, 512}.
        depth (int): Depth of vgg, from {11, 13, 16, 19}.
        out_indices (Sequence[int]): Output from which stages.

    Example:
        >>> self = SSDVGG(input_size=300, depth=11)
        >>> self.eval()
        >>> inputs = torch.rand(1, 3, 300, 300)
        >>> level_outputs = self.forward(inputs)
        >>> for level_out in level_outputs:
        ...     print(tuple(level_out.shape))
        (1, 1024, 19, 19)
        (1, 512, 10, 10)
        (1, 256, 5, 5)
        (1, 256, 3, 3)
        (1, 256, 1, 1)
    """
    extra_setting = {
        300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
        512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
    }

    def __init__(self,
                 input_size,
                 depth,
                 with_last_pool=False,
                 ceil_mode=True,
                 out_indices=(3, 4),
                 out_feature_indices=(22, 34),
                 l2_norm_scale=20.):
        # TODO: in_channels for mmcv.VGG
        super(SSDVGG, self).__init__(
            depth,
            with_last_pool=with_last_pool,
            ceil_mode=ceil_mode,
            out_indices=out_indices)
        assert input_size in (300, 512)
        self.input_size = input_size

        self.features.add_module(
            str(len(self.features)),
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
        self.features.add_module(
            str(len(self.features)),
            nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
        self.features.add_module(
            str(len(self.features)), nn.ReLU(inplace=True))
        self.features.add_module(
            str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
        self.features.add_module(
            str(len(self.features)), nn.ReLU(inplace=True))
        self.out_feature_indices = out_feature_indices

        self.inplanes = 1024
        self.extra = self._make_extra_layers(self.extra_setting[input_size])

        self.l2_norm = L2Norm(
            self.features[out_feature_indices[0] - 1].out_channels,
            l2_norm_scale)

    def init_weights(self, pretrained=None):
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.features.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, nn.BatchNorm2d):
                    constant_init(m, 1)
                elif isinstance(m, nn.Linear):
                    normal_init(m, std=0.01)
        else:
            raise TypeError('pretrained must be a str or None')

        for m in self.extra.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')

        constant_init(self.l2_norm, self.l2_norm.scale)

    def forward(self, x):
        outs = []
        for i, layer in enumerate(self.features):
            x = layer(x)
            if i in self.out_feature_indices:
                outs.append(x)
        for i, layer in enumerate(self.extra):
            x = F.relu(layer(x), inplace=True)
            if i % 2 == 1:
                outs.append(x)
        outs[0] = self.l2_norm(outs[0])
        if len(outs) == 1:
            return outs[0]
        else:
            return tuple(outs)

    def _make_extra_layers(self, outplanes):
        layers = []
        kernel_sizes = (1, 3)
        num_layers = 0
        outplane = None
        for i in range(len(outplanes)):
            if self.inplanes == 'S':
                self.inplanes = outplane
                continue
            k = kernel_sizes[num_layers % 2]
            if outplanes[i] == 'S':
                outplane = outplanes[i + 1]
                conv = nn.Conv2d(
                    self.inplanes, outplane, k, stride=2, padding=1)
            else:
                outplane = outplanes[i]
                conv = nn.Conv2d(
                    self.inplanes, outplane, k, stride=1, padding=0)
            layers.append(conv)
            self.inplanes = outplanes[i]
            num_layers += 1
        if self.input_size == 512:
            layers.append(nn.Conv2d(self.inplanes, 256, 4, padding=1))

        return nn.Sequential(*layers)


class L2Norm(nn.Module):

    def __init__(self, n_dims, scale=20., eps=1e-10):
        super(L2Norm, self).__init__()
        self.n_dims = n_dims
        self.weight = nn.Parameter(torch.Tensor(self.n_dims))
        self.eps = eps
        self.scale = scale

    def forward(self, x):
        # normalization layer convert to FP32 in FP16 training
        x_float = x.float()
        norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps
        return (self.weight[None, :, None, None].float().expand_as(x_float) *
                x_float / norm).type_as(x)
5,335
33.425806
79
py