# NOTE(review): the lines below were non-code residue from a hosting page
# ("Spaces: / Runtime error") pasted into the file; kept only as a comment
# so the module stays parseable.
| import torch | |
| import torch.nn as nn | |
| from torch.nn import functional as F | |
| from nets.CSPdarknet_tiny import darknet_tiny | |
class BasicConv(nn.Module):
    """Conv2d -> BatchNorm2d -> LeakyReLU(0.1) building block.

    Padding is fixed at ``kernel_size // 2``, so with stride 1 the spatial
    size is preserved. The convolution carries no bias (BN follows it).
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1):
        super(BasicConv, self).__init__()
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            kernel_size // 2,
            bias=False,
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.activation = nn.LeakyReLU(0.1)

    def forward(self, x):
        """Apply convolution, batch-norm and leaky-ReLU in sequence."""
        return self.activation(self.bn(self.conv(x)))
class Upsample(nn.Module):
    """1x1 channel-reduction conv followed by 2x nearest-neighbour upsampling."""

    def __init__(self, in_channels, out_channels):
        super(Upsample, self).__init__()
        # Reduce the channel count first, then double the spatial resolution.
        self.upsample = nn.Sequential(
            BasicConv(in_channels, out_channels, 1),
            nn.Upsample(scale_factor=2, mode='nearest'),
        )

    def forward(self, x):
        """Return the channel-reduced, 2x-upsampled feature map."""
        return self.upsample(x)
def yolo_head(filters_list, in_filters):
    """Build a detection head: a 3x3 BasicConv then a 1x1 prediction conv.

    ``filters_list[0]`` is the hidden width; ``filters_list[1]`` is the
    number of prediction channels (anchors * (5 + num_classes)).
    """
    return nn.Sequential(
        BasicConv(in_filters, filters_list[0], 3),
        nn.Conv2d(filters_list[0], filters_list[1], 1),
    )
class ConvBNReLU(nn.Module):
    """Conv2d -> BatchNorm2d -> optional ReLU tuple.

    With ``use_relu=False`` the block ends at the BN layer (used for logits
    that are softmax-normalised later, e.g. CARAFE's kernel encoder).
    """

    def __init__(self, c_in, c_out, kernel_size, stride, padding, dilation,
                 use_relu=True):
        super(ConvBNReLU, self).__init__()
        self.conv = nn.Conv2d(c_in, c_out, kernel_size=kernel_size,
                              stride=stride, padding=padding,
                              dilation=dilation, bias=False)
        self.bn = nn.BatchNorm2d(c_out)
        # ``relu is None`` marks the activation-free variant.
        self.relu = nn.ReLU(inplace=True) if use_relu else None

    def forward(self, x):
        out = self.bn(self.conv(x))
        if self.relu is None:
            return out
        return self.relu(out)
class CARAFE(nn.Module):
    """Unofficial implementation of the CARAFE upsampler.

    Details in "https://arxiv.org/abs/1905.02188".

    Args:
        c: The channel number of the input and the output.
        c_mid: The channel number after compression.
        scale: The expected upsample scale.
        k_up: The size of the reassembly kernel.
        k_enc: The kernel size of the encoder.

    Returns (forward):
        X: The upsampled feature map, shape (b, c, h*scale, w*scale).
    """

    def __init__(self, c, c_mid=64, scale=2, k_up=5, k_enc=3):
        super(CARAFE, self).__init__()
        self.scale = scale
        # Channel compressor: cheapens the kernel-prediction branch.
        self.comp = ConvBNReLU(c, c_mid, kernel_size=1, stride=1,
                               padding=0, dilation=1)
        # Content encoder: (scale*k_up)^2 kernel logits per input pixel;
        # no ReLU, since the logits go through a softmax in forward().
        self.enc = ConvBNReLU(c_mid, (scale * k_up) ** 2, kernel_size=k_enc,
                              stride=1, padding=k_enc // 2, dilation=1,
                              use_relu=False)
        # Rearranges logits to output resolution: k_up^2 per output pixel.
        self.pix_shf = nn.PixelShuffle(scale)
        self.upsmp = nn.Upsample(scale_factor=scale, mode='nearest')
        # Gathers k_up x k_up neighbourhoods; dilation=scale keeps the
        # window aligned with the original low-resolution grid.
        self.unfold = nn.Unfold(kernel_size=k_up, dilation=scale,
                                padding=k_up // 2 * scale)

    def forward(self, X):
        b, c, h, w = X.size()
        out_h, out_w = h * self.scale, w * self.scale

        # --- kernel prediction branch ---------------------------------
        kernels = self.comp(X)            # b, c_mid, h, w
        kernels = self.enc(kernels)       # b, (scale*k_up)^2, h, w
        kernels = self.pix_shf(kernels)   # b, k_up^2, out_h, out_w
        kernels = F.softmax(kernels, dim=1)

        # --- feature reassembly branch --------------------------------
        feats = self.upsmp(X)                        # b, c, out_h, out_w
        feats = self.unfold(feats)                   # b, c*k_up^2, out_h*out_w
        feats = feats.view(b, c, -1, out_h, out_w)   # b, c, k_up^2, out_h, out_w

        # Weighted sum of each neighbourhood with its predicted kernel.
        return torch.einsum('bkhw,bckhw->bchw', [kernels, feats])
| #---------------------------------------------------# | |
| # yolo_body--MSFNet | |
| #---------------------------------------------------# | |
class YoloBody(nn.Module):
    """YOLO-tiny detection body (MSFNet variant) on a CSPDarknet-tiny backbone.

    Produces two detection maps from backbone features feat1 (26x26x256) and
    feat2 (13x13x512): ``out0`` at the 13x13 scale and ``out1`` at the 26x26
    scale, each with ``len(anchors_mask[i]) * (5 + num_classes)`` channels.

    Args:
        anchors_mask: Per-scale anchor index lists; only their lengths are used.
        num_classes: Number of object classes.
        phi: Attention-variant selector; stored but currently unused.
        backbone: Backbone name; only 'tiny' is supported.
        pretrained: Whether to load pretrained backbone weights.

    Raises:
        ValueError: If ``backbone`` is not 'tiny' (the original silently left
            ``self.backbone`` unset and crashed later in forward()).
    """

    def __init__(self, anchors_mask, num_classes, phi=0, backbone='tiny', pretrained=False):
        super(YoloBody, self).__init__()
        # Kept so the (currently disabled) attention branch can read it.
        self.phi = phi
        if backbone != 'tiny':
            raise ValueError("Unsupported backbone: %r; expected 'tiny'." % (backbone,))
        self.backbone = darknet_tiny(pretrained)
        # 13,13,512 -> 13,13,256
        self.conv_for_P5 = BasicConv(512, 256, 1)
        self.yolo_headP5 = yolo_head([512, len(anchors_mask[0]) * (5 + num_classes)], 256)
        # 13,13,256 -> 26,26,128 (1x1 conv + nearest upsample)
        self.upsample_1 = Upsample(256, 128)
        # 13,13,256 -> 13,13,128, then content-aware 2x upsample
        self.conv1 = BasicConv(256, 128, 1)
        self.upsample_2 = CARAFE(128)
        # Input width 384 = 128 (fused upsample paths) + 256 (feat1)
        self.yolo_headP4 = yolo_head([256, len(anchors_mask[1]) * (5 + num_classes)], 384)

    def forward(self, x):
        # feat1: 26,26,256   feat2: 13,13,512
        feat1, feat2 = self.backbone(x)

        # 13,13,512 -> 13,13,256
        P5 = self.conv_for_P5(feat2)
        # 13,13,256 -> 13,13,512 -> 13,13,(anchors*(5+classes))
        out0 = self.yolo_headP5(P5)

        # Reuse P5 instead of re-running conv_for_P5(feat2): same module on
        # the same input yields identical values, so the second pass in the
        # original was pure wasted compute.
        # Path A: 13,13,256 -> 26,26,128 via plain nearest upsample.
        plain_up = self.upsample_1(P5)
        # Path B: 13,13,256 -> 13,13,128 -> 26,26,128 via CARAFE.
        carafe_up = self.upsample_2(self.conv1(P5))

        # Fuse the two upsampling paths, then concat with the backbone skip:
        # 26,26,128 + 26,26,256 -> 26,26,384
        fused = carafe_up + plain_up
        P4 = torch.cat([fused, feat1], dim=1)

        # 26,26,384 -> 26,26,256 -> 26,26,(anchors*(5+classes))
        out1 = self.yolo_headP4(P4)
        return out0, out1