import torch
import torch.nn as nn
import torch.nn.functional as F

class Conv2d(nn.Module):
    # Thin wrapper around nn.Conv2d that optionally appends an activation
    # module. Note: the default activation=nn.ReLU() instance is created once
    # at definition time and shared by all instances; ReLU is stateless, so
    # this sharing is harmless here.
    def __init__(self, in_ch, out_ch, kernel_size=3, stride=1, padding=1, D=1, activation=nn.ReLU()):
        super(Conv2d, self).__init__()
        if activation:
            self.conv = nn.Sequential(
                nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, stride=stride, padding=padding, dilation=D),
                activation
            )
        else:
            self.conv = nn.Sequential(
                nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, stride=stride, padding=padding, dilation=D)
            )

    def forward(self, x):
        return self.conv(x)

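# Usage sketch (illustrative shapes, not from the original code):
#   conv = Conv2d(3, 64)                    # 3x3 conv + ReLU, same spatial size
#   y = conv(torch.randn(1, 3, 32, 32))     # -> (1, 64, 32, 32)
#   bare = Conv2d(3, 64, activation=None)   # convolution only, no activation
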
def init_He(module):
    # He (Kaiming) initialization for every conv layer in `module`;
    # BatchNorm layers are reset to identity (weight 1, bias 0).
    for m in module.modules():
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)

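# Usage sketch (illustrative): init_He(nn.Sequential(Conv2d(3, 64), Conv2d(64, 64)))
# module.modules() yields submodules recursively, so nested containers are handled.
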
def pad_divide_by(in_list, d, in_size):
    # Pad each tensor in `in_list` (all of spatial size `in_size` = (h, w))
    # so that height and width become divisible by `d`, splitting the padding
    # as evenly as possible between the two sides of each axis.
    out_list = []
    h, w = in_size
    new_h = h if h % d == 0 else h + d - h % d
    new_w = w if w % d == 0 else w + d - w % d
    lh, uh = (new_h - h) // 2, (new_h - h) - (new_h - h) // 2
    lw, uw = (new_w - w) // 2, (new_w - w) - (new_w - w) // 2
    # F.pad takes (left, right, top, bottom) for the last two dimensions.
    pad_array = (lw, uw, lh, uh)
    for inp in in_list:
        out_list.append(F.pad(inp, pad_array))
    return out_list, pad_array
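
# Self-contained smoke test (illustrative sizes, not part of the original
# file): pad a frame/mask pair to a multiple of 16, then slice the padding
# back off to recover the originals.
if __name__ == "__main__":
    frame = torch.randn(1, 3, 100, 170)
    mask = torch.randn(1, 1, 100, 170)
    (frame_p, mask_p), pads = pad_divide_by([frame, mask], 16, (100, 170))
    assert frame_p.shape[-2:] == (112, 176) and mask_p.shape[-2:] == (112, 176)
    lw, uw, lh, uh = pads
    # Undo the zero-padding by slicing off the added borders.
    frame_r = frame_p[:, :, lh:frame_p.size(2) - uh, lw:frame_p.size(3) - uw]
    assert torch.equal(frame_r, frame)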