import math

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_


class Mlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


def window_partition(x, window_size):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size

    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    return windows


def window_reverse(windows, window_size, H, W):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image

    Returns:
        x: (B, H, W, C)
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x


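# Illustrative sanity check (not part of the original model): window_reverse
# inverts window_partition whenever H and W are multiples of the window size.
# All sizes below are arbitrary example values.
def _demo_window_roundtrip():
    x = torch.randn(2, 14, 14, 8)           # (B, H, W, C) with 14 = 2 * 7
    windows = window_partition(x, 7)        # (2*2*2, 7, 7, 8)
    y = window_reverse(windows, 7, 14, 14)  # back to (2, 14, 14, 8)
    assert torch.equal(x, y)

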
class WindowAttention(nn.Module):
    """Window-based multi-head self-attention (W-MSA) with relative position bias.

    Supports both shifted and non-shifted windows.

    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value.
        qk_scale (float | None, optional): Override the default qk scale of head_dim ** -0.5 if set.
        attn_drop (float, optional): Dropout ratio of attention weights.
        proj_drop (float, optional): Dropout ratio of output.
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # (Wh, Ww)
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        # learnable relative position bias: one table entry per (dh, dw) offset per head
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))

        # precompute the pairwise relative position index for tokens inside a window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # (2, Wh, Ww)
        coords_flatten = torch.flatten(coords, 1)  # (2, Wh*Ww)
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # (2, Wh*Ww, Wh*Ww)
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # (Wh*Ww, Wh*Ww, 2)
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift offsets to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # (Wh*Ww, Wh*Ww)
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)
        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # (nH, Wh*Ww, Wh*Ww)
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
        attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x

    def extra_repr(self) -> str:
        return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'

    def flops(self, N):
        # FLOPs for attention over one window of N tokens
        flops = 0
        # qkv projection
        flops += N * self.dim * 3 * self.dim
        # attn = q @ k^T
        flops += self.num_heads * N * (self.dim // self.num_heads) * N
        # out = attn @ v
        flops += self.num_heads * N * N * (self.dim // self.num_heads)
        # output projection
        flops += N * self.dim * self.dim
        return flops


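# Illustrative shape walkthrough (assumed example sizes, not from the original
# code): WindowAttention consumes flattened windows of N = Wh*Ww tokens and
# preserves their shape; each window attends independently.
def _demo_window_attention_shapes():
    attn = WindowAttention(dim=96, window_size=to_2tuple(7), num_heads=6)
    x = torch.randn(8, 49, 96)  # (num_windows*B, N, C), N = 7*7
    out = attn(x)               # no mask: plain W-MSA
    assert out.shape == x.shape

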
class SwinTransformerBlock(nn.Module):
    """Swin Transformer block: (shifted) window attention followed by an MLP,
    each wrapped in a residual connection."""

    def __init__(self, dim, img_resolution, num_heads, window_size=7, shift_size=0,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.img_resolution = img_resolution
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        if min(self.img_resolution) <= self.window_size:
            # if the window is not smaller than the input, attend over the whole input and do not shift
            self.shift_size = 0
            self.window_size = min(self.img_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"

        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)

        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        if self.shift_size > 0:
            attn_mask = self.calculate_mask(self.img_resolution)
        else:
            attn_mask = None
        self.register_buffer("attn_mask", attn_mask)

    def calculate_mask(self, x_size):
        # calculate the attention mask for SW-MSA
        H, W = x_size
        img_mask = torch.zeros((1, H, W, 1))
        h_slices = (slice(0, -self.window_size),
                    slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None))
        w_slices = (slice(0, -self.window_size),
                    slice(-self.window_size, -self.shift_size),
                    slice(-self.shift_size, None))
        cnt = 0
        for h in h_slices:
            for w in w_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1

        mask_windows = window_partition(img_mask, self.window_size)  # (nW, window_size, window_size, 1)
        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))

        return attn_mask

    def forward(self, x, x_size):
        H, W = x_size
        B, L, C = x.shape

        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)

        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_x = x

        # partition windows
        x_windows = window_partition(shifted_x, self.window_size)  # (nW*B, window_size, window_size, C)
        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)

        # W-MSA/SW-MSA; reuse the precomputed mask when the input matches the build-time resolution
        if self.img_resolution == x_size:
            attn_windows = self.attn(x_windows, mask=self.attn_mask)
        else:
            attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device))

        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, H, W)  # (B, H, W, C)

        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x
        x = x.view(B, H * W, C)

        # FFN
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))

        return x

    def flops(self):
        flops = 0
        H, W = self.img_resolution
        # norm1
        flops += self.dim * H * W
        # W-MSA/SW-MSA
        nW = H * W / self.window_size / self.window_size
        flops += nW * self.attn.flops(self.window_size * self.window_size)
        # mlp
        flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
        # norm2
        flops += self.dim * H * W
        return flops


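# Illustrative usage sketch (assumed example sizes): a shifted block keeps the
# flattened token sequence shape; x_size must agree with the sequence length.
def _demo_swin_block():
    block = SwinTransformerBlock(dim=96, img_resolution=(56, 56), num_heads=6,
                                 window_size=7, shift_size=3)
    x = torch.randn(2, 56 * 56, 96)
    out = block(x, (56, 56))  # matches img_resolution, so the cached mask is reused
    assert out.shape == x.shape

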
class ResBlock(nn.Module):
    def __init__(
            self, n_feats, kernel_size,
            bias=True, bn=False, act=nn.ReLU(True), res_scale=0.1):
        super(ResBlock, self).__init__()
        m = []
        for i in range(2):
            # note: padding=1 preserves the spatial size only for kernel_size=3
            m.append(nn.Conv2d(n_feats, n_feats, kernel_size, padding=1, bias=bias))
            if bn:
                m.append(nn.BatchNorm2d(n_feats))
            if i == 0:
                m.append(act)

        self.body = nn.Sequential(*m)
        self.res_scale = res_scale

    def forward(self, x):
        res = self.body(x).mul(self.res_scale)
        res += x

        return res


class DoubleBranchBlock(nn.Module):
    """Double-branch block: a stack of Swin Transformer blocks (FIEB) runs in
    parallel with a convolutional ResBlock branch (SIEB), and the two outputs
    are concatenated along the channel dimension."""

    def __init__(self, dim, img_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, use_checkpoint=False):
        super().__init__()
        self.dim = dim
        self.input_resolution = img_resolution
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # transformer branch: alternate regular (W-MSA) and shifted (SW-MSA) windows
        self.FIEB = nn.ModuleList([
            SwinTransformerBlock(dim=dim, img_resolution=img_resolution,
                                 num_heads=num_heads, window_size=window_size,
                                 shift_size=0 if (i % 2 == 0) else window_size // 2,
                                 mlp_ratio=mlp_ratio,
                                 qkv_bias=qkv_bias, qk_scale=qk_scale,
                                 drop=drop, attn_drop=attn_drop,
                                 drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                                 norm_layer=norm_layer)
            for i in range(depth)])

        # convolutional branch
        SIEB = []
        SIEB.append(ResBlock(dim, 3))
        self.SIEB = nn.Sequential(*SIEB)

    def forward(self, x, x_size):
        H, W = x_size
        B, _, C = x.shape
        x_ = self.SIEB(x.transpose(1, 2).reshape(B, C, H, W))
        for fbranch in self.FIEB:
            if self.use_checkpoint:
                x = checkpoint.checkpoint(fbranch, x, x_size)
            else:
                x = fbranch(x, x_size)
        return torch.cat((x, x_.flatten(2).transpose(1, 2)), dim=2)

    def flops(self):
        flops = 0
        for blk in self.FIEB:
            flops += blk.flops()

        return flops


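# Illustrative sketch (assumed example sizes): concatenating the transformer
# branch with the convolutional branch doubles the channel dimension.
def _demo_double_branch():
    dbb = DoubleBranchBlock(dim=96, img_resolution=(56, 56), depth=2,
                            num_heads=6, window_size=7)
    x = torch.randn(2, 56 * 56, 96)
    out = dbb(x, (56, 56))
    assert out.shape == (2, 56 * 56, 96 * 2)

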
class DoublebranchFeatureExtractionBlock(nn.Module):
    """Double-branch feature extraction: a DoubleBranchBlock followed by a 3x3
    convolution that fuses the concatenated 2*dim channels back to dim."""

    def __init__(self, dim, img_resolution, depth, num_heads, window_size,
                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
                 img_size=224, patch_size=4, resi_connection='1conv'):
        super(DoublebranchFeatureExtractionBlock, self).__init__()

        self.dim = dim
        self.input_resolution = img_resolution

        self.DFEB = DoubleBranchBlock(dim=dim,
                                      img_resolution=img_resolution,
                                      depth=depth,
                                      num_heads=num_heads,
                                      window_size=window_size,
                                      mlp_ratio=mlp_ratio,
                                      qkv_bias=qkv_bias, qk_scale=qk_scale,
                                      drop=drop, attn_drop=attn_drop,
                                      drop_path=drop_path,
                                      norm_layer=norm_layer,
                                      use_checkpoint=use_checkpoint)

        if resi_connection == '1conv':
            # fuse the concatenated branches (2*dim channels) back to dim
            self.conv = nn.Conv2d(dim * 2, dim, 3, 1, 1)

        self.token_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
            norm_layer=None)

        self.token_unembed = PatchUnEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=0, embed_dim=dim,
            norm_layer=None)

    def forward(self, x, x_size):
        x_token = self.DFEB(x, x_size)
        x = self.token_unembed(x_token, x_size)
        x = self.conv(x)
        x_token = self.token_embed(x)

        return x_token

    def flops(self):
        flops = 0
        flops += self.DFEB.flops()
        H, W = self.input_resolution
        flops += 2 * H * W * (self.dim * 9 + 1) * self.dim * 2
        flops += 2 * H * W * (self.dim * 2 * 9 + 1) * self.dim
        flops += self.token_embed.flops()
        flops += self.token_unembed.flops()

        return flops


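# Illustrative sketch (assumed example sizes): the block returns tokens with
# the original dim channels after the 3x3 fusion convolution, so it can be
# stacked and accumulated residually.
def _demo_feature_extraction_block():
    blk = DoublebranchFeatureExtractionBlock(dim=96, img_resolution=(56, 56),
                                             depth=2, num_heads=6, window_size=7,
                                             img_size=56, patch_size=1)
    x = torch.randn(2, 56 * 56, 96)
    out = blk(x, (56, 56))
    assert out.shape == x.shape

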
class PatchEmbed(nn.Module):
    """Image to token embedding: flattens (B, C, H, W) into (B, H*W, C)."""

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        self.img_size = img_size

        self.in_chans = in_chans
        self.embed_dim = embed_dim

        if norm_layer is not None:
            self.norm = norm_layer(embed_dim)
        else:
            self.norm = None

    def forward(self, x):
        x = x.flatten(2).transpose(1, 2)  # (B, C, H, W) -> (B, H*W, C)
        if self.norm is not None:
            x = self.norm(x)
        return x

    def flops(self):
        flops = 0
        H, W = self.img_size
        if self.norm is not None:
            flops += H * W * self.embed_dim
        return flops


class PatchUnEmbed(nn.Module):
    """Token to image un-embedding: reshapes (B, H*W, C) back to (B, C, H, W)."""

    def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        img_size = to_2tuple(img_size)
        self.img_size = img_size

        self.in_chans = in_chans
        self.embed_dim = embed_dim

    def forward(self, x, x_size):
        B, HW, C = x.shape
        x = x.transpose(1, 2).view(B, -1, x_size[0], x_size[1])  # (B, C, H, W)
        return x

    def flops(self):
        flops = 0
        return flops


class MFAM(nn.Module):
    """MFAM network: shallow convolutional feature extraction, a stack of
    double-branch feature extraction blocks, and a final reconstruction
    convolution."""

    def __init__(self, img_size=64, patch_size=1, in_chans=1,
                 embed_dim=96, depths=[6, 6, 6, 6], num_heads=[6, 6, 6, 6],
                 window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
                 use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv',
                 **kwargs):
        super(MFAM, self).__init__()

        num_in_ch = in_chans
        num_out_ch = in_chans
        self.img_range = img_range

        self.upscale = upscale
        self.upsampler = upsampler
        self.window_size = window_size

        # shallow feature extraction
        self.conv_first = nn.Conv2d(num_in_ch, embed_dim, 3, 1, 1)

        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.patch_norm = patch_norm
        self.num_features = embed_dim
        self.mlp_ratio = mlp_ratio

        # split the feature map into tokens
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)

        self.img_resolution = self.patch_embed.img_size
        img_resolution = self.patch_embed.img_size

        # merge tokens back into a feature map
        self.patch_unembed = PatchUnEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)

        self.pos_drop = nn.Dropout(p=drop_rate)

        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]

        # deep feature extraction: stacked double-branch blocks
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = DoublebranchFeatureExtractionBlock(dim=embed_dim,
                                                       img_resolution=(img_resolution[0],
                                                                       img_resolution[1]),
                                                       depth=depths[i_layer],
                                                       num_heads=num_heads[i_layer],
                                                       window_size=window_size,
                                                       mlp_ratio=self.mlp_ratio,
                                                       qkv_bias=qkv_bias, qk_scale=qk_scale,
                                                       drop=drop_rate, attn_drop=attn_drop_rate,
                                                       drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
                                                       norm_layer=norm_layer,
                                                       downsample=None,
                                                       use_checkpoint=use_checkpoint,
                                                       img_size=img_size,
                                                       patch_size=patch_size,
                                                       resi_connection=resi_connection)
            self.layers.append(layer)
        self.norm = norm_layer(self.num_features)

        if resi_connection == '1conv':
            self.conv_after_body = ResBlock(embed_dim, 3)

        # reconstruction; note that num_out_ch // 2 requires in_chans >= 2
        self.conv_last = nn.Conv2d(embed_dim, num_out_ch // 2, 3, 1, 1)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'absolute_pos_embed'}

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        return {'relative_position_bias_table'}

    def check_image_size(self, x):
        # pad H and W to multiples of the window size
        _, _, h, w = x.size()
        mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
        mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
        x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
        return x

    def forward_features(self, x):
        x_size = (x.shape[2], x.shape[3])
        x = self.patch_embed(x)

        x = self.pos_drop(x)
        res = 0.
        for layer in self.layers:
            x = layer(x, x_size)
            res += x
        x = self.norm(x + res)
        x = self.patch_unembed(x, x_size)

        return x

    def forward(self, x):
        H, W = x.shape[2:]
        x = self.check_image_size(x)

        x_first = self.conv_first(x)
        res = self.conv_after_body(self.forward_features(x_first)) + x_first

        x = self.conv_last(res)

        # crop to the target size (the unpadded H, W scaled by upscale)
        return x[:, :, :H * self.upscale, :W * self.upscale]

    def flops(self):
        flops = 0
        H, W = self.img_resolution
        flops += 2 * H * W * (12 * 9 + 1) * self.embed_dim * 2
        flops += self.patch_embed.flops()
        for i, layer in enumerate(self.layers):
            flops += layer.flops()
        flops += 2 * H * W * (self.embed_dim * 9 + 1) * self.embed_dim * 2
        flops += 2 * H * W * (self.embed_dim * 9 + 1) * 12
        return flops


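if __name__ == '__main__':
    # Minimal smoke test (illustrative settings, not the original training
    # configuration): in_chans=2 is assumed so that conv_last's
    # num_out_ch // 2 output channels stay positive, and window_size=8 is
    # assumed so that it divides img_size=64 when the shift masks are built.
    model = MFAM(img_size=64, patch_size=1, in_chans=2, embed_dim=60,
                 depths=[2, 2], num_heads=[6, 6], window_size=8)
    x = torch.randn(1, 2, 64, 64)
    with torch.no_grad():
        y = model(x)
    print(y.shape)  # (1, 1, 64, 64): two input channels fused into one output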