孙聪聪 committed
Commit be36716 · 0 parent(s)

Initial upload of AST deraindrop model
.gitattributes ADDED
@@ -0,0 +1 @@
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "architectures": [
+     "ASTForRestoration"
+   ],
+   "attn_drop_rate": 0.0,
+   "dd_in": 3,
+   "depths": [
+     1,
+     2,
+     8,
+     8,
+     2,
+     8,
+     8,
+     2,
+     1
+   ],
+   "drop_path_rate": 0.1,
+   "drop_rate": 0.0,
+   "embed_dim": 32,
+   "img_size": 256,
+   "in_chans": 3,
+   "mlp_ratio": 4.0,
+   "model_type": "ast",
+   "num_heads": [
+     1,
+     2,
+     4,
+     8,
+     16,
+     16,
+     8,
+     4,
+     2
+   ],
+   "patch_norm": true,
+   "qk_scale": null,
+   "qkv_bias": true,
+   "shift_flag": true,
+   "token_mlp": "frfn",
+   "token_projection": "linear",
+   "torch_dtype": "float32",
+   "transformers_version": "4.46.3",
+   "use_checkpoint": false,
+   "win_size": 8
+ }
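Since `model_type` is the custom `"ast"`, this config is consumed by the `ASTConfig`/`ASTForRestoration` classes defined in `modeling_ast.py` below, not by a built-in `transformers` architecture. A minimal sketch of rebuilding the model locally from this file (the file paths are the only assumption):

    import json
    from modeling_ast import ASTConfig, ASTForRestoration

    with open("config.json") as f:
        config = ASTConfig(**json.load(f))  # unknown keys are simply stored as config attributes
    model = ASTForRestoration(config)       # randomly initialised; the real weights live in model.safetensors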
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab3c6779bb11f51cf1eee39fe7a9a135a8272875c5d3fd64e0013b2af0dbf629
+ size 262210108
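The three lines above are a Git LFS pointer, not the network itself: the actual ~262 MB safetensors file is fetched by its sha256 oid when the repository is checked out with LFS. A quick sanity check of the resolved file (a sketch; assumes the `safetensors` package is installed and LFS has pulled the real file):

    from safetensors import safe_open

    with safe_open("model.safetensors", framework="pt", device="cpu") as f:
        print(len(f.keys()), "tensors")  # key names mirror the AST module tree, e.g. model.encoderlayer_0...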
modeling_ast.py ADDED
@@ -0,0 +1,749 @@
+ import torch
+ import torch.nn as nn
+ import torch.utils.checkpoint as checkpoint
+ from timm.models.layers import DropPath, to_2tuple, trunc_normal_
+ import torch.nn.functional as F
+ from einops import rearrange, repeat
+ from einops.layers.torch import Rearrange
+ import math
+ import numpy as np
+ import time
+ from torch import einsum
+ import json
+ import os
+ import argparse
+ from transformers import PretrainedConfig, PreTrainedModel
+
+
+ #################################################################################
+ #                                                                               #
+ #       PART 1: Your model definition (from the file you provided)              #
+ #                                                                               #
+ #################################################################################
+
+ def conv(in_channels, out_channels, kernel_size, bias=False, stride=1):
+     return nn.Conv2d(
+         in_channels, out_channels, kernel_size,
+         padding=(kernel_size // 2), bias=bias, stride=stride)
+
+
+ class ConvBlock(nn.Module):
+     def __init__(self, in_channel, out_channel, strides=1):
+         super(ConvBlock, self).__init__()
+         self.strides = strides
+         self.in_channel = in_channel
+         self.out_channel = out_channel
+         self.block = nn.Sequential(
+             nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=strides, padding=1),
+             nn.LeakyReLU(inplace=True),
+             nn.Conv2d(out_channel, out_channel, kernel_size=3, stride=strides, padding=1),
+             nn.LeakyReLU(inplace=True),
+         )
+         self.conv11 = nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=strides, padding=0)
+
+     def forward(self, x):
+         out1 = self.block(x)
+         out2 = self.conv11(x)
+         out = out1 + out2
+         return out
+
+
+ class LinearProjection(nn.Module):
+     def __init__(self, dim, heads=8, dim_head=64, dropout=0., bias=True):
+         super().__init__()
+         inner_dim = dim_head * heads
+         self.heads = heads
+         self.to_q = nn.Linear(dim, inner_dim, bias=bias)
+         self.to_kv = nn.Linear(dim, inner_dim * 2, bias=bias)
+         self.dim = dim
+         self.inner_dim = inner_dim
+
+     def forward(self, x, attn_kv=None):
+         B_, N, C = x.shape
+         if attn_kv is not None:
+             attn_kv = attn_kv.unsqueeze(0).repeat(B_, 1, 1)
+         else:
+             attn_kv = x
+         N_kv = attn_kv.size(1)
+         q = self.to_q(x).reshape(B_, N, 1, self.heads, C // self.heads).permute(2, 0, 3, 1, 4)
+         kv = self.to_kv(attn_kv).reshape(B_, N_kv, 2, self.heads, C // self.heads).permute(2, 0, 3, 1, 4)
+         q = q[0]
+         k, v = kv[0], kv[1]
+         return q, k, v
+
+
+ class WindowAttention(nn.Module):
+     def __init__(self, dim, win_size, num_heads, token_projection='linear', qkv_bias=True, qk_scale=None, attn_drop=0.,
+                  proj_drop=0.):
+         super().__init__()
+         self.dim = dim
+         self.win_size = win_size
+         self.num_heads = num_heads
+         head_dim = dim // num_heads
+         self.scale = qk_scale or head_dim ** -0.5
+         self.relative_position_bias_table = nn.Parameter(
+             torch.zeros((2 * win_size[0] - 1) * (2 * win_size[1] - 1), num_heads))
+         coords_h = torch.arange(self.win_size[0])
+         coords_w = torch.arange(self.win_size[1])
+         coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing="ij"))
+         coords_flatten = torch.flatten(coords, 1)
+         relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
+         relative_coords = relative_coords.permute(1, 2, 0).contiguous()
+         relative_coords[:, :, 0] += self.win_size[0] - 1
+         relative_coords[:, :, 1] += self.win_size[1] - 1
+         relative_coords[:, :, 0] *= 2 * self.win_size[1] - 1
+         relative_position_index = relative_coords.sum(-1)
+         self.register_buffer("relative_position_index", relative_position_index)
+         trunc_normal_(self.relative_position_bias_table, std=.02)
+         if token_projection == 'linear':
+             self.qkv = LinearProjection(dim, num_heads, dim // num_heads, bias=qkv_bias)
+         else:
+             raise Exception("Projection error!")
+         self.token_projection = token_projection
+         self.attn_drop = nn.Dropout(attn_drop)
+         self.proj = nn.Linear(dim, dim)
+         self.proj_drop = nn.Dropout(proj_drop)
+         self.softmax = nn.Softmax(dim=-1)
+
+     def forward(self, x, attn_kv=None, mask=None):
+         B_, N, C = x.shape
+         q, k, v = self.qkv(x, attn_kv)
+         q = q * self.scale
+         attn = (q @ k.transpose(-2, -1))
+         relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
+             self.win_size[0] * self.win_size[1], self.win_size[0] * self.win_size[1], -1)
+         relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
+         ratio = attn.size(-1) // relative_position_bias.size(-1)
+         relative_position_bias = repeat(relative_position_bias, 'nH l c -> nH l (c d)', d=ratio)
+         attn = attn + relative_position_bias.unsqueeze(0)
+         if mask is not None:
+             nW = mask.shape[0]
+             mask = repeat(mask, 'nW m n -> nW m (n d)', d=ratio)
+             attn = attn.view(B_ // nW, nW, self.num_heads, N, N * ratio) + mask.unsqueeze(1).unsqueeze(0)
+             attn = attn.view(-1, self.num_heads, N, N * ratio)
+             attn = self.softmax(attn)
+         else:
+             attn = self.softmax(attn)
+         attn = self.attn_drop(attn)
+         x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
+         x = self.proj(x)
+         x = self.proj_drop(x)
+         return x
+
+
+ class WindowAttention_sparse(nn.Module):
+     def __init__(self, dim, win_size, num_heads, token_projection='linear', qkv_bias=True, qk_scale=None, attn_drop=0.,
+                  proj_drop=0.):
+         super().__init__()
+         self.dim = dim
+         self.win_size = win_size
+         self.num_heads = num_heads
+         head_dim = dim // num_heads
+         self.scale = qk_scale or head_dim ** -0.5
+         self.relative_position_bias_table = nn.Parameter(
+             torch.zeros((2 * win_size[0] - 1) * (2 * win_size[1] - 1), num_heads))
+         coords_h = torch.arange(self.win_size[0])
+         coords_w = torch.arange(self.win_size[1])
+         coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing="ij"))
+         coords_flatten = torch.flatten(coords, 1)
+         relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
+         relative_coords = relative_coords.permute(1, 2, 0).contiguous()
+         relative_coords[:, :, 0] += self.win_size[0] - 1
+         relative_coords[:, :, 1] += self.win_size[1] - 1
+         relative_coords[:, :, 0] *= 2 * self.win_size[1] - 1
+         relative_position_index = relative_coords.sum(-1)
+         self.register_buffer("relative_position_index", relative_position_index)
+         trunc_normal_(self.relative_position_bias_table, std=.02)
+         if token_projection == 'linear':
+             self.qkv = LinearProjection(dim, num_heads, dim // num_heads, bias=qkv_bias)
+         else:
+             raise Exception("Projection error!")
+         self.token_projection = token_projection
+         self.attn_drop = nn.Dropout(attn_drop)
+         self.proj = nn.Linear(dim, dim)
+         self.proj_drop = nn.Dropout(proj_drop)
+         self.softmax = nn.Softmax(dim=-1)
+         self.relu = nn.ReLU()
+         self.w = nn.Parameter(torch.ones(2))
+
+     def forward(self, x, attn_kv=None, mask=None):
+         B_, N, C = x.shape
+         q, k, v = self.qkv(x, attn_kv)
+         q = q * self.scale
+         attn = (q @ k.transpose(-2, -1))
+         relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
+             self.win_size[0] * self.win_size[1], self.win_size[0] * self.win_size[1], -1)
+         relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
+         ratio = attn.size(-1) // relative_position_bias.size(-1)
+         relative_position_bias = repeat(relative_position_bias, 'nH l c -> nH l (c d)', d=ratio)
+         attn = attn + relative_position_bias.unsqueeze(0)
+         if mask is not None:
+             nW = mask.shape[0]
+             mask = repeat(mask, 'nW m n -> nW m (n d)', d=ratio)
+             attn = attn.view(B_ // nW, nW, self.num_heads, N, N * ratio) + mask.unsqueeze(1).unsqueeze(0)
+             attn = attn.view(-1, self.num_heads, N, N * ratio)
+             attn0 = self.softmax(attn)
+             attn1 = self.relu(attn) ** 2
+         else:
+             attn0 = self.softmax(attn)
+             attn1 = self.relu(attn) ** 2
+         w1 = torch.exp(self.w[0]) / torch.sum(torch.exp(self.w))
+         w2 = torch.exp(self.w[1]) / torch.sum(torch.exp(self.w))
+         attn = attn0 * w1 + attn1 * w2
+         attn = self.attn_drop(attn)
+         x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
+         x = self.proj(x)
+         x = self.proj_drop(x)
+         return x
+
+
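`WindowAttention_sparse` is the adaptive part of AST: it blends the dense softmax attention map with a sparse ReLU² map, weighted by two learned scalars that the `exp`/`sum` lines normalise exactly like a softmax. A tiny standalone sketch of that mixing rule (the numbers are made up for illustration):

    import torch

    w = torch.tensor([0.3, 1.1])           # stand-in for the learned logits self.w
    w1, w2 = torch.softmax(w, dim=0)       # equivalent to exp(w_i) / sum(exp(w))
    scores = torch.randn(2, 4, 4)          # stand-in attention logits
    mixed = torch.softmax(scores, dim=-1) * w1 + torch.relu(scores) ** 2 * w2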
+ class Mlp(nn.Module):
+     def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
+         super().__init__()
+         out_features = out_features or in_features
+         hidden_features = hidden_features or in_features
+         self.fc1 = nn.Linear(in_features, hidden_features)
+         self.act = act_layer()
+         self.fc2 = nn.Linear(hidden_features, out_features)
+         self.drop = nn.Dropout(drop)
+
+     def forward(self, x):
+         x = self.fc1(x)
+         x = self.act(x)
+         x = self.drop(x)
+         x = self.fc2(x)
+         x = self.drop(x)
+         return x
+
+
+ class LeFF(nn.Module):
+     def __init__(self, dim=32, hidden_dim=128, act_layer=nn.GELU, drop=0., use_eca=False):
+         super().__init__()
+         self.linear1 = nn.Sequential(nn.Linear(dim, hidden_dim), act_layer())
+         self.dwconv = nn.Sequential(
+             nn.Conv2d(hidden_dim, hidden_dim, groups=hidden_dim, kernel_size=3, stride=1, padding=1), act_layer())
+         self.linear2 = nn.Sequential(nn.Linear(hidden_dim, dim))
+         self.eca = nn.Identity()
+
+     def forward(self, x):
+         bs, hw, c = x.size()
+         hh = int(math.sqrt(hw))
+         x = self.linear1(x)
+         x = rearrange(x, ' b (h w) (c) -> b c h w ', h=hh, w=hh)
+         x = self.dwconv(x)
+         x = rearrange(x, ' b c h w -> b (h w) c', h=hh, w=hh)
+         x = self.linear2(x)
+         x = self.eca(x)
+         return x
+
+
+ class FRFN(nn.Module):
+     def __init__(self, dim=32, hidden_dim=128, act_layer=nn.GELU, drop=0., use_eca=False):
+         super().__init__()
+         self.linear1 = nn.Sequential(nn.Linear(dim, hidden_dim * 2),
+                                      act_layer())
+         self.dwconv = nn.Sequential(
+             nn.Conv2d(hidden_dim, hidden_dim, groups=hidden_dim, kernel_size=3, stride=1, padding=1),
+             act_layer())
+         self.linear2 = nn.Sequential(nn.Linear(hidden_dim, dim))
+         self.dim = dim
+         self.hidden_dim = hidden_dim
+
+         self.dim_conv = self.dim // 4
+         self.dim_untouched = self.dim - self.dim_conv
+         self.partial_conv3 = nn.Conv2d(self.dim_conv, self.dim_conv, 3, 1, 1, bias=False)
+
+     def forward(self, x):
+         bs, hw, c = x.size()
+         hh = int(math.sqrt(hw))
+         x = rearrange(x, ' b (h w) (c) -> b c h w ', h=hh, w=hh)
+         x1, x2 = torch.split(x, [self.dim_conv, self.dim_untouched], dim=1)
+         x1 = self.partial_conv3(x1)
+         x = torch.cat((x1, x2), 1)
+         x = rearrange(x, ' b c h w -> b (h w) c', h=hh, w=hh)
+         x = self.linear1(x)
+         x_1, x_2 = x.chunk(2, dim=-1)
+         x_1 = rearrange(x_1, ' b (h w) (c) -> b c h w ', h=hh, w=hh)
+         x_1 = self.dwconv(x_1)
+         x_1 = rearrange(x_1, ' b c h w -> b (h w) c', h=hh, w=hh)
+         x = x_1 * x_2
+         x = self.linear2(x)
+         return x
+
+
+ def window_partition(x, win_size, dilation_rate=1):
+     B, H, W, C = x.shape
+     if dilation_rate != 1:
+         x = x.permute(0, 3, 1, 2)
+         assert type(dilation_rate) is int, 'dilation_rate should be an int'
+         x = F.unfold(x, kernel_size=win_size, dilation=dilation_rate, padding=4 * (dilation_rate - 1), stride=win_size)
+         windows = x.permute(0, 2, 1).contiguous().view(-1, C, win_size, win_size)
+         windows = windows.permute(0, 2, 3, 1).contiguous()
+     else:
+         x = x.view(B, H // win_size, win_size, W // win_size, win_size, C)
+         windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, win_size, win_size, C)
+     return windows
+
+
+ def window_reverse(windows, win_size, H, W, dilation_rate=1):
+     B = int(windows.shape[0] / (H * W / win_size / win_size))
+     x = windows.view(B, H // win_size, W // win_size, win_size, win_size, -1)
+     if dilation_rate != 1:
+         x = windows.permute(0, 5, 3, 4, 1, 2).contiguous()
+         x = F.fold(x, (H, W), kernel_size=win_size, dilation=dilation_rate, padding=4 * (dilation_rate - 1),
+                    stride=win_size)
+     else:
+         x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
+     return x
+
+
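For the `dilation_rate=1` path, which is the only one `TransformerBlock` uses below, `window_partition` and `window_reverse` are exact inverses. A quick round-trip check (shapes chosen to match the model's `win_size=8`):

    import torch

    x = torch.randn(2, 64, 64, 32)      # (B, H, W, C) with H, W divisible by win_size
    windows = window_partition(x, 8)    # -> (2 * 8 * 8, 8, 8, 32)
    y = window_reverse(windows, 8, 64, 64)
    assert torch.allclose(x, y)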
+ class Downsample(nn.Module):
+     def __init__(self, in_channel, out_channel):
+         super(Downsample, self).__init__()
+         self.conv = nn.Sequential(nn.Conv2d(in_channel, out_channel, kernel_size=4, stride=2, padding=1))
+
+     def forward(self, x):
+         B, L, C = x.shape
+         H = int(math.sqrt(L))
+         W = int(math.sqrt(L))
+         x = x.transpose(1, 2).contiguous().view(B, C, H, W)
+         out = self.conv(x).flatten(2).transpose(1, 2).contiguous()
+         return out
+
+
+ class Upsample(nn.Module):
+     def __init__(self, in_channel, out_channel):
+         super(Upsample, self).__init__()
+         self.deconv = nn.Sequential(nn.ConvTranspose2d(in_channel, out_channel, kernel_size=2, stride=2))
+
+     def forward(self, x):
+         B, L, C = x.shape
+         H = int(math.sqrt(L))
+         W = int(math.sqrt(L))
+         x = x.transpose(1, 2).contiguous().view(B, C, H, W)
+         out = self.deconv(x).flatten(2).transpose(1, 2).contiguous()
+         return out
+
+
+ class InputProj(nn.Module):
+     def __init__(self, in_channel=3, out_channel=64, kernel_size=3, stride=1, norm_layer=None, act_layer=nn.LeakyReLU):
+         super().__init__()
+         self.proj = nn.Sequential(
+             nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, padding=kernel_size // 2),
+             act_layer(inplace=True))
+         self.norm = norm_layer(out_channel) if norm_layer is not None else None
+
+     def forward(self, x):
+         B, C, H, W = x.shape
+         x = self.proj(x).flatten(2).transpose(1, 2).contiguous()
+         if self.norm is not None:
+             x = self.norm(x)
+         return x
+
+
+ class OutputProj(nn.Module):
+     def __init__(self, in_channel=64, out_channel=3, kernel_size=3, stride=1, norm_layer=None, act_layer=None):
+         super().__init__()
+         self.proj = nn.Sequential(
+             nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, padding=kernel_size // 2))
+         if act_layer is not None:
+             self.proj.add_module(str(len(self.proj)), act_layer(inplace=True))
+         self.norm = norm_layer(out_channel) if norm_layer is not None else None
+
+     def forward(self, x):
+         B, L, C = x.shape
+         H = int(math.sqrt(L))
+         W = int(math.sqrt(L))
+         x = x.transpose(1, 2).view(B, C, H, W)
+         x = self.proj(x)
+         if self.norm is not None:
+             x = self.norm(x)
+         return x
+
+
+ class TransformerBlock(nn.Module):
+     def __init__(self, dim, input_resolution, num_heads, win_size=8, shift_size=0, mlp_ratio=4., qkv_bias=True,
+                  qk_scale=None, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm,
+                  token_projection='linear', token_mlp='leff', att=True, sparseAtt=False):
+         super().__init__()
+         self.att = att
+         self.sparseAtt = sparseAtt
+         self.dim = dim
+         self.input_resolution = input_resolution
+         self.num_heads = num_heads
+         self.win_size = win_size
+         self.shift_size = shift_size
+         self.mlp_ratio = mlp_ratio
+         if min(self.input_resolution) <= self.win_size:
+             self.shift_size = 0
+             self.win_size = min(self.input_resolution)
+         assert 0 <= self.shift_size < self.win_size, "shift_size must be in [0, win_size)"
+         if self.att:
+             self.norm1 = norm_layer(dim)
+             if self.sparseAtt:
+                 self.attn = WindowAttention_sparse(dim, win_size=to_2tuple(self.win_size), num_heads=num_heads,
+                                                    qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop,
+                                                    proj_drop=drop, token_projection=token_projection)
+             else:
+                 self.attn = WindowAttention(dim, win_size=to_2tuple(self.win_size), num_heads=num_heads,
+                                             qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop,
+                                             token_projection=token_projection)
+         self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+         self.norm2 = norm_layer(dim)
+         mlp_hidden_dim = int(dim * mlp_ratio)
+         if token_mlp in ['ffn', 'mlp']:
+             self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+         elif token_mlp == 'leff':
+             self.mlp = LeFF(dim, mlp_hidden_dim, act_layer=act_layer, drop=drop)
+         elif token_mlp == 'frfn':
+             self.mlp = FRFN(dim, mlp_hidden_dim, act_layer=act_layer, drop=drop)
+         else:
+             raise Exception("FFN error!")
+
+     def forward(self, x, mask=None):
+         B, L, C = x.shape
+         H = int(math.sqrt(L))
+         W = int(math.sqrt(L))
+         attn_mask = None
+         if self.shift_size > 0:
+             shift_mask = torch.zeros((1, H, W, 1), device=x.device)
+             h_slices = (slice(0, -self.win_size), slice(-self.win_size, -self.shift_size),
+                         slice(-self.shift_size, None))
+             w_slices = (slice(0, -self.win_size), slice(-self.win_size, -self.shift_size),
+                         slice(-self.shift_size, None))
+             cnt = 0
+             for h in h_slices:
+                 for w in w_slices:
+                     shift_mask[:, h, w, :] = cnt
+                     cnt += 1
+             shift_mask_windows = window_partition(shift_mask, self.win_size)
+             shift_mask_windows = shift_mask_windows.view(-1, self.win_size * self.win_size)
+             attn_mask = shift_mask_windows.unsqueeze(1) - shift_mask_windows.unsqueeze(2)
+             attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
+         shortcut = x
+         if self.att:
+             x = self.norm1(x)
+             x = x.view(B, H, W, C)
+             if self.shift_size > 0:
+                 shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
+             else:
+                 shifted_x = x
+             x_windows = window_partition(shifted_x, self.win_size)
+             x_windows = x_windows.view(-1, self.win_size * self.win_size, C)
+             attn_windows = self.attn(x_windows, mask=attn_mask)
+             attn_windows = attn_windows.view(-1, self.win_size, self.win_size, C)
+             shifted_x = window_reverse(attn_windows, self.win_size, H, W)
+             if self.shift_size > 0:
+                 x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
+             else:
+                 x = shifted_x
+             x = x.view(B, H * W, C)
+             x = shortcut + self.drop_path(x)
+         x = x + self.drop_path(self.mlp(self.norm2(x)))
+         return x
+
+
+ class BasicASTLayer(nn.Module):
+     def __init__(self, dim, output_dim, input_resolution, depth, num_heads, win_size, mlp_ratio=4., qkv_bias=True,
+                  qk_scale=None, drop=0., attn_drop=0., drop_path=0., norm_layer=nn.LayerNorm, use_checkpoint=False,
+                  token_projection='linear', token_mlp='ffn', shift_flag=True, att=False, sparseAtt=False):
+         super().__init__()
+         self.att = att
+         self.sparseAtt = sparseAtt
+         self.depth = depth
+         self.use_checkpoint = use_checkpoint
+         if shift_flag:
+             self.blocks = nn.ModuleList([
+                 TransformerBlock(dim=dim, input_resolution=input_resolution, num_heads=num_heads, win_size=win_size,
+                                  shift_size=0 if (i % 2 == 0) else win_size // 2, mlp_ratio=mlp_ratio,
+                                  qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop, attn_drop=attn_drop,
+                                  drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
+                                  norm_layer=norm_layer, token_projection=token_projection, token_mlp=token_mlp,
+                                  att=self.att, sparseAtt=self.sparseAtt)
+                 for i in range(depth)])
+         else:
+             self.blocks = nn.ModuleList([
+                 TransformerBlock(dim=dim, input_resolution=input_resolution, num_heads=num_heads, win_size=win_size,
+                                  shift_size=0, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop,
+                                  attn_drop=attn_drop,
+                                  drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
+                                  norm_layer=norm_layer, token_projection=token_projection, token_mlp=token_mlp,
+                                  att=self.att, sparseAtt=self.sparseAtt)
+                 for i in range(depth)])
+
+     def forward(self, x, mask=None):
+         for blk in self.blocks:
+             if self.use_checkpoint:
+                 # Note: checkpoint doesn't support the mask argument, so we pass it as None
+                 x = checkpoint.checkpoint(blk, x, None)
+             else:
+                 x = blk(x, mask)
+         return x
+
+
+ class AST(nn.Module):
+     def __init__(self, img_size=256, in_chans=3, dd_in=3, embed_dim=32, depths=[2, 2, 2, 2, 2, 2, 2, 2, 2],
+                  num_heads=[1, 2, 4, 8, 16, 16, 8, 4, 2], win_size=8, mlp_ratio=4., qkv_bias=True, qk_scale=None,
+                  drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, patch_norm=True,
+                  use_checkpoint=False, token_projection='linear', token_mlp='leff', dowsample=Downsample,
+                  upsample=Upsample, shift_flag=True, **kwargs):
+         super().__init__()
+         self.num_enc_layers = len(depths) // 2
+         self.num_dec_layers = len(depths) // 2
+         self.embed_dim = embed_dim
+         self.patch_norm = patch_norm
+         self.mlp_ratio = mlp_ratio
+         self.token_projection = token_projection
+         self.mlp = token_mlp
+         self.win_size = win_size
+         self.reso = img_size
+         self.pos_drop = nn.Dropout(p=drop_rate)
+         self.dd_in = dd_in
+         enc_dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths[:self.num_enc_layers]))]
+         conv_dpr = [drop_path_rate] * depths[4]
+         dec_dpr = enc_dpr[::-1]
+         self.input_proj = InputProj(in_channel=dd_in, out_channel=embed_dim, kernel_size=3, stride=1,
+                                     act_layer=nn.LeakyReLU)
+         self.output_proj = OutputProj(in_channel=2 * embed_dim, out_channel=in_chans, kernel_size=3, stride=1)
+         # Encoder
+         # (the 'dowsample' spelling is kept from the original code: the attribute names
+         # dowsample_0..3 are part of the checkpoint keys, so renaming would break loading)
+         self.encoderlayer_0 = BasicASTLayer(dim=embed_dim, output_dim=embed_dim, input_resolution=(img_size, img_size),
+                                             depth=depths[0], num_heads=num_heads[0], win_size=win_size,
+                                             mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+                                             drop=drop_rate, attn_drop=attn_drop_rate,
+                                             drop_path=enc_dpr[sum(depths[:0]):sum(depths[:1])], norm_layer=norm_layer,
+                                             use_checkpoint=use_checkpoint, token_projection=token_projection,
+                                             token_mlp=token_mlp, shift_flag=shift_flag, att=False, sparseAtt=False)
+         self.dowsample_0 = dowsample(embed_dim, embed_dim * 2)
+         self.encoderlayer_1 = BasicASTLayer(dim=embed_dim * 2, output_dim=embed_dim * 2,
+                                             input_resolution=(img_size // 2, img_size // 2), depth=depths[1],
+                                             num_heads=num_heads[1], win_size=win_size, mlp_ratio=self.mlp_ratio,
+                                             qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate,
+                                             attn_drop=attn_drop_rate,
+                                             drop_path=enc_dpr[sum(depths[:1]):sum(depths[:2])], norm_layer=norm_layer,
+                                             use_checkpoint=use_checkpoint, token_projection=token_projection,
+                                             token_mlp=token_mlp, shift_flag=shift_flag, att=False, sparseAtt=False)
+         self.dowsample_1 = dowsample(embed_dim * 2, embed_dim * 4)
+         self.encoderlayer_2 = BasicASTLayer(dim=embed_dim * 4, output_dim=embed_dim * 4,
+                                             input_resolution=(img_size // (2 ** 2), img_size // (2 ** 2)),
+                                             depth=depths[2], num_heads=num_heads[2], win_size=win_size,
+                                             mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+                                             drop=drop_rate, attn_drop=attn_drop_rate,
+                                             drop_path=enc_dpr[sum(depths[:2]):sum(depths[:3])], norm_layer=norm_layer,
+                                             use_checkpoint=use_checkpoint, token_projection=token_projection,
+                                             token_mlp=token_mlp, shift_flag=shift_flag, att=False, sparseAtt=False)
+         self.dowsample_2 = dowsample(embed_dim * 4, embed_dim * 8)
+         self.encoderlayer_3 = BasicASTLayer(dim=embed_dim * 8, output_dim=embed_dim * 8,
+                                             input_resolution=(img_size // (2 ** 3), img_size // (2 ** 3)),
+                                             depth=depths[3], num_heads=num_heads[3], win_size=win_size,
+                                             mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+                                             drop=drop_rate, attn_drop=attn_drop_rate,
+                                             drop_path=enc_dpr[sum(depths[:3]):sum(depths[:4])], norm_layer=norm_layer,
+                                             use_checkpoint=use_checkpoint, token_projection=token_projection,
+                                             token_mlp=token_mlp, shift_flag=shift_flag, att=False, sparseAtt=False)
+         self.dowsample_3 = dowsample(embed_dim * 8, embed_dim * 16)
+         # Bottleneck
+         self.conv = BasicASTLayer(dim=embed_dim * 16, output_dim=embed_dim * 16,
+                                   input_resolution=(img_size // (2 ** 4), img_size // (2 ** 4)), depth=depths[4],
+                                   num_heads=num_heads[4], win_size=win_size, mlp_ratio=self.mlp_ratio,
+                                   qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate,
+                                   drop_path=conv_dpr, norm_layer=norm_layer, use_checkpoint=use_checkpoint,
+                                   token_projection=token_projection, token_mlp=token_mlp, shift_flag=shift_flag,
+                                   att=True, sparseAtt=True)
+         # Decoder
+         self.upsample_0 = upsample(embed_dim * 16, embed_dim * 8)
+         self.decoderlayer_0 = BasicASTLayer(dim=embed_dim * 16, output_dim=embed_dim * 16,
+                                             input_resolution=(img_size // (2 ** 3), img_size // (2 ** 3)),
+                                             depth=depths[5], num_heads=num_heads[5], win_size=win_size,
+                                             mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+                                             drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dec_dpr[:depths[5]],
+                                             norm_layer=norm_layer, use_checkpoint=use_checkpoint,
+                                             token_projection=token_projection, token_mlp=token_mlp,
+                                             shift_flag=shift_flag, att=True, sparseAtt=True)
+         self.upsample_1 = upsample(embed_dim * 16, embed_dim * 4)
+         self.decoderlayer_1 = BasicASTLayer(dim=embed_dim * 8, output_dim=embed_dim * 8,
+                                             input_resolution=(img_size // (2 ** 2), img_size // (2 ** 2)),
+                                             depth=depths[6], num_heads=num_heads[6], win_size=win_size,
+                                             mlp_ratio=self.mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+                                             drop=drop_rate, attn_drop=attn_drop_rate,
+                                             drop_path=dec_dpr[sum(depths[5:6]):sum(depths[5:7])], norm_layer=norm_layer,
+                                             use_checkpoint=use_checkpoint, token_projection=token_projection,
+                                             token_mlp=token_mlp, shift_flag=shift_flag, att=True, sparseAtt=True)
+         self.upsample_2 = upsample(embed_dim * 8, embed_dim * 2)
+         self.decoderlayer_2 = BasicASTLayer(dim=embed_dim * 4, output_dim=embed_dim * 4,
+                                             input_resolution=(img_size // 2, img_size // 2), depth=depths[7],
+                                             num_heads=num_heads[7], win_size=win_size, mlp_ratio=self.mlp_ratio,
+                                             qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate,
+                                             attn_drop=attn_drop_rate,
+                                             drop_path=dec_dpr[sum(depths[5:7]):sum(depths[5:8])], norm_layer=norm_layer,
+                                             use_checkpoint=use_checkpoint, token_projection=token_projection,
+                                             token_mlp=token_mlp, shift_flag=shift_flag, att=True, sparseAtt=True)
+         self.upsample_3 = upsample(embed_dim * 4, embed_dim)
+         self.decoderlayer_3 = BasicASTLayer(dim=embed_dim * 2, output_dim=embed_dim * 2,
+                                             input_resolution=(img_size, img_size), depth=depths[8],
+                                             num_heads=num_heads[8], win_size=win_size, mlp_ratio=self.mlp_ratio,
+                                             qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate,
+                                             attn_drop=attn_drop_rate,
+                                             drop_path=dec_dpr[sum(depths[5:8]):sum(depths[5:9])], norm_layer=norm_layer,
+                                             use_checkpoint=use_checkpoint, token_projection=token_projection,
+                                             token_mlp=token_mlp, shift_flag=shift_flag, att=True, sparseAtt=True)
+         self.apply(self._init_weights)
+
+     def _init_weights(self, m):
+         if isinstance(m, nn.Linear):
+             trunc_normal_(m.weight, std=.02)
+             if isinstance(m, nn.Linear) and m.bias is not None:
+                 nn.init.constant_(m.bias, 0)
+         elif isinstance(m, nn.LayerNorm):
+             nn.init.constant_(m.bias, 0)
+             nn.init.constant_(m.weight, 1.0)
+
+     def forward(self, x, mask=None):
+         y = self.input_proj(x)
+         y = self.pos_drop(y)
+         conv0 = self.encoderlayer_0(y, mask=mask)
+         pool0 = self.dowsample_0(conv0)
+         conv1 = self.encoderlayer_1(pool0, mask=mask)
+         pool1 = self.dowsample_1(conv1)
+         conv2 = self.encoderlayer_2(pool1, mask=mask)
+         pool2 = self.dowsample_2(conv2)
+         conv3 = self.encoderlayer_3(pool2, mask=mask)
+         pool3 = self.dowsample_3(conv3)
+         conv4 = self.conv(pool3, mask=mask)
+         up0 = self.upsample_0(conv4)
+         deconv0 = torch.cat([up0, conv3], -1)
+         deconv0 = self.decoderlayer_0(deconv0, mask=mask)
+         up1 = self.upsample_1(deconv0)
+         deconv1 = torch.cat([up1, conv2], -1)
+         deconv1 = self.decoderlayer_1(deconv1, mask=mask)
+         up2 = self.upsample_2(deconv1)
+         deconv2 = torch.cat([up2, conv1], -1)
+         deconv2 = self.decoderlayer_2(deconv2, mask=mask)
+         up3 = self.upsample_3(deconv2)
+         deconv3 = torch.cat([up3, conv0], -1)
+         deconv3 = self.decoderlayer_3(deconv3, mask=mask)
+         y = self.output_proj(deconv3)
+         return x + y if self.dd_in == 3 else y
+
+
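A smoke test of the bare `AST` module at the configured resolution; with `dd_in == 3` the output is the residual `x + y`, so the network predicts a correction to the rainy input rather than the clean image directly. A sketch using the same hyperparameters as `config.json`:

    import torch

    net = AST(img_size=256, embed_dim=32, depths=[1, 2, 8, 8, 2, 8, 8, 2, 1],
              num_heads=[1, 2, 4, 8, 16, 16, 8, 4, 2], token_mlp='frfn').eval()
    with torch.no_grad():
        out = net(torch.randn(1, 3, 256, 256))
    print(out.shape)  # torch.Size([1, 3, 256, 256])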
+ #################################################################################
+ #                                                                               #
+ #             PART 2: The Hugging Face wrapper classes                          #
+ #                                                                               #
+ #################################################################################
+
+ class ASTConfig(PretrainedConfig):
+     """
+     This is the configuration class to store the configuration of an `AST` model.
+     """
+     model_type = "ast"
+
+     def __init__(self, **kwargs):
+         super().__init__(**kwargs)
+
+
+ class ASTForRestoration(PreTrainedModel):
+     """
+     This is the main model class that will be loaded by Hugging Face.
+     """
+     config_class = ASTConfig
+
+     def __init__(self, config: ASTConfig):
+         super().__init__(config)
+         self.model = AST(**config.to_dict())
+
+     def forward(self, pixel_values):
+         """
+         The forward pass of the model.
+         """
+         return self.model(pixel_values)
+
+
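Because `config_class` is set, the concrete class can reload a converted checkpoint directly; no `trust_remote_code` is needed when the module is imported locally (the directory name is hypothetical):

    from modeling_ast import ASTForRestoration

    model = ASTForRestoration.from_pretrained("./ast_deraindrop_hf").eval()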
+ #################################################################################
+ #                                                                               #
+ #                    PART 3: Main conversion logic                              #
+ #                                                                               #
+ #################################################################################
+
+ if __name__ == '__main__':
+     # --- Use argparse so the script is reusable ---
+     parser = argparse.ArgumentParser(description="Convert AST model .pth files to Hugging Face format.")
+     parser.add_argument("--pth_path", type=str, required=True, help="Path to the input .pth weight file.")
+     parser.add_argument("--output_dir", type=str, required=True, help="Directory to save the Hugging Face model.")
+     parser.add_argument("--task_name", type=str, default="restoration",
+                         help="Name of the task (e.g., 'dehazing', 'desnowing') for logging.")
+     args = parser.parse_args()
+
+     # --- Model architecture parameters (final corrected version) ---
+     model_params = {
+         "img_size": 256,
+         "in_chans": 3,
+         "dd_in": 3,
+         "embed_dim": 32,
+         "depths": [1, 2, 8, 8, 2, 8, 8, 2, 1],  # <--- the final, crucial fix!
+         "num_heads": [1, 2, 4, 8, 16, 16, 8, 4, 2],
+         "win_size": 8,
+         "mlp_ratio": 4.0,
+         "qkv_bias": True,
+         "qk_scale": None,
+         "drop_rate": 0.0,
+         "attn_drop_rate": 0.0,
+         "drop_path_rate": 0.1,
+         "patch_norm": True,
+         "use_checkpoint": False,
+         "token_projection": "linear",
+         "token_mlp": "frfn",
+         "shift_flag": True
+     }
+
+     # --- Run the conversion ---
+     print(f"Task: {args.task_name.upper()} | Step 1/5: Creating the Hugging Face model instance (AST)...")
+     hf_config = ASTConfig(**model_params)
+     hf_model = ASTForRestoration(hf_config)
+     print("Model instance created successfully!")
+
+     print(f"Step 2/5: Loading weights from '{args.pth_path}'...")
+     if not os.path.exists(args.pth_path):
+         raise FileNotFoundError(f"Error: weight file '{args.pth_path}' not found. Please check the path.")
+     state_dict = torch.load(args.pth_path, map_location='cpu')
+     print("Weight file loaded successfully!")
+
+     print("Step 3/5: Processing the state dict...")
+     # Check whether the weights are nested under a common wrapper key
+     if 'state_dict' in state_dict:
+         state_dict = state_dict['state_dict']
+     elif 'params_ema' in state_dict:
+         state_dict = state_dict['params_ema']
+     elif 'params' in state_dict:
+         state_dict = state_dict['params']
+
+     # Strip the 'module.' prefix (left over from DataParallel training)
+     new_state_dict = {k.replace('module.', '', 1): v for k, v in state_dict.items()}
+
+     # Load the weights
+     hf_model.model.load_state_dict(new_state_dict)
+     hf_model.eval()
+     print("Weights loaded into the model successfully!")
+
+     print(f"Step 4/5: Saving the model to '{args.output_dir}'...")
+     if not os.path.exists(args.output_dir):
+         os.makedirs(args.output_dir)
+     hf_model.save_pretrained(args.output_dir)
+     print("Model and config.json saved!")
+
+     # Step 5/5: create and save the image processor configuration
+     image_processor_config = {
+         "do_normalize": True,
+         "image_mean": [0.5, 0.5, 0.5],
+         "image_std": [0.5, 0.5, 0.5],
+         "data_format": "channels_first"
+     }
+     with open(os.path.join(args.output_dir, 'preprocessor_config.json'), 'w') as f:
+         json.dump(image_processor_config, f)
+     print("Step 5/5: Image processor config (preprocessor_config.json) saved!")
+
+     print(f"\nTask '{args.task_name.upper()}' conversion complete!")
+     print(f"Output directory: {args.output_dir}")
+     print("\nNext steps:")
+     print(f"1. Copy this script itself into the output directory '{args.output_dir}' and rename it to `modeling_ast.py`.")
+     print("2. Upload the whole output directory to your Hugging Face repository.")
+     print("3. When loading the model from the Hub, make sure to pass `trust_remote_code=True`.")
preprocessor_config.json ADDED
@@ -0,0 +1 @@
+ {"do_normalize": true, "image_mean": [0.5, 0.5, 0.5], "image_std": [0.5, 0.5, 0.5], "data_format": "channels_first"}