Xianfish9 committed
Commit 8e3369b · verified · 1 Parent(s): 89991b4

Upload model.py

Files changed (1)
model.py +151 -0
model.py ADDED
@@ -0,0 +1,151 @@
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import warnings

warnings.filterwarnings('ignore')

class CAFN(nn.Module):
    def __init__(self, input_dim=46, num_classes=4, hidden_size=128):  # --- added a hidden_size parameter ---
        super(CAFN, self).__init__()

        self.conv_layer11 = nn.Sequential(
            nn.Conv1d(in_channels=1, out_channels=32, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2)
        )
        self.conv_layer12 = nn.Sequential(
            nn.Conv1d(in_channels=3, out_channels=32, kernel_size=5),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2)
        )

        # The following three blocks are kept from the original model but are not used in forward().
        self.conv_layer1 = nn.Sequential(
            nn.Conv1d(in_channels=64, out_channels=64, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2)
        )
        self.conv_layer2 = nn.Sequential(
            nn.Conv1d(in_channels=64, out_channels=64, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2)
        )
        self.conv_layer3 = nn.Sequential(
            nn.Conv1d(in_channels=64, out_channels=64, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool1d(kernel_size=2)
        )

        # --- removed the original classification head ---
        # self.conv_layer_w = nn.Sequential(...)
        # self.flatten = nn.Flatten()
        # self.fc_layer = nn.Sequential(...)

        # --- added a biGRU layer ---
        self.hidden_size = hidden_size  # must match the GRU's hidden_size; used to split directions in forward()
        self.biGRU = nn.GRU(
            input_size=64,            # input feature dim, i.e. the number of CNN output channels
            hidden_size=hidden_size,  # GRU hidden dim, a tunable hyperparameter
            num_layers=1,             # number of GRU layers; more layers can capture more complex patterns
            bidirectional=True,       # enable the bidirectional pass
            batch_first=True,         # inputs are (batch, seq, feature)
        )

        # --- added a fully connected layer for the final classification ---
        self.dropout_gru = nn.Dropout(0.15)
        self.fc_gru = nn.Linear(hidden_size * 2, num_classes)  # *2 because the GRU is bidirectional

        self.apply(self.init_weights)
        self.Residual = MSRN()

    def init_weights(self, m):
        if isinstance(m, (nn.Conv1d, nn.Linear)):
            init.xavier_uniform_(m.weight)
            if m.bias is not None:
                init.constant_(m.bias, 0.0)
        # --- added GRU weight initialization (optional, but recommended) ---
        elif isinstance(m, nn.GRU):
            for name, param in m.named_parameters():
                if 'weight_ih' in name:
                    init.xavier_uniform_(param.data)
                elif 'weight_hh' in name:
                    init.orthogonal_(param.data)
                elif 'bias' in name:
                    param.data.fill_(0)

    def forward(self, x1, x2):
        """
        x1: PSTAAP features
        x2: physicochemical features
        """
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        x1 = x1.to(device)
        x1 = x1.unsqueeze(1)
        x1 = self.conv_layer11(x1)
        _, w1 = self.Residual(x1)  # (batch_size, 64, 4)

        x2 = x2.to(device)
        x2 = x2.transpose(1, 2)
        x2 = self.conv_layer12(x2)
        _, w2 = self.Residual(x2)  # (batch_size, 64, 4)

        w = torch.cat((w1, w2), dim=2)  # (batch_size, 64, 8)

        x = w.permute(0, 2, 1)  # (batch_size, 8, 64): channels become GRU features
        self.biGRU.flatten_parameters()
        output, _ = self.biGRU(x)  # output shape: (batch, seq_len, hidden_size * 2)

        # Last step of the forward direction, first step of the backward direction.
        forward_out = output[:, -1, :self.hidden_size]
        backward_out = output[:, 0, self.hidden_size:]
        x = torch.cat((forward_out, backward_out), dim=1)  # (batch, hidden_size * 2)
        x = self.dropout_gru(x)
        x = self.fc_gru(x)  # (batch, num_classes)

        return x

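# A minimal sketch (not part of the committed file) of the bidirectional-GRU
# readout that CAFN.forward uses: for batch_first output of shape (B, T, 2*H),
# the forward direction's final state sits at t = T-1 in channels [:H] and the
# backward direction's final state at t = 0 in channels [H:]; both equal the
# corresponding rows of h_n. The sizes below are illustrative, not from the commit.
def _bigru_readout_demo():
    gru = nn.GRU(input_size=64, hidden_size=128, bidirectional=True, batch_first=True)
    x = torch.randn(2, 8, 64)       # (batch, seq_len, features)
    out, h_n = gru(x)               # out: (2, 8, 256); h_n: (2, 2, 128)
    fwd_last = out[:, -1, :128]     # forward direction, last time step
    bwd_last = out[:, 0, 128:]      # backward direction, first time step
    assert torch.allclose(fwd_last, h_n[0])
    assert torch.allclose(bwd_last, h_n[1])
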
class MSRN(nn.Module):
    def __init__(self, input_dim=46, num_classes=4):
        super(MSRN, self).__init__()

        self.conv_layer1 = nn.Sequential(
            nn.Conv1d(in_channels=32, out_channels=32, kernel_size=3),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.MaxPool1d(kernel_size=2)
        )

        self.conv_layer2 = nn.Sequential(
            nn.Conv1d(in_channels=32, out_channels=64, kernel_size=3),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.MaxPool1d(kernel_size=2)
        )

        self.conv_layer3 = nn.Sequential(
            nn.Conv1d(in_channels=64, out_channels=64, kernel_size=3),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.MaxPool1d(kernel_size=2)
        )

        self.apply(self.init_weights)

    def init_weights(self, m):
        if isinstance(m, (nn.Conv1d, nn.Linear)):
            init.xavier_uniform_(m.weight)
            if m.bias is not None:
                init.constant_(m.bias, 0.0)

    def forward(self, x):
        # Shape comments assume the (batch, 32, 22) input produced by CAFN's conv_layer11/12.
        x1 = self.conv_layer1(x)   # (batch, 32, 10)
        x2 = self.conv_layer2(x1)  # (batch, 64, 4)
        w1 = x2                    # intermediate feature map, returned alongside the final one
        x3 = self.conv_layer3(x2)  # (batch, 64, 1)
        return x3, w1
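
For reference, a minimal smoke test of the two classes above might look like the following. The input shapes are not stated anywhere in the commit; (batch, 46) for x1 and (batch, 48, 3) for x2 are inferred from the Conv1d/MaxPool1d arithmetic and the shape comments in forward(), so treat them as assumptions.

if __name__ == "__main__":
    # Hypothetical usage; shapes inferred from the layer arithmetic, not stated in the commit.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = CAFN(input_dim=46, num_classes=4, hidden_size=128).to(device)
    x1 = torch.randn(8, 46)      # PSTAAP features; forward() moves inputs to `device` itself
    x2 = torch.randn(8, 48, 3)   # physicochemical features as (batch, seq_len, channels)
    logits = model(x1, x2)
    print(logits.shape)          # torch.Size([8, 4])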