# DeepKMulti / model.py
import warnings

import torch
import torch.nn as nn
import torch.nn.init as init

warnings.filterwarnings('ignore')

class CAFN(nn.Module):
    def __init__(self, input_dim=46, num_classes=4, hidden_size=128):  # hidden_size added as a tunable hyperparameter
        super(CAFN, self).__init__()
self.conv_layer11 = nn.Sequential(
nn.Conv1d(in_channels=1, out_channels=32, kernel_size=3),
nn.ReLU(),
nn.MaxPool1d(kernel_size=2)
)
self.conv_layer12 = nn.Sequential(
nn.Conv1d(in_channels=3, out_channels=32, kernel_size=5),
nn.ReLU(),
nn.MaxPool1d(kernel_size=2)
)
self.conv_layer1 = nn.Sequential(
nn.Conv1d(in_channels=64, out_channels=64, kernel_size=3),
nn.ReLU(),
nn.MaxPool1d(kernel_size=2)
)
self.conv_layer2 = nn.Sequential(
nn.Conv1d(in_channels=64, out_channels=64, kernel_size=3),
nn.ReLU(),
nn.MaxPool1d(kernel_size=2)
)
self.conv_layer3 = nn.Sequential(
nn.Conv1d(in_channels=64, out_channels=64, kernel_size=3),
nn.ReLU(),
nn.MaxPool1d(kernel_size=2)
)
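        # Note: conv_layer1/2/3 above are defined but never called in forward();
        # they look like leftovers from an earlier version of the model.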
        # --- Removed the original classification head ---
        # self.conv_layer_w = nn.Sequential(...)
        # self.flatten = nn.Flatten()
        # self.fc_layer = nn.Sequential(...)
        # --- Added a bidirectional GRU layer ---
        # Bug fix: this was hard-coded to 64, while the GRU below uses
        # hidden_size (default 128), so the slicing in forward() was wrong.
        self.hidden_size = hidden_size
        self.biGRU = nn.GRU(
            input_size=64,            # input feature dimension = number of CNN output channels
            hidden_size=hidden_size,  # GRU hidden dimension, a tunable hyperparameter
            num_layers=1,             # number of stacked GRU layers; more layers can capture more complex patterns
            bidirectional=True,       # process the sequence in both directions
            batch_first=True,         # inputs are shaped (batch, seq, feature)
        )
        # --- Fully connected head for the final classification ---
        self.dropout_gru = nn.Dropout(0.15)
        self.fc_gru = nn.Linear(hidden_size * 2, num_classes)  # * 2 because the GRU is bidirectional
        # Assign all submodules before calling apply() so the initializer visits
        # them (MSRN also initializes its own weights in its __init__).
        self.Residual = MSRN()
        self.apply(self.init_weights)
    def init_weights(self, m):
        if isinstance(m, (nn.Conv1d, nn.Linear)):
            init.xavier_uniform_(m.weight)
            if m.bias is not None:
                init.constant_(m.bias, 0.0)
        # --- Also initialize the GRU weights (optional, but recommended) ---
        elif isinstance(m, nn.GRU):
            for name, param in m.named_parameters():
                if 'weight_ih' in name:
                    init.xavier_uniform_(param.data)
                elif 'weight_hh' in name:
                    init.orthogonal_(param.data)
                elif 'bias' in name:
                    param.data.fill_(0)
    def forward(self, x1, x2):
        '''
        x1: PSTAAP features,          shape (batch, input_dim)
        x2: physicochemical features, shape (batch, seq_len, 3)
        '''
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        x1 = x1.to(device)
        x1 = x1.unsqueeze(1)  # (batch, 1, input_dim): a single Conv1d channel
        x1 = self.conv_layer11(x1)
        _, w1 = self.Residual(x1)  # w1: (batch, 64, 4)
        x2 = x2.to(device)
        x2 = x2.transpose(1, 2)  # (batch, 3, seq_len) for Conv1d
        x2 = self.conv_layer12(x2)
        _, w2 = self.Residual(x2)  # w2: (batch, 64, 4)
        w = torch.cat((w1, w2), dim=2)  # (batch, 64, 8)
        x = w.permute(0, 2, 1)  # (batch, 8, 64): a sequence of 8 steps with 64 features each
        self.biGRU.flatten_parameters()
        output, _ = self.biGRU(x)  # (batch, seq_len, hidden_size * 2)
        # For a bidirectional GRU the final forward state sits at the last time
        # step (first half of the features) and the final backward state at the
        # first time step (second half).
        forward_out = output[:, -1, :self.hidden_size]
        backward_out = output[:, 0, self.hidden_size:]
        x = torch.cat((forward_out, backward_out), dim=1)  # (batch, hidden_size * 2)
        x = self.dropout_gru(x)
        x = self.fc_gru(x)  # (batch, num_classes)
        return x


class MSRN(nn.Module):
    def __init__(self):  # the former input_dim/num_classes arguments were unused and have been dropped
        super(MSRN, self).__init__()
self.conv_layer1 = nn.Sequential(
nn.Conv1d(in_channels=32, out_channels=32, kernel_size=3),
nn.ReLU(),
nn.Dropout(0.2),
nn.MaxPool1d(kernel_size=2)
)
self.conv_layer2 = nn.Sequential(
nn.Conv1d(in_channels=32, out_channels=64, kernel_size=3),
nn.ReLU(),
nn.Dropout(0.2),
nn.MaxPool1d(kernel_size=2)
)
self.conv_layer3 = nn.Sequential(
nn.Conv1d(in_channels=64, out_channels=64, kernel_size=3),
nn.ReLU(),
nn.Dropout(0.2),
nn.MaxPool1d(kernel_size=2)
)
self.apply(self.init_weights)
    def init_weights(self, m):
        if isinstance(m, (nn.Conv1d, nn.Linear)):
            init.xavier_uniform_(m.weight)
            if m.bias is not None:
                init.constant_(m.bias, 0.0)
    def forward(self, x):
        x1 = self.conv_layer1(x)   # (batch, 32, 10)
        x2 = self.conv_layer2(x1)  # (batch, 64, 4)
        w1 = x2                    # intermediate features, returned for fusion in CAFN
        x3 = self.conv_layer3(x2)  # (batch, 64, 1)
        return x3, w1
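

# A minimal smoke test, not part of the original upload: it assumes x1 is a
# (batch, 46) PSTAAP vector and x2 a (batch, 48, 3) physicochemical matrix.
# The sequence length 48 is inferred from the (batch, 64, 4) shape comments
# above and may differ for the real dataset.
if __name__ == "__main__":
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = CAFN(input_dim=46, num_classes=4, hidden_size=128).to(device)
    x1 = torch.randn(2, 46)     # hypothetical PSTAAP batch
    x2 = torch.randn(2, 48, 3)  # hypothetical physicochemical batch
    logits = model(x1, x2)
    print(logits.shape)  # expected: torch.Size([2, 4])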