# coding: UTF-8
import torch
import torch.nn as nn
import numpy as np


class Config(object):
    """配置参数"""

    def __init__(self, dataset, embedding):
        self.model_name = "TextRNN"
        self.train_path = dataset + "/data/train.txt"  # training set
        self.dev_path = dataset + "/data/dev.txt"  # validation set
        self.test_path = dataset + "/data/test.txt"  # test set
        with open(dataset + "/data/class.txt", encoding="utf-8") as f:
            self.class_list = [x.strip() for x in f]  # list of class names
        self.vocab_path = dataset + "/data/vocab.pkl"  # vocabulary
        self.save_path = (
            dataset + "/saved_dict/" + self.model_name + ".ckpt"
        )  # where the trained model checkpoint is saved
        self.log_path = dataset + "/log/" + self.model_name
        self.embedding_pretrained = (
            torch.tensor(
                np.load(dataset + "/data/" + embedding)["embeddings"].astype("float32")
            )
            if embedding != "random"
            else None
        )  # pre-trained word embeddings
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu"
        )  # run on GPU when available

        self.dropout = 0.5  # dropout rate
        self.require_improvement = 1000  # stop early if no improvement after 1000 batches
        self.num_classes = len(self.class_list)  # number of classes
        self.n_vocab = 0  # vocabulary size, assigned at runtime
        self.num_epochs = 10  # number of epochs
        self.batch_size = 128  # mini-batch size
        self.pad_size = 32  # sequence length: shorter sentences are padded, longer ones truncated
        self.learning_rate = 1e-3  # learning rate
        self.embed = (
            self.embedding_pretrained.size(1)
            if self.embedding_pretrained is not None
            else 300
        )  # embedding dimension; matches the pre-trained vectors when they are used
        self.hidden_size = 128  # LSTM hidden size
        self.num_layers = 2  # number of LSTM layers

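# Expected dataset directory layout (an assumption inferred from the paths
# above; not stated in this file):
#   <dataset>/data/train.txt / dev.txt / test.txt  -- one labeled sample per line
#   <dataset>/data/class.txt                       -- one class name per line
#   <dataset>/data/vocab.pkl                       -- pickled vocabulary built at runtime
#   <dataset>/saved_dict/                          -- checkpoint output directory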

"""Recurrent Neural Network for Text Classification with Multi-Task Learning"""


class TextRNN(nn.Module):
    def __init__(self, config):
        super(TextRNN, self).__init__()
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(
                config.embedding_pretrained, freeze=False
            )
        else:
            self.embedding = nn.Embedding(
                config.n_vocab, config.embed, padding_idx=config.n_vocab - 1
            )  # random init; the last vocabulary index serves as the padding token
        self.lstm = nn.LSTM(
            config.embed,
            config.hidden_size,
            config.num_layers,
            bidirectional=True,
            batch_first=True,
            dropout=config.dropout,
        )
        self.fc = nn.Linear(config.hidden_size * 2, config.num_classes)

    def forward(self, x):
        x, _ = x  # unpack the (token_ids, _) batch tuple; the second element is unused here
        out = self.embedding(x)  # [batch_size, seq_len, embedding] = [128, 32, 300]
        out, _ = self.lstm(out)  # [batch_size, seq_len, hidden_size * 2]
        out = self.fc(out[:, -1, :])  # hidden state at the last time step
        return out
    
    def feature(self, x):
        """
        提取中间层特征向量,用于可视化
        返回LSTM最后时刻的隐藏状态(全连接层前面的那一层)
        """
        with torch.no_grad():
            x, _ = x
            out = self.embedding(x)  # [batch_size, seq_len, embedding]
            out, _ = self.lstm(out)  # [batch_size, seq_len, hidden_size * 2]
            features = out[:, -1, :]  # last-time-step hidden state, [batch_size, hidden_size * 2]
            return features.cpu().numpy()
    
    def get_prediction(self, x):
        """
        获取模型最终层输出向量(logits)
        """
        with torch.no_grad():
            x, _ = x
            out = self.embedding(x)
            out, _ = self.lstm(out)
            predictions = self.fc(out[:, -1, :])  # [batch_size, num_classes]
            return predictions.cpu().numpy()
    
    def prediction(self, features):
        """
        根据中间特征向量预测结果
        features: 来自feature()函数的输出
        """
        with torch.no_grad():
            features_tensor = torch.tensor(
                features, dtype=torch.float32, device=next(self.parameters()).device
            )
            predictions = self.fc(features_tensor)
            return predictions.cpu().numpy()
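

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; not part of the original file). It assumes
# the standard <dataset>/data/... layout exists on disk; "THUCNews" is a
# hypothetical dataset directory name, and n_vocab is hard-coded here, whereas
# the real pipeline assigns it after building the vocabulary. forward() takes
# a tuple whose first element is the token-id tensor; the second element is
# ignored.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    config = Config(dataset="THUCNews", embedding="random")
    config.n_vocab = 5000  # normally assigned at runtime from vocab.pkl
    model = TextRNN(config).to(config.device)
    model.eval()  # disable dropout for deterministic outputs

    # A dummy batch of random token ids, shaped [batch_size, pad_size].
    ids = torch.randint(0, config.n_vocab, (4, config.pad_size), device=config.device)
    lengths = torch.full((4,), config.pad_size, device=config.device)

    logits = model((ids, lengths))  # [4, num_classes]

    # feature() -> prediction() factors the same computation as
    # get_prediction(): the FC layer applied to the last-step hidden state.
    feats = model.feature((ids, lengths))  # numpy, [4, hidden_size * 2]
    logits_a = model.prediction(feats)     # numpy, [4, num_classes]
    logits_b = model.get_prediction((ids, lengths))
    print(logits.shape, np.allclose(logits_a, logits_b, atol=1e-6))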