LSTM for multi-class classification
Browse files- models.py +261 -0
- paddle_2.py +152 -0
models.py
ADDED
|
@@ -0,0 +1,261 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import paddle.fluid as fluid
|
| 2 |
+
from paddle.fluid.dygraph.nn import Embedding
|
| 3 |
+
import numpy as np
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class SimpleLSTMRNN(fluid.Layer):
    """A multi-layer LSTM implemented by hand on paddle.fluid dygraph.

    Each layer packs all gate weights into a single [2H, 4H] parameter and
    all gate biases into a single [4H] parameter, so one matmul evaluates
    the four gate projections at once (see the split into i, j, f, o below).
    """

    def __init__(self, hidden_size, num_steps, num_layers=1, init_scale=0.1, dropout=None):
        """
        :param hidden_size: embedding size, i.e. the dimension of the hidden/cell memory vectors
        :param num_steps: the longest time sequence this LSTM can process
        :param num_layers: number of stacked LSTM layers
        :param init_scale: range of the uniform initialization of the parameters
        :param dropout: dropout probability applied to each layer's output (None or 0.0 disables it)
        """
        super(SimpleLSTMRNN, self).__init__()
        self._hidden_size = hidden_size
        self._num_layers = num_layers
        self._init_scale = init_scale
        self._dropout = dropout
        self._input = None
        self._num_steps = num_steps
        self.cell_array = []
        self.hidden_array = []

        # weight_1_arr stores, per LSTM layer, the fused W parameter of all gates
        self.weight_1_arr = []
        # bias_arr stores, per LSTM layer, the fused b parameter of all gates
        self.bias_arr = []
        self.mask_array = []

        # Create the per-layer parameters with create_parameter.
        # An LSTM cell needs 8 weight matrices of shape [hidden, hidden] and
        # 4 bias vectors of shape [hidden]; declaring them as one parameter of
        # shape [2H, 4H] plus one of shape [4H] per layer lets a single matrix
        # multiplication compute all gate projections simultaneously, which is
        # faster than 8 separate matmuls.
        for i in range(self._num_layers):
            weight_1 = self.create_parameter(
                attr=fluid.ParamAttr(
                    initializer=fluid.initializer.UniformInitializer(
                        low=-self._init_scale, high=self._init_scale)),
                shape=[self._hidden_size * 2, self._hidden_size * 4],
                dtype="float32",
                default_initializer=fluid.initializer.UniformInitializer(
                    low=-self._init_scale, high=self._init_scale))
            self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1))
            bias_1 = self.create_parameter(
                attr=fluid.ParamAttr(
                    initializer=fluid.initializer.UniformInitializer(
                        low=-self._init_scale, high=self._init_scale)),
                shape=[self._hidden_size * 4],
                dtype="float32",
                default_initializer=fluid.initializer.Constant(0.0))
            self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1))

    def forward(self, input_embedding, init_hidden=None, init_cell=None):
        """Run the stacked LSTM over a batch of embedded sequences.

        Inputs:
        1. input_embedding: the embedded sentences, a tensor of shape
           [batch_size, seq_len, embedding_size]
        2. init_hidden: the initial h of every LSTM layer; may be given
           explicitly by the caller
        3. init_cell: the initial c of every LSTM layer; may be given
           explicitly by the caller
        """
        self.cell_array = []
        self.hidden_array = []

        # Slice out each layer's initial hidden and cell state from
        # init_hidden / init_cell and keep them in hidden_array / cell_array.
        for i in range(self._num_layers):
            pre_hidden = fluid.layers.slice(
                init_hidden, axes=[0], starts=[i], ends=[i + 1])
            pre_cell = fluid.layers.slice(
                init_cell, axes=[0], starts=[i], ends=[i + 1])
            pre_hidden = fluid.layers.reshape(
                pre_hidden, shape=[-1, self._hidden_size])
            pre_cell = fluid.layers.reshape(
                pre_cell, shape=[-1, self._hidden_size])
            self.hidden_array.append(pre_hidden)
            self.cell_array.append(pre_cell)

        res = []
        for index in range(self._num_steps):
            # Slice the embedding of the word at the current position out of
            # input_embedding and reshape it to [batch_size, embedding_size].
            self._input = fluid.layers.slice(
                input_embedding, axes=[1], starts=[index], ends=[index + 1])
            self._input = fluid.layers.reshape(
                self._input, shape=[-1, self._hidden_size])
            # Compute each layer's result, bottom-up.
            for k in range(self._num_layers):
                # Fetch this layer's previous-step hidden/cell and its W and b.
                pre_hidden = self.hidden_array[k]
                pre_cell = self.cell_array[k]
                weight_1 = self.weight_1_arr[k]
                bias = self.bias_arr[k]
                # Concatenate the current input with the previous hidden state
                # to prepare the fused gate computation.
                nn = fluid.layers.concat([self._input, pre_hidden], 1)
                # One matmul applies the input/candidate/forget/output gate
                # weights to [input, pre_hidden] simultaneously.
                gate_input = fluid.layers.matmul(x=nn, y=weight_1)
                # Add the fused bias.
                gate_input = fluid.layers.elementwise_add(gate_input, bias)
                # Split the fused projection into the four gate pre-activations.
                i, j, f, o = fluid.layers.split(
                    gate_input, num_or_sections=4, dim=-1)
                # Standard LSTM cell update: new cell from forget/input gates,
                # new hidden from the output gate.
                c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid(
                    i) * fluid.layers.tanh(j)
                h = fluid.layers.tanh(c) * fluid.layers.sigmoid(o)
                # h is this step's hidden output, c is this step's cell state.
                self.hidden_array[k] = h
                self.cell_array[k] = c
                self._input = h
                # Optional dropout on each layer's output; improves training
                # robustness.
                if self._dropout is not None and self._dropout > 0.0:
                    self._input = fluid.layers.dropout(
                        self._input,
                        dropout_prob=self._dropout,
                        dropout_implementation='upscale_in_train')
            res.append(
                fluid.layers.reshape(
                    self._input, shape=[1, -1, self._hidden_size]))

        # Results returned:
        # 1. real_res: the top layer's hidden state at every time step,
        #    transposed to [batch_size, num_steps, hidden_size]
        # 2. last_hidden: every layer's hidden state at the last time step;
        #    reshaped to [batch_size, num_layers, hidden_size] then transposed
        #    to [num_layers, batch_size, hidden_size]
        # 3. last_cell: every layer's cell state at the last time step,
        #    same shape handling as last_hidden
        real_res = fluid.layers.concat(res, 0)
        real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2])
        last_hidden = fluid.layers.concat(self.hidden_array, 1)
        last_hidden = fluid.layers.reshape(
            last_hidden, shape=[-1, self._num_layers, self._hidden_size])
        last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2])
        last_cell = fluid.layers.concat(self.cell_array, 1)
        last_cell = fluid.layers.reshape(
            last_cell, shape=[-1, self._num_layers, self._hidden_size])
        last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2])
        return real_res, last_hidden, last_cell
class Classifier(fluid.Layer):
    """Sentence classifier: embedding -> multi-layer LSTM -> linear softmax head."""

    def __init__(self,
                 hidden_size,
                 vocab_size,
                 class_num=2,
                 num_layers=1,
                 num_steps=128,
                 init_scale=0.1,
                 dropout=None):
        # Model parameters:
        # 1. hidden_size: the embedding size and the dimension of the hidden
        #    and cell vectors
        # 2. vocab_size: size of the vocabulary the model can handle
        # 3. class_num: number of sentiment classes; binary or multi-class
        # 4. num_steps: the longest sentence this classifier can consider
        # 5. init_scale: uniform-initialization range of the parameters; the
        #    LSTM uses many tanh/sigmoid activations that are sensitive to
        #    numeric precision, so a small initialization range is used to
        #    keep training stable
        super(Classifier, self).__init__()
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.class_num = class_num
        self.init_scale = init_scale
        self.num_layers = num_layers
        self.num_steps = num_steps
        self.dropout = dropout

        # Embedding layer that maps every word id of a sentence to a vector.
        self.embedding = Embedding(
            size=[vocab_size, hidden_size],
            dtype='float32',
            is_sparse=False,
            param_attr=fluid.ParamAttr(
                name='embedding_para',
                initializer=fluid.initializer.UniformInitializer(
                    low=-init_scale, high=init_scale)))

        # LSTM that abstracts a sentence into a vector.
        self.simple_lstm_rnn = SimpleLSTMRNN(
            hidden_size,
            num_steps,
            num_layers=num_layers,
            init_scale=init_scale,
            dropout=dropout)

        # Given the sentence vector, classification is a linear projection:
        # multiply by a W of shape [self.hidden_size, self.class_num] and add
        # a b of shape [self.class_num], mapping the sentence vector onto the
        # class logits.  Declare the projection weight here.
        self.softmax_weight = self.create_parameter(
            attr=fluid.ParamAttr(),
            shape=[self.hidden_size, self.class_num],
            dtype="float32",
            default_initializer=fluid.initializer.UniformInitializer(
                low=-self.init_scale, high=self.init_scale))
        # The projection bias, of shape [self.class_num].
        self.softmax_bias = self.create_parameter(
            attr=fluid.ParamAttr(),
            shape=[self.class_num],
            dtype="float32",
            default_initializer=fluid.initializer.UniformInitializer(
                low=-self.init_scale, high=self.init_scale))

    def forward(self, input, label):
        """Return (pred, loss): per-class probabilities and the mean cross-entropy.

        :param input: a mini-batch of word-id sentences (first dim is batch size)
        :param label: integer class labels used for the cross-entropy loss
        """
        batch_size, embedding_size = input.shape[0], self.hidden_size
        # Define the LSTM's initial hidden and cell; zeros initialize the
        # sequence memory.
        init_hidden_data = np.zeros(
            (1, batch_size, embedding_size), dtype='float32')
        init_cell_data = np.zeros(
            (1, batch_size, embedding_size), dtype='float32')

        # Convert the initial memories into Paddle variables and set
        # stop_gradient=True so they are never updated, which would otherwise
        # disturb training.
        init_hidden = fluid.dygraph.to_variable(init_hidden_data)
        init_hidden.stop_gradient = True
        init_cell = fluid.dygraph.to_variable(init_cell_data)
        init_cell.stop_gradient = True
        init_h = fluid.layers.reshape(
            init_hidden, shape=[self.num_layers, -1, self.hidden_size])
        init_c = fluid.layers.reshape(
            init_cell, shape=[self.num_layers, -1, self.hidden_size])

        # Turn the mini-batch of sentences into word-embedding representations.
        input_emb = self.embedding(input)
        input_emb = fluid.layers.reshape(
            input_emb, shape=[-1, self.num_steps, self.hidden_size])
        if self.dropout is not None and self.dropout > 0.0:
            input_emb = fluid.layers.dropout(
                input_emb,
                dropout_prob=self.dropout,
                dropout_implementation='upscale_in_train')
        # Use the LSTM to turn each sentence into a vector representation.
        rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(input_emb, init_h,
                                                               init_c)
        last_hidden = fluid.layers.reshape(
            last_hidden, shape=[-1, self.hidden_size])
        # Project each sentence vector onto the class logits.
        projection = fluid.layers.matmul(last_hidden, self.softmax_weight)
        projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
        projection = fluid.layers.reshape(
            projection, shape=[-1, self.class_num])
        pred = fluid.layers.softmax(projection, axis=-1)
        # Compute the network loss from the given labels with the standard
        # classification cross-entropy (applied to the raw logits).
        loss = fluid.layers.softmax_with_cross_entropy(
            logits=projection, label=label, soft_label=False)
        loss = fluid.layers.reduce_mean(loss)
        # Return the prediction pred and the network loss.
        return pred, loss
paddle_2.py
ADDED
|
@@ -0,0 +1,152 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
from tqdm import tqdm
|
| 3 |
+
import paddle.fluid as fluid
|
| 4 |
+
from models import Classifier
|
| 5 |
+
import paddle
|
| 6 |
+
from sklearn.metrics import f1_score
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def build_batch(word2id_dict, corpus, batch_size, epoch_num, max_seq_len, shuffle=True):
    """Yield mini-batches of (sentences, labels) as int64 numpy arrays.

    Each yield produces:
    1. a [batch_size, max_seq_len] array of word ids, truncated/padded with
       word2id_dict['<pad>'] to exactly max_seq_len;
    2. a [batch_size, 1] array of integer class labels.

    :param word2id_dict: vocabulary mapping; only '<pad>' is read here
    :param corpus: lines of the form "id,id,...,id\\tlabel"
    :param batch_size: number of sentences per yielded batch
    :param epoch_num: how many passes over the corpus to generate
    :param shuffle: shuffle the corpus in place before each epoch; keep
        False for evaluation/prediction so order is preserved
    """
    sentence_batch = []
    sentence_label_batch = []

    for _ in range(epoch_num):
        # Shuffling before every epoch helps training; never shuffle for
        # prediction tasks.
        if shuffle:
            np.random.shuffle(corpus)
        for sentence_label in corpus:
            sentence, label = sentence_label.rsplit(sep='\t', maxsplit=1)
            sentence = sentence.split(',')
            # Truncate to max_seq_len, then pad the remainder with '<pad>'.
            sentence_sample = sentence[:min(max_seq_len, len(sentence))]
            if len(sentence_sample) < max_seq_len:
                for _ in range(max_seq_len - len(sentence_sample)):
                    sentence_sample.append(word2id_dict['<pad>'])
            sentence_batch.append(sentence_sample)
            sentence_label_batch.append([label])

            if len(sentence_batch) == batch_size:
                yield np.array(sentence_batch).astype("int64"), np.array(sentence_label_batch).astype("int64")
                sentence_batch = []
                sentence_label_batch = []

    # Bug fix: the original tested `len(sentence_batch) == batch_size` here,
    # which is never true after the loops (full batches are always flushed
    # inside), so the final partial batch was silently dropped. Yield any
    # remaining samples as a last, smaller batch.
    if sentence_batch:
        yield np.array(sentence_batch).astype("int64"), np.array(sentence_label_batch).astype("int64")
def train(train_path, place):
    """Placeholder for the training entry point.

    TODO: training currently happens inline in the __main__ block of this
    module; move that logic here so it can be reused and tested.
    """
    pass
def evaluate(dev_path, multi_classifier):
    """Run the trained model over the dev set and print the macro F1 score.

    NOTE(review): this function reads the module-level globals `adam`,
    `word2id_dict` and `max_seq_len` that are only defined in the __main__
    block — calling it from another module raises NameError; confirm and
    consider passing them as parameters.
    """
    dev_corpus = open(dev_path, 'r', encoding='utf8').readlines()
    # Load the model parameters and optimizer state saved at the last
    # checkpoint.
    layer_state_dict = paddle.load("models/multi_classifier.pdparams")
    opt_state_dict = paddle.load("models/adam.pdopt")
    # Bind the loaded parameters to the model.
    multi_classifier.set_state_dict(layer_state_dict)
    # NOTE(review): `adam` is a global; restoring optimizer state is not
    # needed for evaluation — verify this is intentional.
    adam.set_state_dict(opt_state_dict)

    # One batch holding the whole dev set; shuffle disabled for evaluation.
    dev_batch = build_batch(word2id_dict, dev_corpus, len(dev_corpus), 1, max_seq_len, shuffle=False)
    for sentences, labels in tqdm(dev_batch, desc='dev set batch'):
        sentences_var = fluid.dygraph.to_variable(sentences)
        labels_var = fluid.dygraph.to_variable(labels)
        # Get the model output for the current batch.
        pred, loss = multi_classifier(sentences_var, labels_var)
        # Convert the output to numpy and take the argmax class per sample.
        pred_labels = np.argmax(pred.numpy(), axis=1).reshape(labels.shape)
        print(f1_score(labels, pred_labels, average='macro'))
def predict(test_path, multi_classifier):
    """Run the trained model over the test set and print the macro F1 score.

    NOTE(review): near-duplicate of evaluate() and, like it, depends on the
    module-level globals `adam`, `word2id_dict` and `max_seq_len` from the
    __main__ block — consider merging the two functions and passing these in.
    """
    test_corpus = open(test_path, 'r', encoding='utf8').readlines()
    # Load the model parameters and optimizer state saved at the last
    # checkpoint.
    layer_state_dict = paddle.load("models/multi_classifier.pdparams")
    opt_state_dict = paddle.load("models/adam.pdopt")
    # Bind the loaded parameters to the model.
    multi_classifier.set_state_dict(layer_state_dict)
    # NOTE(review): `adam` is a global; restoring optimizer state is not
    # needed for inference — verify this is intentional.
    adam.set_state_dict(opt_state_dict)

    # One batch holding the whole test set; shuffle disabled for prediction.
    dev_batch = build_batch(word2id_dict, test_corpus, len(test_corpus), 1, max_seq_len, shuffle=False)
    for sentences, labels in tqdm(dev_batch, desc='test set batch'):
        sentences_var = fluid.dygraph.to_variable(sentences)
        labels_var = fluid.dygraph.to_variable(labels)
        # Get the model output for the current batch.
        pred, loss = multi_classifier(sentences_var, labels_var)
        # Convert the output to numpy and take the argmax class per sample.
        pred_labels = np.argmax(pred.numpy(), axis=1).reshape(labels.shape)
        print(f1_score(labels, pred_labels, average='macro'))
if __name__ == '__main__':
    # Training hyper-parameters.
    batch_size = 256
    epoch_num = 1
    embedding_size = 128
    learning_rate = 0.01
    max_seq_len = 500
    class_num = 15

    use_gpu = False
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()

    # Load the vocabulary (token -> id) and the pre-tokenized training corpus.
    with open('data/dict.txt', 'r', encoding='utf-8') as f_data:
        # NOTE(review): eval() on file contents executes arbitrary code —
        # safe only for fully trusted data; prefer json or ast.literal_eval.
        word2id_dict = eval(f_data.readlines()[0])
    word2id_dict = dict(word2id_dict)
    vocab_size = len(word2id_dict)
    train_corpus = open('data/Train_IDs.txt', 'r', encoding='utf8').readlines()

    # with open('datatest/vocab.json', 'r', encoding='utf-8') as f_data:
    #     word2id_dict = eval(f_data.readlines()[0])
    #     word2id_dict = dict(word2id_dict)
    #     vocab_size = len(word2id_dict)
    # train_corpus = open('datatest/train_idx.txt', 'r', encoding='utf8').readlines()

    step = 0
    with fluid.dygraph.guard():
        # Build the classification network instance.
        multi_classifier = Classifier(
            hidden_size=embedding_size,
            vocab_size=vocab_size,
            num_steps=max_seq_len,
            class_num=class_num
        )
        # Adam optimizer that updates this network's parameters.
        adam = fluid.optimizer.AdamOptimizer(
            learning_rate=learning_rate,
            parameter_list=multi_classifier.parameters()
        )

        train_batch = build_batch(word2id_dict, train_corpus, batch_size, epoch_num, max_seq_len)
        for sentences, labels in tqdm(train_batch, desc='train set batch'):
            sentences_var = fluid.dygraph.to_variable(sentences)
            labels_var = fluid.dygraph.to_variable(labels)
            pred, loss = multi_classifier(sentences_var, labels_var)
            loss.backward()
            adam.minimize(loss)
            multi_classifier.clear_gradients()
            step += 1
            if step % 50 == 0:
                print("step %d, loss %.5f" % (step, loss.numpy()[0]))
            # print("Epoch {} batch {}: loss = {}".format(
            #     epoch_id, batch_id, np.mean(loss.numpy())))
        # Save the model (Layer) parameters.
        # NOTE(review): indentation was reconstructed from a scrape — assumed
        # the two saves run once after the training loop; confirm against the
        # original file.
        paddle.save(multi_classifier.state_dict(), "models/multi_classifier.pdparams")
        # Save the optimizer state.
        paddle.save(adam.state_dict(), "models/adam.pdopt")

    with fluid.dygraph.guard():
        # Fresh model instance; evaluate() reloads the saved parameters itself.
        multi_classifier = Classifier(
            hidden_size=embedding_size,
            vocab_size=vocab_size,
            num_steps=max_seq_len,
            class_num=class_num)
        evaluate('data/Val_IDs.txt', multi_classifier)