# NOTE: extraction metadata (file size 6,745 bytes, revision 3970ffd) moved into this comment.
import ast

import numpy as np
import paddle
import paddle.fluid as fluid
from sklearn.metrics import f1_score
from tqdm import tqdm

from models import Classifier
def build_batch(word2id_dict, corpus, batch_size, epoch_num, max_seq_len, shuffle=True):
    """Yield mini-batches of (sentences, labels) from an ID-encoded corpus.

    Each corpus line has the form "id1,id2,...\tlabel". Sentences are
    truncated/right-padded to max_seq_len using word2id_dict['<pad>'].

    Args:
        word2id_dict: vocabulary mapping; only the '<pad>' entry is read here.
        corpus: list of raw text lines (e.g. from readlines()).
        batch_size: number of sentences per yielded batch.
        epoch_num: number of passes over the corpus to generate.
        max_seq_len: fixed sequence length after truncation/padding.
        shuffle: shuffle the corpus in place before each epoch; keep False
            for evaluation/prediction so the output order matches the input.

    Yields:
        (sentences, labels): int64 ndarrays shaped [batch, max_seq_len] and
        [batch, 1]. A final partial batch (fewer than batch_size rows) is
        yielded if one remains after the last epoch.
    """
    sentence_batch = []
    sentence_label_batch = []
    for _ in range(epoch_num):
        # Reshuffling each epoch improves training; evaluation must not shuffle.
        if shuffle:
            np.random.shuffle(corpus)
        for sentence_label in corpus:
            # rsplit on the last tab so commas/tabs inside the sentence ids
            # can never split the label off incorrectly.
            sentence, label = sentence_label.rsplit(sep='\t', maxsplit=1)
            sentence = sentence.split(',')
            # Truncate to max_seq_len, then right-pad with the '<pad>' id.
            sentence_sample = sentence[:max_seq_len]
            sentence_sample += [word2id_dict['<pad>']] * (max_seq_len - len(sentence_sample))
            sentence_batch.append(sentence_sample)
            sentence_label_batch.append([label])
            if len(sentence_batch) == batch_size:
                yield np.array(sentence_batch).astype("int64"), np.array(sentence_label_batch).astype("int64")
                sentence_batch = []
                sentence_label_batch = []
    # BUG FIX: the original guard was `== batch_size`, which can never be true
    # here (the buffer holds 0..batch_size-1 items after the loop), so the
    # trailing partial batch was silently dropped. Yield it when non-empty.
    if sentence_batch:
        yield np.array(sentence_batch).astype("int64"), np.array(sentence_label_batch).astype("int64")
def train(train_path, place):
    # TODO: not implemented — training currently happens inline in the
    # __main__ block at the bottom of this file instead of via this function.
    pass
def evaluate(dev_path, multi_classifier):
    """Load the latest checkpoint, run the model on the dev set, print macro-F1.

    Relies on module-level globals set in the __main__ block: `adam`
    (optimizer), `word2id_dict`, and `max_seq_len`.

    Args:
        dev_path: path to the tab-separated, ID-encoded dev file.
        multi_classifier: Classifier instance to load the saved weights into.
    """
    # FIX: close the file deterministically (original leaked the handle).
    with open(dev_path, 'r', encoding='utf8') as f:
        dev_corpus = f.readlines()
    # Restore model and optimizer state from the last saved checkpoint.
    layer_state_dict = paddle.load("models/multi_classifier.pdparams")
    opt_state_dict = paddle.load("models/adam.pdopt")
    multi_classifier.set_state_dict(layer_state_dict)
    # NOTE(review): restoring optimizer state is unnecessary for inference;
    # kept for parity with the checkpointing scheme. `adam` is a global.
    adam.set_state_dict(opt_state_dict)
    # One batch covering the whole dev set, in file order (no shuffle).
    dev_batch = build_batch(word2id_dict, dev_corpus, len(dev_corpus), 1, max_seq_len, shuffle=False)
    for sentences, labels in tqdm(dev_batch, desc='dev set batch'):
        sentences_var = fluid.dygraph.to_variable(sentences)
        labels_var = fluid.dygraph.to_variable(labels)
        # Forward pass: the model returns (class scores, loss).
        pred, loss = multi_classifier(sentences_var, labels_var)
        # argmax over class scores -> predicted label ids, shaped like labels.
        pred_labels = np.argmax(pred.numpy(), axis=1).reshape(labels.shape)
        print(f1_score(labels, pred_labels, average='macro'))
def predict(test_path, multi_classifier):
    """Load the latest checkpoint, run the model on the test set, print macro-F1.

    NOTE(review): this is currently a near-duplicate of evaluate() and also
    requires labels in the test file; a true prediction path would emit
    pred_labels instead of scoring them. Relies on module-level globals
    `adam`, `word2id_dict`, and `max_seq_len` set in the __main__ block.

    Args:
        test_path: path to the tab-separated, ID-encoded test file.
        multi_classifier: Classifier instance to load the saved weights into.
    """
    # FIX: close the file deterministically (original leaked the handle).
    with open(test_path, 'r', encoding='utf8') as f:
        test_corpus = f.readlines()
    # Restore model and optimizer state from the last saved checkpoint.
    layer_state_dict = paddle.load("models/multi_classifier.pdparams")
    opt_state_dict = paddle.load("models/adam.pdopt")
    multi_classifier.set_state_dict(layer_state_dict)
    adam.set_state_dict(opt_state_dict)
    # One batch covering the whole test set, in file order (no shuffle).
    dev_batch = build_batch(word2id_dict, test_corpus, len(test_corpus), 1, max_seq_len, shuffle=False)
    for sentences, labels in tqdm(dev_batch, desc='test set batch'):
        sentences_var = fluid.dygraph.to_variable(sentences)
        labels_var = fluid.dygraph.to_variable(labels)
        # Forward pass: the model returns (class scores, loss).
        pred, loss = multi_classifier(sentences_var, labels_var)
        # argmax over class scores -> predicted label ids, shaped like labels.
        pred_labels = np.argmax(pred.numpy(), axis=1).reshape(labels.shape)
        print(f1_score(labels, pred_labels, average='macro'))
if __name__ == '__main__':
    # ---- hyper-parameters ----
    batch_size = 256
    epoch_num = 1
    embedding_size = 128
    learning_rate = 0.01
    max_seq_len = 500
    class_num = 15
    use_gpu = False
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()

    # Load the vocabulary (one Python-dict literal on the first line).
    # FIX: ast.literal_eval replaces eval() — it parses literals only and
    # cannot execute arbitrary code read from the file.
    with open('data/dict.txt', 'r', encoding='utf-8') as f_data:
        word2id_dict = dict(ast.literal_eval(f_data.readline()))
    vocab_size = len(word2id_dict)
    # FIX: close the corpus file deterministically (original leaked the handle).
    with open('data/Train_IDs.txt', 'r', encoding='utf8') as f_train:
        train_corpus = f_train.readlines()

    step = 0
    with fluid.dygraph.guard():
        # Build the text-classification network.
        multi_classifier = Classifier(
            hidden_size=embedding_size,
            vocab_size=vocab_size,
            num_steps=max_seq_len,
            class_num=class_num
        )
        # Adam optimizer bound to the classifier's parameters.
        adam = fluid.optimizer.AdamOptimizer(
            learning_rate=learning_rate,
            parameter_list=multi_classifier.parameters()
        )
        train_batch = build_batch(word2id_dict, train_corpus, batch_size, epoch_num, max_seq_len)
        for sentences, labels in tqdm(train_batch, desc='train set batch'):
            sentences_var = fluid.dygraph.to_variable(sentences)
            labels_var = fluid.dygraph.to_variable(labels)
            pred, loss = multi_classifier(sentences_var, labels_var)
            loss.backward()
            adam.minimize(loss)
            # Gradients must be cleared manually in fluid dygraph mode.
            multi_classifier.clear_gradients()
            step += 1
            if step % 50 == 0:
                print("step %d, loss %.5f" % (step, loss.numpy()[0]))
        # Checkpoint the model and optimizer for evaluate()/predict().
        paddle.save(multi_classifier.state_dict(), "models/multi_classifier.pdparams")
        paddle.save(adam.state_dict(), "models/adam.pdopt")
    with fluid.dygraph.guard():
        # Fresh instance; evaluate() reloads the weights saved above.
        multi_classifier = Classifier(
            hidden_size=embedding_size,
            vocab_size=vocab_size,
            num_steps=max_seq_len,
            class_num=class_num)
        evaluate('data/Val_IDs.txt', multi_classifier)