import ast
import os

import numpy as np
import paddle
import paddle.fluid as fluid
from sklearn.metrics import f1_score
from tqdm import tqdm

from models import Classifier
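
# Note: models.Classifier is assumed (from how it is used below) to be a
# fluid.dygraph.Layer constructed as Classifier(hidden_size, vocab_size,
# num_steps, class_num), whose forward pass takes (token_ids, labels) and
# returns (per-class predictions, mean loss). This interface is inferred
# from the call sites in this script, not from models.py itself.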


def build_batch(word2id_dict, corpus, batch_size, epoch_num, max_seq_len, shuffle=True):
    """Yield (sentences, labels) mini-batches as int64 numpy arrays.

    Each corpus line is expected to hold comma-separated token ids, a tab,
    and an integer label. Sentences are truncated or padded to max_seq_len.
    """
    sentence_batch = []
    sentence_label_batch = []

    for _ in range(epoch_num):
        if shuffle:
            np.random.shuffle(corpus)

        for sentence_label in corpus:
            sentence, label = sentence_label.rsplit(sep='\t', maxsplit=1)
            # Token ids arrive as strings; convert them (and the label) to int.
            sentence = [int(token) for token in sentence.split(',')]

            # Truncate to max_seq_len, then pad short sentences with <pad>.
            sentence_sample = sentence[:max_seq_len]
            if len(sentence_sample) < max_seq_len:
                sentence_sample += [word2id_dict['<pad>']] * (max_seq_len - len(sentence_sample))

            sentence_batch.append(sentence_sample)
            sentence_label_batch.append([int(label)])

            if len(sentence_batch) == batch_size:
                yield np.array(sentence_batch).astype("int64"), np.array(sentence_label_batch).astype("int64")
                sentence_batch = []
                sentence_label_batch = []

    # Flush any final partial batch so trailing examples are not dropped.
    if len(sentence_batch) > 0:
        yield np.array(sentence_batch).astype("int64"), np.array(sentence_label_batch).astype("int64")
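

# Minimal sanity check for build_batch. The toy corpus below is hypothetical;
# it only assumes the line format documented in the docstring above
# (comma-separated token ids, a tab, then an integer label).
def _demo_build_batch():
    toy_dict = {'<pad>': 0}
    toy_corpus = ["1,2,3\t4\n", "5,6\t7\n"]
    for xs, ys in build_batch(toy_dict, toy_corpus, batch_size=2, epoch_num=1,
                              max_seq_len=4, shuffle=False):
        assert xs.shape == (2, 4) and ys.shape == (2, 1)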


def train(train_path, place):
    """Unused placeholder; the actual training loop lives in the __main__
    block at the bottom of this file."""


def evaluate(dev_path, multi_classifier):
    """Score the saved model on the dev set with macro-F1.

    Relies on the module-level word2id_dict and max_seq_len defined in the
    __main__ block.
    """
    with open(dev_path, 'r', encoding='utf8') as f:
        dev_corpus = f.readlines()

    # Restore the trained weights; the optimizer state is not needed for
    # inference, so only the model checkpoint is loaded.
    layer_state_dict = paddle.load("models/multi_classifier.pdparams")
    multi_classifier.set_state_dict(layer_state_dict)
    multi_classifier.eval()

    # One batch covering the whole dev set, in file order.
    dev_batch = build_batch(word2id_dict, dev_corpus, len(dev_corpus), 1, max_seq_len, shuffle=False)
    for sentences, labels in tqdm(dev_batch, desc='dev set batch'):
        sentences_var = fluid.dygraph.to_variable(sentences)
        labels_var = fluid.dygraph.to_variable(labels)

        pred, loss = multi_classifier(sentences_var, labels_var)

        pred_labels = np.argmax(pred.numpy(), axis=1)
        print(f1_score(labels.flatten(), pred_labels, average='macro'))


def predict(test_path, multi_classifier):
    """Run the saved model on the test set.

    This mirrors evaluate() and assumes the test file also carries gold
    labels, since it reports macro-F1 on them.
    """
    with open(test_path, 'r', encoding='utf8') as f:
        test_corpus = f.readlines()

    # Restore the trained weights; the optimizer state is not needed for
    # inference.
    layer_state_dict = paddle.load("models/multi_classifier.pdparams")
    multi_classifier.set_state_dict(layer_state_dict)
    multi_classifier.eval()

    test_batch = build_batch(word2id_dict, test_corpus, len(test_corpus), 1, max_seq_len, shuffle=False)
    for sentences, labels in tqdm(test_batch, desc='test set batch'):
        sentences_var = fluid.dygraph.to_variable(sentences)
        labels_var = fluid.dygraph.to_variable(labels)

        pred, loss = multi_classifier(sentences_var, labels_var)

        pred_labels = np.argmax(pred.numpy(), axis=1)
        print(f1_score(labels.flatten(), pred_labels, average='macro'))


if __name__ == '__main__':
    # Hyperparameters.
    batch_size = 256
    epoch_num = 1
    embedding_size = 128
    learning_rate = 0.01
    max_seq_len = 500
    class_num = 15

    # Training device; flip use_gpu to True to run on the first CUDA card.
    use_gpu = False
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()

    # data/dict.txt stores the vocabulary as a Python literal on its first
    # line; ast.literal_eval parses it without eval's code-execution risk.
    with open('data/dict.txt', 'r', encoding='utf-8') as f_data:
        word2id_dict = dict(ast.literal_eval(f_data.readline()))
    vocab_size = len(word2id_dict)

    with open('data/Train_IDs.txt', 'r', encoding='utf8') as f_train:
        train_corpus = f_train.readlines()
    step = 0
    with fluid.dygraph.guard(place):
        multi_classifier = Classifier(
            hidden_size=embedding_size,
            vocab_size=vocab_size,
            num_steps=max_seq_len,
            class_num=class_num
        )

        adam = fluid.optimizer.AdamOptimizer(
            learning_rate=learning_rate,
            parameter_list=multi_classifier.parameters()
        )

        train_batch = build_batch(word2id_dict, train_corpus, batch_size, epoch_num, max_seq_len)
        for sentences, labels in tqdm(train_batch, desc='train set batch'):
            sentences_var = fluid.dygraph.to_variable(sentences)
            labels_var = fluid.dygraph.to_variable(labels)

            pred, loss = multi_classifier(sentences_var, labels_var)
            loss.backward()
            adam.minimize(loss)
            multi_classifier.clear_gradients()

            step += 1
            if step % 50 == 0:
                print("step %d, loss %.5f" % (step, loss.numpy()[0]))

        # Persist the trained weights and optimizer state; create the target
        # directory first in case it does not exist yet.
        os.makedirs("models", exist_ok=True)
        paddle.save(multi_classifier.state_dict(), "models/multi_classifier.pdparams")
        paddle.save(adam.state_dict(), "models/adam.pdopt")
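
        # To resume training later, the checkpoint pair saved above can be
        # restored with set_state_dict, e.g.:
        #   multi_classifier.set_state_dict(paddle.load("models/multi_classifier.pdparams"))
        #   adam.set_state_dict(paddle.load("models/adam.pdopt"))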
    with fluid.dygraph.guard(place):
        # Fresh model instance; evaluate() restores the saved weights into it.
        multi_classifier = Classifier(
            hidden_size=embedding_size,
            vocab_size=vocab_size,
            num_steps=max_seq_len,
            class_num=class_num)
        evaluate('data/Val_IDs.txt', multi_classifier)