import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class Config(object):
    """Configuration parameters."""

    def __init__(self, dataset, embedding):
        self.model_name = 'TextRNN_Att'
        self.train_path = dataset + '/data/train.txt'    # training set
        self.dev_path = dataset + '/data/dev.txt'        # validation set
        self.test_path = dataset + '/data/test.txt'      # test set
        with open(dataset + '/data/class.txt', encoding='utf-8') as f:
            self.class_list = [x.strip() for x in f.readlines()]  # class names, one per line
        self.vocab_path = dataset + '/data/vocab.pkl'    # vocabulary
        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # checkpoint of the best model
        self.log_path = dataset + '/log/' + self.model_name
        self.embedding_pretrained = torch.tensor(
            np.load(dataset + '/data/' + embedding)["embeddings"].astype('float32'))\
            if embedding != 'random' else None           # pretrained embeddings, or None to train from scratch
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        self.dropout = 0.5                               # dropout rate
        self.require_improvement = 1000                  # stop early if dev metrics do not improve for this many batches
        self.num_classes = len(self.class_list)          # number of target classes
        self.n_vocab = 0                                 # vocabulary size, filled in at runtime
        self.num_epochs = 10                             # number of training epochs
        self.batch_size = 128                            # mini-batch size
        self.pad_size = 32                               # every sequence is padded/truncated to this length
        self.learning_rate = 1e-3                        # learning rate
        self.embed = self.embedding_pretrained.size(1)\
            if self.embedding_pretrained is not None else 300  # embedding dimension
        self.hidden_size = 128                           # LSTM hidden size
        self.num_layers = 2                              # number of LSTM layers
        self.hidden_size2 = 64                           # size of the intermediate fully connected layer
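
# Instantiation sketch (the directory name and .npz file below are hypothetical
# placeholders for any dataset directory laid out as above):
#   config = Config(dataset='THUCNews', embedding='embedding_SougouNews.npz')
#   config = Config(dataset='THUCNews', embedding='random')  # no pretrained vectors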


'''Attention-Based Bidirectional Long Short-Term Memory Networks for Relation Classification'''
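
# Sketch of the attention computed in TextRNN_Att.forward() below, following
# the paper cited above (H: BiLSTM hidden states, w: learned context vector):
#   M = tanh(H)
#   alpha = softmax(M @ w)      one scalar weight per time step
#   r = sum_t alpha_t * H_t     attention-weighted sentence representation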

class TextRNN_Att(nn.Module):
    def __init__(self, config):
        super(TextRNN_Att, self).__init__()
        if config.embedding_pretrained is not None:
            # Initialize from pretrained vectors and fine-tune them.
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)
        self.lstm = nn.LSTM(config.embed, config.hidden_size, config.num_layers,
                            bidirectional=True, batch_first=True, dropout=config.dropout)
        self.tanh1 = nn.Tanh()
        # Learned attention context vector over the 2 * hidden_size BiLSTM outputs.
        self.w = nn.Parameter(torch.zeros(config.hidden_size * 2))
        self.tanh2 = nn.Tanh()  # defined for completeness; not used in forward()
        self.fc1 = nn.Linear(config.hidden_size * 2, config.hidden_size2)
        self.fc = nn.Linear(config.hidden_size2, config.num_classes)

    def forward(self, x):
        x, _ = x                        # x is a (token_ids, seq_len) tuple; lengths are unused here
        emb = self.embedding(x)         # [batch_size, seq_len, embed]
        H, _ = self.lstm(emb)           # [batch_size, seq_len, hidden_size * 2]

        M = self.tanh1(H)
        alpha = F.softmax(torch.matmul(M, self.w), dim=1).unsqueeze(-1)  # [batch_size, seq_len, 1]
        out = H * alpha                 # weight each time step by its attention score
        out = torch.sum(out, 1)         # [batch_size, hidden_size * 2]
        out = F.relu(out)
        out = self.fc1(out)             # [batch_size, hidden_size2]
        out = self.fc(out)              # [batch_size, num_classes] (logits)
        return out

    def feature(self, x):
        """
        Extract intermediate feature vectors for visualization: the
        attention-weighted BiLSTM representation that feeds fc1 (i.e. the
        input to fc1, not its output), shape [batch_size, hidden_size * 2].
        Call model.eval() first so dropout is disabled.
        """
        with torch.no_grad():
            x, _ = x
            emb = self.embedding(x)
            H, _ = self.lstm(emb)

            M = self.tanh1(H)
            alpha = F.softmax(torch.matmul(M, self.w), dim=1).unsqueeze(-1)
            out = H * alpha
            out = torch.sum(out, 1)
            out = F.relu(out)
            return out.cpu().numpy()

    def get_prediction(self, x):
        """
        Return the model's final-layer output vectors (logits) as a numpy
        array, shape [batch_size, num_classes]. Call model.eval() first so
        dropout is disabled.
        """
        with torch.no_grad():
            x, _ = x
            emb = self.embedding(x)
            H, _ = self.lstm(emb)

            M = self.tanh1(H)
            alpha = F.softmax(torch.matmul(M, self.w), dim=1).unsqueeze(-1)
            out = H * alpha
            out = torch.sum(out, 1)
            out = F.relu(out)
            out = self.fc1(out)
            predictions = self.fc(out)
            return predictions.cpu().numpy()

    def prediction(self, features):
        """
        Predict logits from intermediate feature vectors.
        features: output of feature(), shape [batch_size, hidden_size * 2]
        (fc1 expects hidden_size * 2 inputs).
        """
        with torch.no_grad():
            features_tensor = torch.tensor(features, dtype=torch.float32).to(next(self.parameters()).device)
            out = self.fc1(features_tensor)
            predictions = self.fc(out)
            return predictions.cpu().numpy()
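

if __name__ == '__main__':
    # Minimal smoke-test sketch: the configuration values below are
    # illustrative assumptions (no dataset files are read), not project
    # defaults.
    from types import SimpleNamespace

    cfg = SimpleNamespace(embedding_pretrained=None, n_vocab=100, embed=300,
                          hidden_size=128, num_layers=2, dropout=0.5,
                          hidden_size2=64, num_classes=10)
    model = TextRNN_Att(cfg).eval()                    # eval() disables dropout
    ids = torch.randint(0, cfg.n_vocab - 1, (4, 32))   # [batch_size, seq_len]
    lengths = torch.full((4,), 32, dtype=torch.long)   # unused by the model
    logits = model((ids, lengths))                     # [4, num_classes]
    feats = model.feature((ids, lengths))              # [4, hidden_size * 2]
    # In eval mode, prediction(feature(x)) matches get_prediction(x).
    assert np.allclose(model.prediction(feats),
                       model.get_prediction((ids, lengths)), atol=1e-5)
    print(logits.shape)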