import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np


class Config(object):
    """Configuration parameters."""

    def __init__(self, dataset_path, embedding='random'):
        self.model_name = 'TextCNN'
        self.train_path = dataset_path + '/train.txt'                    # training set
        self.dev_path = dataset_path + '/dev.txt'                        # validation set
        self.test_path = dataset_path + '/test.txt'                      # test set
        with open(dataset_path + '/class.txt', encoding='utf-8') as f:
            self.class_list = [x.strip() for x in f.readlines()]         # class names, one per line
        self.vocab_path = dataset_path + '/vocab.pkl'                    # vocabulary

        # Use pre-trained embeddings unless random initialization is requested.
        if embedding != 'random':
            self.embedding_pretrained = torch.tensor(
                np.load(dataset_path + '/' + embedding)["embeddings"].astype('float32'))
        else:
            self.embedding_pretrained = None

        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        self.dropout = 0.5                                          # dropout rate
        self.require_improvement = 1000                             # stop training if no improvement after this many batches
        self.num_classes = len(self.class_list)                     # number of classes
        self.n_vocab = 0                                            # vocabulary size, filled in at runtime
        self.num_epochs = 20                                        # number of training epochs
        self.batch_size = 128                                       # mini-batch size
        self.pad_size = 32                                          # sequence length (pad or truncate to this)
        self.learning_rate = 1e-3                                   # learning rate
        self.embed = self.embedding_pretrained.size(1) \
            if self.embedding_pretrained is not None else 300       # embedding dimension
        self.filter_sizes = (2, 3, 4)                               # convolution kernel sizes
        self.num_filters = 256                                      # number of filters per kernel size
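
# Expected layout of `dataset_path` (inferred from the paths above; the exact line
# format of the text files depends on the preprocessing code, which is not shown here):
#   train.txt / dev.txt / test.txt   -- one sample per line (text and label, format assumed)
#   class.txt                        -- one class name per line
#   vocab.pkl                        -- pickled token-to-index vocabulary
#   <embedding file>                 -- optional .npz with pre-trained vectors under key "embeddings"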


class TextCNN(nn.Module):
    """TextCNN model: parallel convolutions over word embeddings, max-pooled and fed to a linear classifier."""

    def __init__(self, config):
        super(TextCNN, self).__init__()
        if config.embedding_pretrained is not None:
            self.embedding = nn.Embedding.from_pretrained(config.embedding_pretrained, freeze=False)
        else:
            self.embedding = nn.Embedding(config.n_vocab, config.embed, padding_idx=config.n_vocab - 1)

        # One Conv2d per kernel size; each kernel spans the full embedding dimension.
        self.convs = nn.ModuleList(
            [nn.Conv2d(1, config.num_filters, (k, config.embed)) for k in config.filter_sizes])
        self.dropout = nn.Dropout(config.dropout)
        self.fc = nn.Linear(config.num_filters * len(config.filter_sizes), config.num_classes)

    def conv_and_pool(self, x, conv):
        # [batch, 1, seq_len, embed] -> conv + relu -> [batch, num_filters, seq_len - k + 1]
        x = F.relu(conv(x)).squeeze(3)
        # max-pool over the remaining time dimension -> [batch, num_filters]
        x = F.max_pool1d(x, x.size(2)).squeeze(2)
        return x

    def forward(self, x):
        out = self.embedding(x[0])      # x[0]: token ids, [batch, seq_len] -> [batch, seq_len, embed]
        out = out.unsqueeze(1)          # add a channel dimension for Conv2d
        out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs], 1)
        features = self.dropout(out)
        out = self.fc(features)
        return out, features            # logits and the pre-classifier feature vector

    def feature(self, x):
        """
        Extract the intermediate feature vector (the output of the dropout layer,
        i.e. the layer just before the final classifier), e.g. for visualization.
        Assumes the model is in eval() mode so that dropout acts as the identity.
        """
        with torch.no_grad():
            out = self.embedding(x[0])
            out = out.unsqueeze(1)
            out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs], 1)
            features = self.dropout(out)
            return features.cpu().numpy()

    def get_prediction(self, x):
        """
        Return the raw output of the final layer (logits, before softmax) as a numpy array.
        Assumes the model is in eval() mode so that dropout acts as the identity.
        """
        with torch.no_grad():
            out = self.embedding(x[0])
            out = out.unsqueeze(1)
            out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs], 1)
            features = self.dropout(out)
            predictions = self.fc(features)
            return predictions.cpu().numpy()

    def prediction(self, features_vector):
        """
        Predict class labels from an intermediate feature vector, i.e. a vector with
        the same shape as the output of feature(). Returns the predicted class indices.
        """
        with torch.no_grad():
            if isinstance(features_vector, np.ndarray):
                features_vector = torch.tensor(features_vector, dtype=torch.float32).to(self.fc.weight.device)

            # Accept a single feature vector as well as a batch.
            if len(features_vector.shape) == 1:
                features_vector = features_vector.unsqueeze(0)

            predictions = self.fc(features_vector)
            predicted_classes = torch.argmax(predictions, dim=1)
            return predicted_classes.cpu().numpy()
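

# A minimal usage sketch (illustrative only, not part of the original training pipeline):
# instead of reading a dataset from disk via Config, it builds a lightweight stand-in
# config with SimpleNamespace, runs a dummy batch through the model, and shows how
# feature() and prediction() round-trip through the intermediate features. The feature
# matrix could then be passed to a dimensionality-reduction tool (e.g. t-SNE) for plotting.
if __name__ == '__main__':
    from types import SimpleNamespace

    # Hypothetical hyperparameters chosen only for this demo.
    demo_config = SimpleNamespace(
        embedding_pretrained=None, n_vocab=100, embed=300,
        num_filters=256, filter_sizes=(2, 3, 4), dropout=0.5, num_classes=10)

    model = TextCNN(demo_config)
    model.eval()  # disable dropout so feature() / prediction() are deterministic

    token_ids = torch.randint(0, demo_config.n_vocab - 1, (4, 32))   # [batch, seq_len]
    batch = (token_ids,)                                             # the model only reads x[0]

    logits, _ = model(batch)          # [4, num_classes]
    feats = model.feature(batch)      # [4, num_filters * len(filter_sizes)] as numpy
    preds = model.prediction(feats)   # class indices recovered from the features
    print(logits.shape, feats.shape, preds)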