import jieba
from collections import Counter
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
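
# A minimal TextCNN-style sentence classifier for Chinese text: jieba tokenization,
# an index vocabulary with 0 reserved for padding, parallel n-gram convolutions with
# max-over-time pooling (after Kim, 2014), and a training loop with early stopping.
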
class DataProcessor:
    """Tokenizes Chinese text with jieba and turns sentences into fixed-length index sequences."""

    def __init__(self, vocab=None, max_len=100):
        self.vocab = vocab
        self.max_len = max_len

    def tokenize(self, sentence):
        # jieba.cut returns a generator; materialize it as a list of tokens
        return list(jieba.cut(sentence))

    def build_vocab(self, data):
        vocab = Counter()
        for sentence, label in data:
            vocab.update(self.tokenize(sentence))
        # Index words from 1, most frequent first; 0 stays reserved for padding
        vocab = {word: idx for idx, (word, _) in enumerate(vocab.most_common(), start=1)}
        vocab['<PAD>'] = 0  # special token for padding
        self.vocab = vocab
        return vocab

    def sentence_to_indices(self, sentence):
        # Out-of-vocabulary words fall back to index 0, the padding index
        return [self.vocab.get(word, 0) for word in self.tokenize(sentence)]

    def pad_sequence(self, seq):
        if len(seq) < self.max_len:
            return seq + [0] * (self.max_len - len(seq))  # pad with 0 up to max_len
        return seq[:self.max_len]  # truncate sequences longer than max_len

    def preprocess_data(self, data):
        return [(self.pad_sequence(self.sentence_to_indices(sentence)), label)
                for sentence, label in data]

    def load_data(self, file_path):
        # Each line holds a sentence and an integer label, separated by a tab
        data = []
        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                sentence, label = line.strip().split('\t')
                data.append((sentence, int(label)))
        return data
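
# Expected input file format (an assumption based on load_data): one example per
# line, with a tab between the sentence and its integer label, e.g.
#   今天天气很好	0
#   这部电影真无聊	1
#
# Minimal DataProcessor usage sketch (hypothetical sentence and label):
#   p = DataProcessor(max_len=8)
#   p.build_vocab([("今天天气很好", 0)])
#   ids = p.pad_sequence(p.sentence_to_indices("今天天气很好"))  # 8 ints, 0-padded
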
class CNNClassifier(nn.Module):
    """TextCNN-style classifier: parallel convolutions over 3-, 4-, and 5-word windows."""

    def __init__(self, vocab_size, embed_size, num_classes):
        super(CNNClassifier, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # Each kernel spans the full embedding width, so a (k, embed_size) filter
        # acts as a detector for k-gram patterns
        self.conv1 = nn.Conv2d(1, 100, (3, embed_size))
        self.conv2 = nn.Conv2d(1, 100, (4, embed_size))
        self.conv3 = nn.Conv2d(1, 100, (5, embed_size))
        self.fc = nn.Linear(300, num_classes)  # 3 branches x 100 filters = 300 features

    def forward(self, x):
        x = self.embedding(x).unsqueeze(1)  # (batch_size, 1, max_len, embed_size)
        # ReLU + max-over-time pooling for each n-gram branch
        x1 = F.relu(self.conv1(x)).squeeze(3)  # (batch_size, 100, max_len-3+1)
        x1 = F.max_pool1d(x1, x1.size(2)).squeeze(2)
        x2 = F.relu(self.conv2(x)).squeeze(3)
        x2 = F.max_pool1d(x2, x2.size(2)).squeeze(2)
        x3 = F.relu(self.conv3(x)).squeeze(3)
        x3 = F.max_pool1d(x3, x3.size(2)).squeeze(2)
        out = torch.cat((x1, x2, x3), 1)  # (batch_size, 300)
        out = self.fc(out)
        return out
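
    # Shape walkthrough for forward (B = batch size, L = max_len, E = embed_size):
    #   embedding: (B, L) -> (B, L, E); unsqueeze(1) -> (B, 1, L, E)
    #   conv with kernel (k, E): -> (B, 100, L-k+1, 1); squeeze(3) -> (B, 100, L-k+1)
    #   max_pool1d over the whole time axis: -> (B, 100, 1); squeeze(2) -> (B, 100)
    #   concatenating the k = 3, 4, 5 branches gives (B, 300), matching self.fc.
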
    def init_train(self, train_data, dev_data, epochs=10, batch_size=64, learning_rate=0.0005, patience=3):
        optimizer = optim.Adam(self.parameters(), lr=learning_rate)
        criterion = nn.CrossEntropyLoss()
        best_acc = 0
        patience_counter = 0
        for epoch in range(epochs):
            self.train()
            total_loss = 0
            for i in range(0, len(train_data), batch_size):
                batch = train_data[i:i + batch_size]
                inputs, labels = zip(*batch)
                inputs = torch.LongTensor(inputs)
                labels = torch.LongTensor(labels)
                optimizer.zero_grad()
                outputs = self(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
                total_loss += loss.item()
            dev_acc = self.evaluate(dev_data)
            print(f"Epoch {epoch+1}/{epochs}, Loss: {total_loss:.4f}, Dev Accuracy: {dev_acc:.4f}")
            # Early stopping: stop once dev accuracy has not improved for `patience` epochs
            if dev_acc > best_acc:
                best_acc = dev_acc
                patience_counter = 0
            else:
                patience_counter += 1
                if patience_counter >= patience:
                    print("Early stopping")
                    break

    def evaluate(self, data):
        self.eval()
        correct = 0
        with torch.no_grad():
            for inputs, labels in data:
                inputs = torch.LongTensor([inputs])  # one example per forward pass
                labels = torch.LongTensor([labels])
                outputs = self(inputs)
                _, predicted = torch.max(outputs, 1)
                correct += (predicted == labels).sum().item()
        return correct / len(data)
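
# Single-sentence inference sketch (hypothetical sentence; assumes a fitted
# `processor` and a trained `model` as built in main below):
#   ids = processor.pad_sequence(processor.sentence_to_indices("质量很不错"))
#   logits = model(torch.LongTensor([ids]))
#   pred = logits.argmax(dim=1).item()  # predicted class index
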
def main():
    processor = DataProcessor(max_len=100)
    train_data = processor.load_data('train.txt')
    dev_data = processor.load_data('dev.txt')
    test_data = processor.load_data('test.txt')
    # Build the vocabulary from the training split only
    vocab = processor.build_vocab(train_data)
    train_data = processor.preprocess_data(train_data)
    dev_data = processor.preprocess_data(dev_data)
    test_data = processor.preprocess_data(test_data)
    vocab_size = len(vocab)
    embed_size = 100
    num_classes = 4
    model = CNNClassifier(vocab_size, embed_size, num_classes)
    model.init_train(train_data, dev_data, epochs=10, batch_size=64, learning_rate=0.001, patience=3)
    test_acc = model.evaluate(test_data)
    print(f"Test Accuracy: {test_acc:.4f}")


if __name__ == '__main__':
    main()