| |
| """ |
| NLP From Scratch: Classifying Names with a Character-Level RNN |
| ************************************************************** |
| **Author**: `Sean Robertson <https://github.com/spro/practical-pytorch>`_ |
| |
| We will be building and training a basic character-level RNN to classify |
words. This tutorial, along with the following two, shows how to
preprocess data for NLP modeling "from scratch", in particular not using
| many of the convenience functions of `torchtext`, so you can see how |
| preprocessing for NLP modeling works at a low level. |
| |
| A character-level RNN reads words as a series of characters - |
| outputting a prediction and "hidden state" at each step, feeding its |
| previous hidden state into each next step. We take the final prediction |
| to be the output, i.e. which class the word belongs to. |
| |
| Specifically, we'll train on a few thousand surnames from 18 languages |
| of origin, and predict which language a name is from based on the |
| spelling: |
| |
| :: |
| |
| $ python predict.py Hinton |
| (-0.47) Scottish |
| (-1.52) English |
| (-3.57) Irish |
| |
| $ python predict.py Schmidhuber |
| (-0.19) German |
| (-2.48) Czech |
| (-2.68) Dutch |
| |
| |
| **Recommended Reading:** |
| |
| I assume you have at least installed PyTorch, know Python, and |
| understand Tensors: |
| |
| - https://pytorch.org/ For installation instructions |
| - :doc:`/beginner/deep_learning_60min_blitz` to get started with PyTorch in general |
| - :doc:`/beginner/pytorch_with_examples` for a wide and deep overview |
- :doc:`/beginner/former_torchies_tutorial` if you are a former Lua Torch user
| |
| It would also be useful to know about RNNs and how they work: |
| |
| - `The Unreasonable Effectiveness of Recurrent Neural |
| Networks <https://karpathy.github.io/2015/05/21/rnn-effectiveness/>`__ |
| shows a bunch of real life examples |
| - `Understanding LSTM |
| Networks <https://colah.github.io/posts/2015-08-Understanding-LSTMs/>`__ |
| is about LSTMs specifically but also informative about RNNs in |
| general |
| |
| Preparing the Data |
| ================== |
| |
| .. Note:: |
| Download the data from |
| `here <https://download.pytorch.org/tutorial/data.zip>`_ |
| and extract it to the current directory. |
| |
| Included in the ``data/names`` directory are 18 text files named as |
| "[Language].txt". Each file contains a bunch of names, one name per |
| line, mostly romanized (but we still need to convert from Unicode to |
| ASCII). |
| |
| We'll end up with a dictionary of lists of names per language, |
| ``{language: [names ...]}``. The generic variables "category" and "line" |
| (for language and name in our case) are used for later extensibility. |
| """ |
| from __future__ import unicode_literals, print_function, division |
| from io import open |
| import glob |
| import os |
|
|
| def findFiles(path): return glob.glob(path) |
|
|
| print(findFiles('data/names/*.txt')) |
|
|
import unicodedata
import string


# Vocabulary: ASCII letters plus the punctuation that appears in names.
all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)


def unicodeToAscii(s):
    """Strip accents from ``s``, keeping only characters in ``all_letters``."""
    kept = []
    for c in unicodedata.normalize('NFD', s):
        # 'Mn' = nonspacing combining marks (the accents split off by NFD).
        if unicodedata.category(c) != 'Mn' and c in all_letters:
            kept.append(c)
    return ''.join(kept)


print(unicodeToAscii('Ślusàrski'))
|
|
| |
# category_lines maps each language name to its list of surnames.
category_lines = {}
# all_categories lists the language names in load order.
all_categories = []
|
|
| |
def readLines(filename):
    """Read ``filename`` (UTF-8) and return its lines, ASCII-normalized.

    Uses a ``with`` block so the file handle is closed promptly; the
    original opened the file and never closed it, leaking the handle
    until garbage collection.
    """
    with open(filename, encoding='utf-8') as f:
        lines = f.read().strip().split('\n')
    return [unicodeToAscii(line) for line in lines]
|
|
# Build category_lines / all_categories from every data/names/*.txt file;
# the file's base name (e.g. "Italian") is the category label.
for path in findFiles('data/names/*.txt'):
    language = os.path.splitext(os.path.basename(path))[0]
    all_categories.append(language)
    category_lines[language] = readLines(path)


n_categories = len(all_categories)
|
|
|
|
| |
| |
| |
| |
| |
| |
|
|
| print(category_lines['Italian'][:5]) |
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| import torch |
|
|
| |
def letterToIndex(letter):
    """Return the position of ``letter`` in ``all_letters``, e.g. 'a' = 0.

    str.find returns -1 for characters outside the vocabulary.
    """
    index = all_letters.find(letter)
    return index
|
|
| |
def letterToTensor(letter):
    """One-hot encode a single letter as a <1 x n_letters> tensor."""
    one_hot = torch.zeros(1, n_letters)
    one_hot[0, letterToIndex(letter)] = 1
    return one_hot
|
|
| |
| |
def lineToTensor(line):
    """Encode a word as a <line_length x 1 x n_letters> one-hot tensor.

    The middle dimension of size 1 is the batch dimension the RNN expects.
    """
    encoded = torch.zeros(len(line), 1, n_letters)
    for position, letter in enumerate(line):
        encoded[position, 0, letterToIndex(letter)] = 1
    return encoded
|
|
# A single letter is a <1 x n_letters> one-hot row.
print(letterToTensor('J'))

# A whole word is a <word_length x 1 x n_letters> tensor.
print(lineToTensor('Jones').size())
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| import torch.nn as nn |
|
|
class RNN(nn.Module):
    """Minimal Elman-style RNN cell built from two Linear layers.

    At each time step the input letter and the previous hidden state are
    concatenated; one linear layer produces the next hidden state and the
    other produces log-probabilities over the output categories.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        # Both layers see the concatenated [input, hidden] vector.
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input, hidden):
        """Run one time step; returns (log_probs, next_hidden)."""
        joined = torch.cat((input, hidden), 1)
        next_hidden = self.i2h(joined)
        log_probs = self.softmax(self.i2o(joined))
        return log_probs, next_hidden

    def initHidden(self):
        """Fresh all-zero hidden state for the start of a word."""
        return torch.zeros(1, self.hidden_size)
|
|
# 128 hidden units; input is a one-hot letter, output is one score per language.
n_hidden = 128
rnn = RNN(n_letters, n_hidden, n_categories)
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
|
|
# Run a single RNN step on one encoded letter.
input = letterToTensor('A')  # NOTE: shadows the builtin `input`
hidden =torch.zeros(1, n_hidden)


output, next_hidden = rnn(input, hidden)
|
|
|
|
| |
| |
| |
| |
| |
| |
|
|
# For efficiency, pre-encode the whole word once and slice out each step.
input = lineToTensor('Albert')
hidden = torch.zeros(1, n_hidden)


# Feed only the first letter; `output` is a <1 x n_categories> log-prob row.
output, next_hidden = rnn(input[0], hidden)
print(output)
|
|
|
|
| |
| |
| |
| |
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
def categoryFromOutput(output):
    """Map a network output row to (category_name, category_index).

    Takes the argmax of the log-probabilities via Tensor.topk.
    """
    _, top_index = output.topk(1)
    category_index = top_index[0].item()
    return all_categories[category_index], category_index
|
|
| print(categoryFromOutput(output)) |
|
|
|
|
| |
| |
| |
| |
|
|
| import random |
|
|
def randomChoice(l):
    """Return a uniformly random element of the non-empty list ``l``."""
    last = len(l) - 1
    return l[random.randint(0, last)]
|
|
def randomTrainingExample():
    """Sample a random (category, line) pair plus its tensor encodings.

    Returns (category_name, line, category_tensor, line_tensor) where
    category_tensor is a 1-element LongTensor target suitable for NLLLoss.
    """
    category = randomChoice(all_categories)
    line = randomChoice(category_lines[category])
    target_index = all_categories.index(category)
    category_tensor = torch.tensor([target_index], dtype=torch.long)
    return category, line, category_tensor, lineToTensor(line)
|
|
# Show a few random samples to confirm the sampling pipeline works.
for i in range(10):
    category, line, category_tensor, line_tensor = randomTrainingExample()
    print('category =', category, '/ line =', line)
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| criterion = nn.NLLLoss() |
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
# SGD step size; too high diverges, too low barely learns.
learning_rate = 0.005


def train(category_tensor, line_tensor):
    """Run one word through the RNN, backprop, and apply a manual SGD step.

    Returns (final_output, loss_value) for the given training example.
    """
    hidden = rnn.initHidden()
    rnn.zero_grad()

    # Unroll over the letters, carrying the hidden state forward.
    for step in range(line_tensor.size(0)):
        output, hidden = rnn(line_tensor[step], hidden)

    loss = criterion(output, category_tensor)
    loss.backward()

    # Manual SGD: in-place update p <- p - lr * grad.
    for param in rnn.parameters():
        param.data.add_(param.grad.data, alpha=-learning_rate)

    return output, loss.item()
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
|
|
import time
import math


n_iters = 100000      # total training iterations (one random sample each)
print_every = 5000    # log progress every this many iterations
plot_every = 1000     # record one averaged loss point every this many


# Running loss bookkeeping for the plot below.
current_loss = 0      # sum over the current plot_every window
all_losses = []       # averaged loss per window
|
|
def timeSince(since):
    """Format wall-clock time elapsed since ``since`` (a time.time() stamp) as 'Xm Ys'."""
    elapsed = time.time() - since
    minutes, seconds = divmod(elapsed, 60)
    return '%dm %ds' % (minutes, seconds)
|
|
start = time.time()

# Training loop: sample a random example each iteration (no epochs/batches).
# The loop variable is named `iteration` rather than `iter`, which would
# shadow the builtin `iter`.
for iteration in range(1, n_iters + 1):
    category, line, category_tensor, line_tensor = randomTrainingExample()
    output, loss = train(category_tensor, line_tensor)
    current_loss += loss

    # Periodically print iteration, % done, elapsed time, loss, the
    # example, the guess, and whether it was right.
    if iteration % print_every == 0:
        guess, guess_i = categoryFromOutput(output)
        correct = '✓' if guess == category else '✗ (%s)' % category
        print('%d %d%% (%s) %.4f %s / %s %s' % (
            iteration, iteration / n_iters * 100, timeSince(start),
            loss, line, guess, correct))

    # Record the average loss over the last plot_every iterations.
    if iteration % plot_every == 0:
        all_losses.append(current_loss / plot_every)
        current_loss = 0
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
|
|
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker


# Plot the averaged losses; a downward trend means the network is learning.
plt.figure()
plt.plot(all_losses)
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
# confusion[i][j] counts names of true language i predicted as language j.
confusion = torch.zeros(n_categories, n_categories)
n_confusion = 10000  # number of random samples used to fill the matrix
|
|
| |
def evaluate(line_tensor):
    """Forward a whole encoded word through the RNN and return the final
    output row of log-probabilities (like train(), but with no backprop)."""
    hidden = rnn.initHidden()

    for step in range(line_tensor.size(0)):
        output, hidden = rnn(line_tensor[step], hidden)

    return output
|
|
| |
# Tally predictions on random samples into the confusion matrix.
for i in range(n_confusion):
    category, line, category_tensor, line_tensor = randomTrainingExample()
    output = evaluate(line_tensor)
    guess, guess_i = categoryFromOutput(output)
    category_i = all_categories.index(category)
    confusion[category_i][guess_i] += 1


# Normalize each row so it reads as per-language prediction frequencies.
for i in range(n_categories):
    confusion[i] = confusion[i] / confusion[i].sum()
|
|
| |
# Render the confusion matrix as a heat map.
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(confusion.numpy())
fig.colorbar(cax)


# Label both axes with language names; the leading '' aligns the labels
# with matshow's tick positions.
# NOTE(review): newer matplotlib warns when set_*ticklabels is used without
# a FixedLocator — confirm against the installed version.
ax.set_xticklabels([''] + all_categories, rotation=90)
ax.set_yticklabels([''] + all_categories)


# Force a tick (and thus a label) at every cell.
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))


plt.show()
|
|
|
|
| |
| |
| |
| |
| |
| |
|
|
|
|
| |
| |
| |
| |
|
|
def predict(input_line, n_predictions=3):
    """Print and return the top-n category predictions for ``input_line``.

    Returns a list of [log_probability, category_name] pairs. The original
    built this list but discarded it; returning it is backward-compatible,
    since existing callers ignored the previous ``None``.
    """
    print('\n> %s' % input_line)
    with torch.no_grad():
        output = evaluate(lineToTensor(input_line))

        # Top n_predictions log-probabilities and their category indices.
        topv, topi = output.topk(n_predictions, 1, True)
        predictions = []

        for i in range(n_predictions):
            value = topv[0][i].item()
            category_index = topi[0][i].item()
            print('(%.2f) %s' % (value, all_categories[category_index]))
            predictions.append([value, all_categories[category_index]])

    return predictions
|
|
# Try a few names; each prints its top-3 language guesses.
predict('Dovesky')
predict('Jackson')
predict('Satoshi')
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|