| import torch
|
| import torch.nn as nn
|
| import torch.optim as optim
|
| from torch.utils.data import Dataset, DataLoader
|
| import nltk
|
| from nltk.util import ngrams
|
| from collections import Counter
|
| import numpy as np
|
|
|
|
|
# Fetch the NLTK tokenizer models required by nltk.word_tokenize below.
nltk.download('punkt')

# NOTE(review): 'stopwords' is downloaded but never used anywhere in this
# file — confirm it is needed or remove the download.
nltk.download('stopwords')
|
|
|
|
|
class TextDataset(Dataset):
    """Dataset of (context, target) pairs extracted from a plain-text file.

    The file is lowercased and tokenized, its n-grams are counted, and every
    n-gram kept (frequency >= min_freq) contributes one sample: the indices
    of its first n-1 tokens as context and the index of its last token as
    the prediction target.
    """

    def __init__(self, filepath, n=3, min_freq=1):
        self.n = n
        self.data = self.load_and_preprocess(filepath, min_freq)

    def load_and_preprocess(self, filepath, min_freq):
        """Read the corpus and return the list of (context_ids, target_id) pairs.

        Side effects: populates ``self.vocabulary`` (sorted list),
        ``self.word_to_index`` and ``self.index_to_word`` from the tokens of
        the kept n-grams.
        """
        with open(filepath, 'r', encoding='utf-8') as handle:
            raw_text = handle.read()

        tokens = nltk.word_tokenize(raw_text.lower())

        # Count every n-gram, then keep only those seen at least min_freq times.
        gram_counts = Counter(ngrams(tokens, self.n))
        kept_grams = [gram for gram, freq in gram_counts.items() if freq >= min_freq]

        # The vocabulary covers exactly the tokens that appear in kept n-grams.
        vocab = sorted({word for gram in kept_grams for word in gram})
        self.vocabulary = vocab
        self.word_to_index = {word: pos for pos, word in enumerate(vocab)}
        self.index_to_word = {pos: word for pos, word in enumerate(vocab)}

        pairs = []
        for gram in kept_grams:
            *context_words, target_word = gram
            context_ids = [self.word_to_index[word] for word in context_words]
            pairs.append((context_ids, self.word_to_index[target_word]))
        return pairs

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        context_ids, target_id = self.data[idx]
        return torch.tensor(context_ids), torch.tensor(target_id)
|
|
|
|
|
class LanguageModel(nn.Module):
    """Next-token predictor: embedding -> single-layer LSTM -> linear head."""

    def __init__(self, vocab_size, embedding_dim, hidden_dim):
        super().__init__()
        # Attribute names are part of the checkpoint (state_dict) layout.
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
        self.linear = nn.Linear(hidden_dim, vocab_size)

    def forward(self, context):
        """Map a (batch, seq) tensor of token ids to (batch, vocab_size) scores.

        Only the LSTM output at the final time step feeds the linear head,
        i.e. the model scores the token that follows the whole context.
        """
        token_vectors = self.embedding(context)
        lstm_states, _ = self.lstm(token_vectors)
        final_state = lstm_states[:, -1, :]
        return self.linear(final_state)
|
|
|
|
|
|
|
# ---------------- Hyperparameters ----------------
filepath = 'dataset.txt'
n_gram_size = 3          # n in the n-gram model: n-1 context tokens predict 1 target
min_frequency = 2        # drop n-grams seen fewer than this many times
embedding_dimension = 32
hidden_dimension = 64
learning_rate = 0.01
batch_size = 32
epochs = 10

# ---------------- Data ----------------
dataset = TextDataset(filepath, n_gram_size, min_frequency)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

# ---------------- Model / loss / optimizer ----------------
vocab_size = len(dataset.vocabulary)
model = LanguageModel(vocab_size, embedding_dimension, hidden_dimension)

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# ---------------- Training loop ----------------
model.train()  # explicit, since generate_text() below switches to eval mode
for epoch in range(epochs):
    total_loss = 0.0
    num_batches = 0
    for contexts, targets in dataloader:
        optimizer.zero_grad()
        outputs = model(contexts)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        num_batches += 1

    # Report the mean loss over the whole epoch. The original printed only the
    # last batch's loss, which is noisy — and was a NameError if the dataloader
    # yielded no batches at all.
    mean_loss = total_loss / max(num_batches, 1)
    print(f"Epoch [{epoch+1}/{epochs}], Loss: {mean_loss:.4f}")

torch.save(model.state_dict(), 'language_model.pth')

print("Training complete. Model saved as language_model.pth")
|
|
|
|
|
|
|
|
|
def generate_text(model, dataset, start_sequence="the", max_length=50):
    """Greedily generate up to ``max_length`` tokens following ``start_sequence``.

    Args:
        model: trained module mapping a (1, seq) tensor of token ids to
            (1, vocab_size) scores; it is put in eval mode here.
        dataset: object exposing ``word_to_index``, ``index_to_word`` and
            the n-gram size ``n`` (as TextDataset does).
        start_sequence: seed text. It is lowercased to match training-time
            preprocessing; every seed token must be in the vocabulary.
        max_length: maximum number of tokens to generate.

    Returns:
        Seed tokens plus generated tokens joined by single spaces. Generation
        stops early when a "." token is produced.

    Raises:
        ValueError: if any seed token is not in the vocabulary.
    """
    model.eval()

    # Training lowercased the corpus, so lowercase the seed too; otherwise a
    # capitalized seed word would miss the vocabulary and raise a bare KeyError.
    tokens = start_sequence.lower().split()
    unknown = [t for t in tokens if t not in dataset.word_to_index]
    if unknown:
        raise ValueError(f"Tokens not in vocabulary: {unknown}")

    generated_text = tokens[:]

    # The model was trained on contexts of n-1 tokens. Use dataset.n instead of
    # the module-level n_gram_size global (which made this function unusable in
    # isolation), and trim the seed to the trained context length.
    context_size = dataset.n - 1
    context = [dataset.word_to_index[t] for t in tokens][-context_size:]

    for _ in range(max_length):
        with torch.no_grad():
            output = model(torch.tensor([context]))
        predicted_index = torch.argmax(output).item()
        predicted_word = dataset.index_to_word[predicted_index]
        generated_text.append(predicted_word)

        # Slide the context window forward by one token.
        context = (context + [predicted_index])[-context_size:]

        # Treat a sentence-final period as an end-of-sequence marker.
        if predicted_word == ".":
            break

    return " ".join(generated_text)
|
|
|
|
|
|
|
# ---------------- Inference demo ----------------
# Rebuild the architecture with the same dimensions used for training, then
# restore the saved weights. map_location keeps this working on CPU-only
# machines even if the checkpoint was written during a GPU run.
model = LanguageModel(vocab_size, embedding_dimension, hidden_dimension)
model.load_state_dict(torch.load('language_model.pth', map_location='cpu'))
model.eval()

generated_text = generate_text(model, dataset, start_sequence="the quick brown")
print(generated_text)