File size: 7,317 Bytes
625448f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
from TorchCRF import CRF
from sklearn.model_selection import train_test_split
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader
from torch.cuda.amp import autocast, GradScaler

# Set device: prefer CUDA when available, otherwise fall back to CPU.
# All tensors and the model are moved to this device below.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")


# Define the BiLSTM-CRF model with Layer Normalization
class BiLSTMCRFModel(nn.Module):
    """BiLSTM encoder with layer-normalized outputs feeding a CRF tagging head.

    With `labels` given, the forward pass returns the negative CRF
    log-likelihood (a loss to minimize); without labels it returns the
    Viterbi-decoded tag-id sequences for each batch element.
    """

    def __init__(self, vocab_size, embedding_dim, hidden_dim, num_labels):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, bidirectional=True, batch_first=True)
        # Normalize over the concatenated forward+backward hidden states to
        # stabilize the emission scores fed into the CRF.
        self.layer_norm = nn.LayerNorm(hidden_dim * 2)
        self.fc = nn.Linear(hidden_dim * 2, num_labels)
        self.crf = CRF(num_labels)

    def forward(self, words, attention_mask, labels=None):
        emb = self.embedding(words)
        encoded, _ = self.lstm(emb)
        scores = self.fc(self.layer_norm(encoded))  # per-token emission scores

        pad_mask = attention_mask.bool()
        if labels is None:
            # Inference: best tag sequence per sentence (padding excluded).
            return self.crf.viterbi_decode(scores, mask=pad_mask)
        # Training: CRF returns the log-likelihood, so negate it for a loss.
        return -self.crf(scores, labels, mask=pad_mask)


# Dataset class
class NERDataset(Dataset):
    """Sequence-tagging dataset over parallel lists of word-id / tag-id lists.

    Each item is a pair of 1-D LongTensors (word ids, tag ids) for one
    sentence; sentences keep their variable lengths (padding happens in the
    collate function).
    """

    def __init__(self, words, tags):
        self.words = words
        self.tags = tags

    def __len__(self):
        return len(self.words)

    def __getitem__(self, idx):
        word_ids, tag_ids = self.words[idx], self.tags[idx]
        return torch.tensor(word_ids), torch.tensor(tag_ids)


# Proper collate function for DataLoader
def collate_fn(batch):
    """Pad a batch of (word_ids, tag_ids) tensor pairs to a uniform length.

    Both sequences are padded on the right with 0; downstream code treats
    word id 0 as the padding marker when building the attention mask.
    """
    word_seqs, tag_seqs = [], []
    for words, tags in batch:
        word_seqs.append(words)
        tag_seqs.append(tags)
    return (
        pad_sequence(word_seqs, batch_first=True, padding_value=0),
        pad_sequence(tag_seqs, batch_first=True, padding_value=0),
    )


# Load and preprocess data
def prepare_data(df):
    """Convert a token-level DataFrame into id sequences plus vocabularies.

    Expects one row per token with 'Sentence', 'Word', and 'Tag' columns.

    Returns:
        words: list of word-id sequences, one per sentence.
        tags: list of tag-id sequences, parallel to `words`.
        word_to_id: word vocabulary; id 0 is reserved for '<PAD>' and the
            last id for '<UNK>'.
        tag_to_id / id_to_tag: tag vocabulary and its inverse.
    """
    # Normalize tags: missing -> 'O', strip whitespace, upper-case.
    df['Tag'] = df['Tag'].fillna('O').astype(str).apply(lambda x: x.strip().upper())

    # BUG FIX: reserve id 0 for the padding token. collate_fn pads with 0 and
    # the training/eval code builds the attention mask as (words != 0); with
    # the old 0-based enumeration one real word was mapped to 0 and silently
    # masked out as padding everywhere.
    word_to_id = {'<PAD>': 0}
    for word in set(df['Word']):
        if word not in word_to_id:  # don't clobber '<PAD>' if it appears as data
            word_to_id[word] = len(word_to_id)
    word_to_id['<UNK>'] = len(word_to_id)

    # NOTE(review): tag id 0 is still a real tag and doubles as the tag pad
    # value; this is harmless because the CRF mask excludes padded positions.
    tag_to_id = {tag: idx for idx, tag in enumerate(set(df['Tag']))}
    id_to_tag = {idx: tag for tag, idx in tag_to_id.items()}

    # groupby preserves within-group row order, so token order inside each
    # sentence follows the frame's row order.
    words, tags = [], []
    for _, group in df.groupby('Sentence'):
        words.append([word_to_id.get(w, word_to_id['<UNK>']) for w in group['Word']])
        tags.append([tag_to_id[t] for t in group['Tag']])

    return words, tags, word_to_id, tag_to_id, id_to_tag


# ---------------------------------------------------------------------------
# Data loading, splitting, and model construction
# ---------------------------------------------------------------------------

# Load the token-level dataset (one row per word; expects Sentence / Word /
# Tag columns — TODO confirm against the spreadsheet schema).
df = pd.read_excel('Augmented_Dataset.xlsx', engine='openpyxl')

# BUG FIX: removed df.sample(frac=1) here. That call shuffled individual
# token ROWS, which scrambled the word order inside every sentence once
# prepare_data() regrouped by 'Sentence' (groupby keeps the frame's row
# order within each group). Sentence-level shuffling is already provided by
# train_test_split(shuffle=True) below.

words, tags, word_to_id, tag_to_id, id_to_tag = prepare_data(df)

# Split into train and test at the sentence level (shuffled by the split).
train_words, test_words, train_tags, test_tags = train_test_split(words, tags, test_size=0.2, random_state=42,
                                                                  shuffle=True)

# Create PyTorch DataLoaders
train_dataset = NERDataset(train_words, train_tags)
test_dataset = NERDataset(test_words, test_tags)

train_loader = DataLoader(train_dataset, batch_size=256, shuffle=True, collate_fn=collate_fn)
test_loader = DataLoader(test_dataset, batch_size=256, shuffle=False, collate_fn=collate_fn)

# Model initialization
vocab_size = len(word_to_id)
embedding_dim = 100
hidden_dim = 128
num_labels = len(tag_to_id)

model = BiLSTMCRFModel(vocab_size, embedding_dim, hidden_dim, num_labels).to(device)
optimizer = optim.AdamW(model.parameters(), lr=0.001, weight_decay=1e-5)
# Halve the learning rate after 2 epochs without improvement in epoch loss.
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=2)
scaler = GradScaler()  # Mixed precision training (scaling is a no-op on CPU)

# ---------------------------------------------------------------------------
# Training loop: mixed precision + gradient accumulation, best-loss checkpoint
# ---------------------------------------------------------------------------
num_epochs = 10
accumulation_steps = 4  # effective batch size = 4 * DataLoader batch size
best_loss = float('inf')

print("Starting Training...")
for epoch in range(num_epochs):
    model.train()
    total_loss = 0
    optimizer.zero_grad()
    pending_step = False  # gradients accumulated but not yet applied

    for i, (batch_words, batch_tags) in enumerate(train_loader):
        batch_words, batch_tags = batch_words.to(device), batch_tags.to(device)
        # Non-pad positions; assumes word id 0 is reserved for padding.
        attention_mask = (batch_words != 0).to(device)

        with autocast():  # Mixed precision forward pass
            loss = model(batch_words, attention_mask, batch_tags)
            loss = loss.mean()

        # Divide by accumulation_steps so the summed gradients match one
        # full-size batch; scale for mixed-precision stability.
        scaler.scale(loss / accumulation_steps).backward()
        pending_step = True

        if (i + 1) % accumulation_steps == 0:
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()
            pending_step = False

        # BUG FIX: accumulate the UNSCALED loss. The old code summed
        # loss / accumulation_steps, under-reporting avg_loss by 4x.
        total_loss += loss.item()

    # BUG FIX: apply any leftover accumulated gradients. Previously, when
    # len(train_loader) was not a multiple of accumulation_steps, the last
    # partial accumulation was silently discarded every epoch.
    if pending_step:
        scaler.step(optimizer)
        scaler.update()
        optimizer.zero_grad()

    avg_loss = total_loss / len(train_loader)
    scheduler.step(avg_loss)

    print(f"Epoch {epoch + 1}, Loss: {avg_loss:.4f}, LR: {optimizer.param_groups[0]['lr']}")

    # Checkpoint whenever the epoch loss improves.
    if avg_loss < best_loss:
        best_loss = avg_loss
        torch.save(model.state_dict(), "best_model.pth")
        print(f"New best model saved with loss: {best_loss:.4f}")

    torch.cuda.empty_cache()  # Free cached GPU memory (no-op without CUDA)

print("Training Complete!")

# Evaluate model
def evaluate_model(model, test_loader, id_to_tag):
    """Report token-level classification metrics and plot a confusion matrix.

    Args:
        model: trained BiLSTM-CRF; called without labels it returns one
            decoded tag-id sequence per batch element (padding excluded).
        test_loader: DataLoader yielding (padded word ids, padded tag ids).
        id_to_tag: mapping from tag id to tag string.
    """
    model.eval()
    true_labels, pred_labels = [], []

    with torch.no_grad():
        for batch_words, batch_tags in test_loader:
            batch_words, batch_tags = batch_words.to(device), batch_tags.to(device)
            attention_mask = (batch_words != 0).to(device)  # non-pad positions

            pred_tags = model(batch_words, attention_mask)  # Viterbi decode

            for i in range(batch_words.shape[0]):  # Iterate over batch
                # BUG FIX: trim the gold sequence by its true (unpadded)
                # length. The old filter `t in id_to_tag` removed nothing,
                # because the pad value 0 is itself a valid tag id; alignment
                # only held by accident of the min-length trim.
                seq_len = int(attention_mask[i].sum().item())
                true_seq = [id_to_tag[t] for t in batch_tags[i][:seq_len].tolist()]
                pred_seq = [id_to_tag[p] for p in pred_tags[i][:seq_len]]

                # Defensive: keep the lists aligned even if the decoder ever
                # returns a different length than the mask implies.
                min_len = min(len(true_seq), len(pred_seq))
                true_labels.extend(true_seq[:min_len])
                pred_labels.extend(pred_seq[:min_len])

    # Sanity check before computing metrics.
    assert len(true_labels) == len(pred_labels), "Mismatch in true and predicted label counts!"

    # Imported lazily so training can run without the plotting/metrics deps.
    from sklearn.metrics import classification_report, confusion_matrix
    import seaborn as sns
    import matplotlib.pyplot as plt

    print("Classification Report:")
    print(classification_report(true_labels, pred_labels))

    cm = confusion_matrix(true_labels, pred_labels, labels=list(id_to_tag.values()))
    plt.figure(figsize=(10, 8))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=list(id_to_tag.values()), yticklabels=list(id_to_tag.values()))
    plt.xlabel('Predicted')
    plt.ylabel('True')
    plt.title('Confusion Matrix')
    plt.show()



# Evaluate
print("\nFinal Evaluation:")
evaluate_model(model, test_loader, id_to_tag)