import logging
import os
from datetime import datetime

import numpy as np
import pandas as pd
import torch
from torch.nn import CrossEntropyLoss
from torch.utils.data import Dataset
from transformers import (
    BertConfig,
    BertForSequenceClassification,
    BertTokenizer,
    Trainer,
    TrainingArguments,
    EarlyStoppingCallback,
)
from sklearn.model_selection import train_test_split
from sklearn.metrics import (
    accuracy_score,
    f1_score,
    precision_score,
    recall_score,
    confusion_matrix,
)
from sklearn.utils.class_weight import compute_class_weight

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
config = BertConfig.from_pretrained('bert-base-uncased', num_labels=2)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class WeightedBertForSequenceClassification(BertForSequenceClassification):
    """BERT classifier whose cross-entropy loss is weighted per class to
    counter the spam/ham imbalance."""

    def __init__(self, config, class_weights):
        super().__init__(config)
        self.class_weights = class_weights

    def forward(self, input_ids=None, attention_mask=None, labels=None, **kwargs):
        # Ask the parent for logits only (labels=None), then re-derive the
        # loss below with the class weights applied.
        outputs = super().forward(input_ids=input_ids, attention_mask=attention_mask, labels=None, **kwargs)
        logits = outputs.logits
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss(weight=self.class_weights)
            loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
        return {"loss": loss, "logits": logits}

class SMSClassificationDataset(Dataset):
    """Wraps tokenizer encodings and integer labels as a torch Dataset."""

    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = torch.tensor(labels, dtype=torch.long)

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        item = {key: val[idx] for key, val in self.encodings.items()}
        item["labels"] = self.labels[idx]
        return item

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=1)
    acc = accuracy_score(labels, predictions)
    precision = precision_score(labels, predictions, average="weighted", zero_division=0)
    recall = recall_score(labels, predictions, average="weighted")
    f1 = f1_score(labels, predictions, average="weighted")
    cm = confusion_matrix(labels, predictions)
    print("Confusion Matrix:\n", cm)
    return {
        "accuracy": acc,
        "precision": precision,
        "recall": recall,
        "f1": f1,
    }

def train():
    # The SMS spam dataset ships in latin-1; keep only the two relevant columns.
    df = pd.read_csv('data/spam.csv', encoding='iso-8859-1')[['label', 'text']]
    label_mapping = {'spam': 1, 'ham': 0}
    df['label'] = df['label'].map(label_mapping)

    # Stratify so the heavy ham/spam imbalance is preserved in both splits.
    train_texts, val_texts, train_labels, val_labels = train_test_split(
        df['text'].tolist(), df['label'].tolist(), test_size=0.25,
        random_state=42, stratify=df['label'].tolist())

    # Balanced class weights counteract the imbalance inside the loss.
    class_weights = compute_class_weight(
        class_weight='balanced',
        classes=np.unique(train_labels),
        y=train_labels,
    )
    class_weights = torch.tensor(class_weights, dtype=torch.float).to(device)

    model = WeightedBertForSequenceClassification(config, class_weights=class_weights)

    # Silence the expected missing-/unused-weight warnings from transformers.
    loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
    for logger in loggers:
        if "transformers" in logger.name.lower():
            logger.setLevel(logging.ERROR)

    # Copy the pretrained BERT weights into the weighted model.
    pretrained = BertForSequenceClassification.from_pretrained(
        "bert-base-uncased", num_labels=2, use_safetensors=True,
        attn_implementation="sdpa")
    model.load_state_dict(pretrained.state_dict(), strict=False)
    model.to(device)

    train_encodings = tokenizer(train_texts, truncation=True, padding=True, return_tensors="pt")
    val_encodings = tokenizer(val_texts, truncation=True, padding=True, return_tensors="pt")
    train_dataset = SMSClassificationDataset(train_encodings, train_labels)
    val_dataset = SMSClassificationDataset(val_encodings, val_labels)

    training_args = TrainingArguments(
        output_dir='./models/pretrained',
        num_train_epochs=5,
        per_device_train_batch_size=8,
        per_device_eval_batch_size=16,
        warmup_steps=500,
        weight_decay=0.01,
        logging_dir='./logs',
        logging_steps=10,
        eval_strategy="epoch",
        save_strategy="epoch",
        report_to="none",
        save_total_limit=1,
        load_best_model_at_end=True,
    )
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=val_dataset,
        compute_metrics=compute_metrics,
        callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],
    )
    trainer.train()

    # Persist the per-step training logs for later inspection.
    logs = trainer.state.log_history
    df_logs = pd.DataFrame(logs)
    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    os.makedirs('logs', exist_ok=True)
    df_logs.to_csv(f"logs/training_logs_{timestamp}.csv", index=False)

    tokenizer.save_pretrained('./models/pretrained')
    model.save_pretrained('./models/pretrained')
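

# --- Inference sketch (not part of the original training flow) ---
# A minimal example of how the saved artifacts might be used for prediction.
# Assumptions: train() has already written the model and tokenizer to
# ./models/pretrained, and the label mapping matches train() (1 = spam,
# 0 = ham). The `predict` helper is illustrative, not a fixed API.
def predict(texts, model_dir='./models/pretrained'):
    tok = BertTokenizer.from_pretrained(model_dir)
    clf = BertForSequenceClassification.from_pretrained(model_dir)
    clf.to(device)
    clf.eval()
    encodings = tok(texts, truncation=True, padding=True, return_tensors="pt").to(device)
    with torch.no_grad():
        logits = clf(**encodings).logits
    # Highest-logit class per message: 1 = spam, 0 = ham.
    return torch.argmax(logits, dim=1).tolist()
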

if __name__ == "__main__":
    train()