import torch
import numpy as np
from datasets import load_dataset
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    TrainingArguments,
    Trainer,
    set_seed,
    EarlyStoppingCallback,
)
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
# Set seed for reproducibility
set_seed(42)
# === Load your dataset ===
dataset = load_dataset("csv", delimiter="\t", data_files={
    "train": "data/train.tsv",
    "test_1": "data/test_1.tsv",
})
test_2 = load_dataset("csv", delimiter="\t", data_files={"test": "data/test_2.tsv"})["test"]
test_3 = load_dataset("csv", delimiter="\t", data_files={"test": "data/test_3.tsv"})["test"]
# === Split train into train/validation ===
full_train = dataset["train"].train_test_split(test_size=0.1, seed=12345)
dataset_train = full_train["train"]
dataset_valid = full_train["test"]
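# Quick sanity check on split sizes (informational only)
print(f"train: {len(dataset_train)}  valid: {len(dataset_valid)}  test_1: {len(dataset['test_1'])}")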
# === Choose your transformer model ===
model_name = "classla/bcms-bertic"
tokenizer = AutoTokenizer.from_pretrained(model_name)
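# num_labels=3 assumes the "Label" column encodes three integer classes (0, 1, 2)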
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=3)
# === Tokenization function ===
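# padding=True pads per map batch; the Trainer's default DataCollatorWithPadding
# (used because a tokenizer is passed to Trainer below) re-pads each batch at training time.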
def tokenize(batch):
    return tokenizer(batch["Sentence"], padding=True, truncation=True, max_length=128)
# === Tokenize all datasets ===
dataset_train = dataset_train.map(tokenize, batched=True)
dataset_valid = dataset_valid.map(tokenize, batched=True)
dataset["test_1"] = dataset["test_1"].map(tokenize, batched=True)
test_2 = test_2.map(tokenize, batched=True)
test_3 = test_3.map(tokenize, batched=True)
# === Rename label column ===
dataset_train = dataset_train.rename_column("Label", "labels")
dataset_valid = dataset_valid.rename_column("Label", "labels")
dataset["test_1"] = dataset["test_1"].rename_column("Label", "labels")
test_2 = test_2.rename_column("Label", "labels")
test_3 = test_3.rename_column("Label", "labels")
# === Set torch format ===
columns = ["input_ids", "attention_mask", "labels"]
dataset_train.set_format("torch", columns=columns)
dataset_valid.set_format("torch", columns=columns)
dataset["test_1"].set_format("torch", columns=columns)
test_2.set_format("torch", columns=columns)
test_3.set_format("torch", columns=columns)
# === Metrics function ===
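# Weighted averaging weights each class by its support, which is robust to class imbalance.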
def compute_metrics(p):
    preds = np.argmax(p.predictions, axis=1)
    labels = p.label_ids
    acc = accuracy_score(labels, preds)
    precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='weighted', zero_division=0)
    return {"accuracy": acc, "f1": f1, "precision": precision, "recall": recall}
# === Training arguments ===
training_args = TrainingArguments(
    output_dir="./bertic-our-group",
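    # NOTE: "evaluation_strategy" was renamed to "eval_strategy" in recent transformers releases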
    evaluation_strategy="steps",
    eval_steps=100,
    save_strategy="steps",
    save_steps=100,
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    num_train_epochs=10,
    weight_decay=0.03,
    load_best_model_at_end=True,
    metric_for_best_model="f1",
    greater_is_better=True,
    logging_dir="./logs",
    logging_steps=50,
    save_total_limit=2,
)
# === Trainer and early stopping ===
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=dataset_train,
    eval_dataset=dataset_valid,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
    callbacks=[EarlyStoppingCallback(early_stopping_patience=2)],
)
# === Train the model ===
trainer.train()
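# Because load_best_model_at_end=True, the checkpoint with the best weighted F1
# is restored after training, so the evaluations below use the best model, not the last one.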
# === Evaluation ===
print("\nTraining Evaluation:")
train_metrics = trainer.evaluate(dataset_train)
for k, v in train_metrics.items():
print(f"{k}: {v:.4f}")
print("\nValidation Evaluation:")
val_metrics = trainer.evaluate(dataset_valid)
for k, v in val_metrics.items():
print(f"{k}: {v:.4f}")
print("\nTest Set 1 Evaluation (Group 1):")
test_1_metrics = trainer.evaluate(dataset["test_1"])
for k, v in test_1_metrics.items():
print(f"{k}: {v:.4f}")
print("\nTest Set 2 Evaluation (Group 2):")
test_2_metrics = trainer.evaluate(test_2)
for k, v in test_2_metrics.items():
print(f"{k}: {v:.4f}")
print("\nTest Set 3 Evaluation (Group 3 - Us):")
test_3_metrics = trainer.evaluate(test_3)
for k, v in test_3_metrics.items():
print(f"{k}: {v:.4f}")
# === Save the final model and tokenizer ===
trainer.model.save_pretrained("bertic")
tokenizer.save_pretrained("bertic")
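# === Optional: reload the saved model for a quick prediction ===
# Minimal sanity-check sketch. The example sentence is a placeholder, and the
# mapping of label ids 0-2 to actual classes depends on how the "Label" column
# was encoded in the TSV files.
reloaded_tokenizer = AutoTokenizer.from_pretrained("bertic")
reloaded_model = AutoModelForSequenceClassification.from_pretrained("bertic")
reloaded_model.eval()
encoded = reloaded_tokenizer("Ovo je probna rečenica.", return_tensors="pt", truncation=True, max_length=128)
with torch.no_grad():
    logits = reloaded_model(**encoded).logits
print("Predicted label id:", int(logits.argmax(dim=-1)))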