from datasets import load_dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)
import numpy as np
from sklearn.metrics import accuracy_score, f1_score

# Load the TweetEval sentiment dataset (train/validation/test splits,
# three classes: 0 = negative, 1 = neutral, 2 = positive).
dataset = load_dataset("tweet_eval", "sentiment")
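
# Optional: inspect the split names and sizes before tokenizing.
print(dataset)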

# BERT base (uncased) as the backbone; any encoder checkpoint with a
# sequence-classification head would work the same way.
model_name = "bert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_name)


def tokenize_function(examples):
    # Pad/truncate every tweet to a fixed 128-token window so batches
    # are rectangular without needing a data collator.
    return tokenizer(examples["text"], padding="max_length", truncation=True, max_length=128)


# Tokenize in batches, drop the raw text, and rename "label" to "labels"
# (the column name Trainer expects when computing the loss).
tokenized_datasets = dataset.map(tokenize_function, batched=True)
tokenized_datasets = tokenized_datasets.remove_columns(["text"])
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
tokenized_datasets.set_format("torch")
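
# Optional sanity check: each example should now expose input_ids,
# attention_mask, token_type_ids, and labels as torch tensors.
print(tokenized_datasets["train"][0].keys())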

# Classification head with three output labels (negative / neutral / positive).
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=3)


def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    accuracy = accuracy_score(labels, predictions)
    # Macro-F1 averages per-class F1, so each class counts equally
    # regardless of how often it appears.
    f1 = f1_score(labels, predictions, average="macro")
    return {"accuracy": accuracy, "f1": f1}


training_args = TrainingArguments(
    output_dir="./results",
    num_train_epochs=1,
    per_device_train_batch_size=80,
    per_device_eval_batch_size=80,
    warmup_steps=500,
    weight_decay=0.01,
    logging_dir="./logs",
    learning_rate=5e-5,
    load_best_model_at_end=True,
    metric_for_best_model="accuracy",
    # Evaluate and checkpoint once per epoch; load_best_model_at_end
    # requires the two strategies to match. Newer transformers releases
    # rename this argument to eval_strategy.
    evaluation_strategy="epoch",
    save_strategy="epoch",
    save_total_limit=2,
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    compute_metrics=compute_metrics,
)

trainer.train()
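
# After fine-tuning, score the held-out test split with the same metrics.
test_metrics = trainer.evaluate(eval_dataset=tokenized_datasets["test"])
print(test_metrics)

# Persist the fine-tuned weights and tokenizer for later inference
# ("./sentiment-model" is an arbitrary output path chosen here).
trainer.save_model("./sentiment-model")
tokenizer.save_pretrained("./sentiment-model")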