from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments
import numpy as np
from sklearn.metrics import accuracy_score, f1_score

# Data loading: tweet_eval "sentiment" provides train/validation/test splits
# with three classes (negative, neutral, positive)
dataset = load_dataset("tweet_eval", "sentiment")

# Model Selection
model_name = "bert-base-uncased"

# Tokenization
tokenizer = AutoTokenizer.from_pretrained(model_name)

def tokenize_function(examples):
    return tokenizer(examples["text"], padding="max_length", truncation=True, max_length=128)
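
# Note (optional): padding every example to a fixed max_length keeps tensor
# shapes uniform but wastes compute on short tweets; an alternative is
# dynamic per-batch padding via transformers.DataCollatorWithPadding,
# passed to the Trainer as data_collator.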

tokenized_datasets = dataset.map(tokenize_function, batched=True)
tokenized_datasets = tokenized_datasets.remove_columns(["text"])
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
tokenized_datasets.set_format("torch")
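
# Quick sanity check (optional): confirm the tokenized columns look right.
# For BERT, expect input_ids, attention_mask, token_type_ids, and labels.
print(tokenized_datasets["train"][0].keys())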

# Model setup
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=3)
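
# Optional: attach human-readable label names (tweet_eval "sentiment" uses
# 0=negative, 1=neutral, 2=positive) so saved checkpoints report class names.
model.config.id2label = {0: "negative", 1: "neutral", 2: "positive"}
model.config.label2id = {"negative": 0, "neutral": 1, "positive": 2}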

def compute_metrics(eval_pred):
    """Compute accuracy and macro-averaged F1 from eval logits and labels."""
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    accuracy = accuracy_score(labels, predictions)
    f1 = f1_score(labels, predictions, average="macro")
    return {"accuracy": accuracy, "f1": f1}

# Training Configuration
training_args = TrainingArguments(
    output_dir="./results",
    num_train_epochs=1,  # quick single-epoch run; raise for better accuracy
    per_device_train_batch_size=80,  # reduce if you hit GPU out-of-memory errors
    per_device_eval_batch_size=80,
    warmup_steps=500,  # linear learning-rate warmup before decay
    weight_decay=0.01,  # standard AdamW regularization
    logging_dir="./logs",
    learning_rate=5e-5,  # common fine-tuning rate for BERT-sized models
    load_best_model_at_end=True,
    metric_for_best_model="accuracy",  # pick the checkpoint with the best eval accuracy
    evaluation_strategy="epoch",  # evaluate at the end of each epoch
    save_strategy="epoch",  # must match evaluation_strategy for load_best_model_at_end
    save_total_limit=2,  # keep only the two most recent checkpoints
)

# Training
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    compute_metrics=compute_metrics,  # report accuracy and macro-F1 at each eval
)

trainer.train()
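
# After training, evaluate the best checkpoint on the held-out test split
# and save the model plus tokenizer for later inference.
test_metrics = trainer.evaluate(tokenized_datasets["test"])
print(test_metrics)
trainer.save_model("./final_model")  # "./final_model" is an arbitrary output path
tokenizer.save_pretrained("./final_model")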