from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments
import numpy as np
from sklearn.metrics import accuracy_score, f1_score
# Data loading
dataset = load_dataset("tweet_eval", "sentiment")
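# tweet_eval's "sentiment" config provides train/validation/test splits, each with a
# "text" column and an integer "label" column (0 = negative, 1 = neutral, 2 = positive).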
# Model Selection
model_name = "bert-base-uncased"
# Tokenization
tokenizer = AutoTokenizer.from_pretrained(model_name)
def tokenize_function(examples):
    return tokenizer(examples["text"], padding="max_length", truncation=True, max_length=128)
tokenized_datasets = dataset.map(tokenize_function, batched=True)
tokenized_datasets = tokenized_datasets.remove_columns(["text"])
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
tokenized_datasets.set_format("torch")
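# Optional sanity check (an addition, not in the original script): confirm the
# expected columns survived preprocessing before launching a long training run.
print(tokenized_datasets["train"].column_names)  # e.g. ['labels', 'input_ids', 'token_type_ids', 'attention_mask']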
# Model setup
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=3)
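# Note: the sequence-classification head on top of BERT is freshly initialized, so the
# "some weights were newly initialized" warning from from_pretrained is expected here.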
def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    accuracy = accuracy_score(labels, predictions)
    f1 = f1_score(labels, predictions, average="macro")
    return {"accuracy": accuracy, "f1": f1}
# Training Configuration
training_args = TrainingArguments(
    output_dir="./results",
    num_train_epochs=1,                # increase for better performance
    per_device_train_batch_size=80,    # reduce if you hit out-of-memory errors
    per_device_eval_batch_size=80,
    warmup_steps=500,                  # linear learning-rate warmup before decay
    weight_decay=0.01,
    logging_dir="./logs",
    learning_rate=5e-5,                # a standard starting point for BERT fine-tuning
    load_best_model_at_end=True,
    metric_for_best_model="accuracy",  # pick the checkpoint with the best validation accuracy
    evaluation_strategy="epoch",       # evaluate at the end of each epoch
                                       # (renamed to eval_strategy in recent transformers releases)
    save_strategy="epoch",             # must match evaluation_strategy when load_best_model_at_end=True
    save_total_limit=2,                # keep at most 2 checkpoints on disk
)
# Training
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["validation"],
    compute_metrics=compute_metrics,  # report accuracy and macro-F1 at each evaluation
)
trainer.train()
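# Optional follow-up (an addition, not in the original script): score the held-out
# test split and persist the fine-tuned model and tokenizer. The output path
# "./results/final_model" is an assumed choice, not from the original.
test_metrics = trainer.evaluate(tokenized_datasets["test"])
print(test_metrics)
trainer.save_model("./results/final_model")
tokenizer.save_pretrained("./results/final_model")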