|
|
import pandas as pd |
|
|
from datasets import Dataset |
|
|
from transformers import ( |
|
|
AutoTokenizer, |
|
|
AutoModelForSequenceClassification, |
|
|
TrainingArguments, |
|
|
Trainer |
|
|
) |
|
|
import torch |
|
|
import numpy as np |
|
|
from sklearn.metrics import accuracy_score, f1_score |
|
|
|
|
|
|
|
|
# Load the raw corpus from CSV and wrap it as a Hugging Face Dataset.
# NOTE(review): downstream code expects a "text" column (tokenized below)
# and a "label" column (renamed to "labels" later) — confirm against the
# actual CSV schema; neither is visible from here.
df = pd.read_csv("./../data/text.csv")


dataset = Dataset.from_pandas(df)
|
|
|
|
|
|
|
|
# Pretrained checkpoint used for BOTH the tokenizer and the classifier,
# so vocabulary and model weights are guaranteed to match.
model_name = "distilbert-base-uncased"


tokenizer = AutoTokenizer.from_pretrained(model_name)
|
|
|
|
|
def tokenize(batch):
    """Tokenize a batch of examples for sequence classification.

    Args:
        batch: mapping with a "text" key holding a list of strings — the
            shape ``Dataset.map(..., batched=True)`` supplies.

    Returns:
        dict with "input_ids" and "attention_mask", truncated to the
        model's maximum input length.

    Padding is deliberately NOT applied here: padding at map time pads
    every example to the longest sequence in its map-batch (default 1000
    examples), wasting memory and cache space. The Trainer below is
    given the tokenizer, so it uses DataCollatorWithPadding and pads
    each training batch dynamically to that batch's own maximum.
    """
    return tokenizer(batch["text"], truncation=True)
|
|
|
|
|
# Tokenize the whole corpus once, up front (batched for speed).
dataset = dataset.map(tokenize, batched=True)

# The Trainer expects the target column to be named "labels".
dataset = dataset.rename_column("label", "labels")

# Return torch tensors and expose only the columns the model consumes.
dataset.set_format("torch", columns=["input_ids", "attention_mask", "labels"])

# Hold out 20% for evaluation. Fix the seed so the split — and therefore
# every reported metric — is reproducible across runs; the original call
# used a fresh random split each time, making runs incomparable.
train_test = dataset.train_test_split(test_size=0.2, seed=42)
|
|
|
|
|
|
|
|
# Binary sequence classifier: a freshly initialized classification head
# (num_labels=2) on top of the pretrained DistilBERT encoder; the head's
# weights are trained from scratch during fine-tuning.
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)
|
|
|
|
|
|
|
|
def compute_metrics(eval_pred):
    """Compute accuracy and (binary) F1 for one Trainer evaluation pass.

    ``eval_pred`` unpacks into the raw model logits and the gold labels;
    the predicted class is the argmax over the logit dimension.
    """
    model_logits, gold_labels = eval_pred
    predictions = np.argmax(model_logits, axis=-1)
    accuracy = accuracy_score(gold_labels, predictions)
    f1 = f1_score(gold_labels, predictions)
    return {"accuracy": accuracy, "f1": f1}
|
|
|
|
|
|
|
|
# Run configuration: 3 epochs, batch size 16 per device, evaluate and
# checkpoint once per epoch.
# NOTE(review): `evaluation_strategy` was renamed to `eval_strategy` in
# recent transformers releases — confirm the pinned version still accepts
# the old keyword before upgrading.
training_args = TrainingArguments(


    output_dir="./results",  # checkpoints are written here


    num_train_epochs=3,


    per_device_train_batch_size=16,


    per_device_eval_batch_size=16,


    evaluation_strategy="epoch",  # run eval at the end of every epoch


    logging_dir="./logs",


    save_strategy="epoch",  # save a checkpoint at the end of every epoch


)
|
|
|
|
|
|
|
|
# Wire everything into the Trainer and run fine-tuning.
trainer = Trainer(


    model=model,


    args=training_args,


    train_dataset=train_test["train"],


    eval_dataset=train_test["test"],


    compute_metrics=compute_metrics,


    # Passing the tokenizer makes the Trainer pad each batch dynamically
    # (DataCollatorWithPadding) and save the tokenizer with checkpoints.
    # NOTE(review): `tokenizer=` is deprecated in favor of
    # `processing_class=` in newer transformers — confirm against the
    # pinned version.
    tokenizer=tokenizer,


)


trainer.train()
|
|
|
|
|
|
|
|
# Persist the fine-tuned weights and tokenizer side by side so both can
# be reloaded later via from_pretrained("PigeonAIModel1").
output_dir = "PigeonAIModel1"
model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)