import pandas as pd
import numpy as np
from datasets import Dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

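# Load the DISAPERE topic-classification splits that were exported to CSV.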
dev_df = pd.read_csv("./data/DISAPERE-main/SELFExtractedData/disapere_topic_dev.csv")
train_df = pd.read_csv("./data/DISAPERE-main/SELFExtractedData/disapere_topic_train.csv")
test_df = pd.read_csv("./data/DISAPERE-main/SELFExtractedData/disapere_topic_test.csv")

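# Wrap the pandas DataFrames as Hugging Face Datasets.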
train_ds = Dataset.from_pandas(train_df)
dev_ds = Dataset.from_pandas(dev_df)
test_ds = Dataset.from_pandas(test_df)

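# SciBERT (SciVocab, uncased) as the pretrained encoder.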
model_name = "allenai/scibert_scivocab_uncased"
tokenizer = AutoTokenizer.from_pretrained(model_name)

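# Tokenize the text field, padding/truncating every example to 256 subword tokens.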
def tokenize(batch):
    return tokenizer(batch["text"], padding="max_length", truncation=True, max_length=256)


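# Apply the tokenizer to all three splits in batches.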
train_ds = train_ds.map(tokenize, batched=True)
dev_ds = dev_ds.map(tokenize, batched=True)
test_ds = test_ds.map(tokenize, batched=True)

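# Keep only the tensor columns the Trainer consumes.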
train_ds.set_format(type="torch", columns=["input_ids", "attention_mask", "label"])
dev_ds.set_format(type="torch", columns=["input_ids", "attention_mask", "label"])
test_ds.set_format(type="torch", columns=["input_ids", "attention_mask", "label"])

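# Sanity check: inspect the class distribution of the training labels.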
print(train_df['label'].value_counts().sort_index())

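# SciBERT encoder with a fresh 8-way classification head for the topic labels.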
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=8)


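# Evaluation metrics: accuracy plus macro-averaged precision/recall/F1,
# so infrequent topic classes weigh as much as frequent ones.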
def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=1)
    precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average="macro")
    acc = accuracy_score(labels, preds)
    return {"accuracy": acc, "f1": f1, "precision": precision, "recall": recall}


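# Training configuration: evaluate and checkpoint once per epoch, then reload
# the checkpoint with the best dev macro-F1 at the end of training.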
args = TrainingArguments(
    output_dir="./scibert/scibert_topic/checkpoints",
    eval_strategy="epoch",  # older transformers releases call this `evaluation_strategy`
    save_strategy="epoch",
    learning_rate=2e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=16,
    num_train_epochs=4,
    weight_decay=0.01,
    load_best_model_at_end=True,
    metric_for_best_model="f1",
)

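# The Trainer wires together the model, datasets, tokenizer, and metrics.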
trainer = Trainer(
    model=model,
    args=args,
    train_dataset=train_ds,
    eval_dataset=dev_ds,
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
)

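# Fine-tune on the training split, validating on the dev split every epoch.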
trainer.train()

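# Evaluate the best checkpoint on the held-out test split.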
results = trainer.evaluate(test_ds)
print("Test results:", results)

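# Persist the fine-tuned model and tokenizer for later inference.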
model.save_pretrained("./scibert/scibert_topic/final_model")
tokenizer.save_pretrained("./scibert/scibert_topic/final_model")
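
# Example (sketch, not part of the original pipeline): reloading the saved model
# for inference with the text-classification pipeline. The input string below is
# only an illustration; predicted labels are the integer ids used in the CSVs.
# from transformers import pipeline
# clf = pipeline(
#     "text-classification",
#     model="./scibert/scibert_topic/final_model",
#     tokenizer="./scibert/scibert_topic/final_model",
# )
# print(clf("The experimental section lacks ablations on the proposed loss."))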