"""ZGBot Training Script |
|
|
File can be executed here (Link to Google Colab): |
|
|
https://colab.research.google.com/drive/1Dyn37CljZnYaQ1dXs3rCOQmdpioKB6-t |
|
|
""" |
|
|
|
|
|
!pip install datasets wandb evaluate accelerate -qU |
|
|
!pip install transformers |
|
|
|
|
|
# Authenticate with the Hugging Face Hub (for push_to_hub below) and
# with Weights & Biases (for training logs).
from huggingface_hub import notebook_login

notebook_login()

import wandb

wandb.login()

# Load a 5,000-example slice of SQuAD and carve out a 20% eval split.
from datasets import load_dataset

squad = load_dataset("squad", split="train[:5000]")
squad = squad.train_test_split(test_size=0.2)
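# A SQuAD record carries "question", "context", and "answers" fields,
# where "answers" holds parallel "text" and "answer_start" lists.
# Peeking at one row is a cheap, optional sanity check that the split
# loaded as expected.
print(squad["train"][0]["question"])
print(squad["train"][0]["answers"])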
|
|
from transformers import AutoTokenizer

# A fast tokenizer is required: the preprocessing below relies on offset
# mappings and sequence_ids(), which only fast tokenizers provide.
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
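# Quick look at how a (question, context) pair is encoded, using toy
# strings: the tokens, sequence ids, and offset mapping shown here are
# exactly what the label-alignment logic below depends on.
enc = tokenizer("Who?", "A short context.", return_offsets_mapping=True)
print(enc.tokens())
print(enc.sequence_ids())  # 0 = question tokens, 1 = context tokens, None = special
print(enc["offset_mapping"])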
|
|
|
|
|
def preprocess_function(examples):
    questions = [q.strip() for q in examples["question"]]
    inputs = tokenizer(
        questions,
        examples["context"],
        max_length=384,
        truncation="only_second",  # truncate the context, never the question
        return_offsets_mapping=True,
        padding="max_length",
    )

    offset_mapping = inputs.pop("offset_mapping")
    answers = examples["answers"]
    start_positions = []
    end_positions = []

    for i, offset in enumerate(offset_mapping):
        answer = answers[i]
        start_char = answer["answer_start"][0]
        end_char = answer["answer_start"][0] + len(answer["text"][0])
        sequence_ids = inputs.sequence_ids(i)

        # Find the start and end token indices of the context.
        idx = 0
        while sequence_ids[idx] != 1:
            idx += 1
        context_start = idx
        while sequence_ids[idx] == 1:
            idx += 1
        context_end = idx - 1

        # If the answer is not fully inside the context, label it (0, 0).
        if offset[context_start][0] > end_char or offset[context_end][1] < start_char:
            start_positions.append(0)
            end_positions.append(0)
        else:
            # Otherwise walk inward to the answer's start and end tokens.
            idx = context_start
            while idx <= context_end and offset[idx][0] <= start_char:
                idx += 1
            start_positions.append(idx - 1)

            idx = context_end
            while idx >= context_start and offset[idx][1] >= end_char:
                idx -= 1
            end_positions.append(idx + 1)

    inputs["start_positions"] = start_positions
    inputs["end_positions"] = end_positions
    return inputs
|
|
|
|
|
dataset = squad.map(
    preprocess_function,
    batched=True,
    remove_columns=squad["train"].column_names,
)
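# Sanity check (optional): decode the labeled token span for one example
# and compare it against the gold answer text. The indexing assumes the
# batched output layout of preprocess_function above; rows whose answer
# was truncated away are labeled (0, 0) and would decode to "[CLS]".
sample = squad["train"][0:1]
encoded = preprocess_function(sample)
start, end = encoded["start_positions"][0], encoded["end_positions"][0]
print(tokenizer.decode(encoded["input_ids"][0][start : end + 1]))
print(sample["answers"][0]["text"][0])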
|
|
from transformers import DefaultDataCollator

# Examples are already padded to max_length, so the default collator,
# which simply stacks the tensors, is sufficient.
data_collator = DefaultDataCollator()

from transformers import AutoModelForQuestionAnswering, TrainingArguments, Trainer

# The QA head on top of DistilBERT is freshly initialized, so the
# warning about newly initialized weights at load time is expected.
model = AutoModelForQuestionAnswering.from_pretrained("distilbert-base-uncased")

import evaluate
import numpy as np

metric = evaluate.load("accuracy")


def compute_metrics(eval_pred):
    # For question answering the Trainer passes a tuple of (start_logits,
    # end_logits) and a tuple of (start_positions, end_positions), so we
    # score token-position accuracy over both boundaries. This is a rough
    # proxy for model quality, not the official SQuAD EM/F1 metric.
    (start_logits, end_logits), (start_labels, end_labels) = eval_pred
    predictions = np.concatenate(
        [np.argmax(start_logits, axis=-1), np.argmax(end_logits, axis=-1)]
    )
    references = np.concatenate([start_labels, end_labels])
    return metric.compute(predictions=predictions, references=references)
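# Quick self-test with dummy one-hot logits (hypothetical shapes:
# batch=2, seq_len=5). Both spans are predicted exactly, so the reported
# accuracy should be 1.0.
_start = np.eye(5)[[1, 2]]
_end = np.eye(5)[[3, 4]]
print(compute_metrics(((_start, _end), (np.array([1, 2]), np.array([3, 4])))))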
|
|
|
|
|
|
|
|
|
|
args = TrainingArguments(
    output_dir="MA-saemi-5",
    report_to="wandb",
    run_name="training5",
    evaluation_strategy="steps",
    learning_rate=3e-5,
    max_steps=3000,
    logging_steps=100,
    eval_steps=250,
    # save_steps must line up with eval_steps and fall within max_steps;
    # otherwise load_best_model_at_end has no checkpoints to choose from.
    save_steps=250,
    load_best_model_at_end=True,
    metric_for_best_model="accuracy",
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    push_to_hub=True,
)
|
|
|
|
|
trainer = Trainer(
    model=model,
    args=args,
    train_dataset=dataset["train"],
    eval_dataset=dataset["test"],
    tokenizer=tokenizer,
    data_collator=data_collator,
    compute_metrics=compute_metrics,
)
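# Optional: evaluate the untrained QA head once before training to get a
# baseline point for the accuracy curve in wandb.
print(trainer.evaluate())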
|
|
|
|
|
trainer.train()

wandb.finish()

# Upload the final model, tokenizer, and training card to the Hub.
trainer.push_to_hub()
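# After training, the fine-tuned model can be tried directly with the
# question-answering pipeline. The question and context below are
# made-up examples.
from transformers import pipeline

qa = pipeline("question-answering", model=model, tokenizer=tokenizer)
print(qa(
    question="Who wrote the play?",
    context="The play was written by William Shakespeare around 1603.",
))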
|
|
|