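# Fine-tune T5-small to generate questions from answer text stored in local JSON files.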
from transformers import T5Tokenizer, T5ForConditionalGeneration, Trainer, TrainingArguments
from datasets import load_dataset

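# Load the pretrained t5-small checkpoint and its tokenizer.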
model_name = "t5-small"
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)

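# Load the training and evaluation splits from local JSON files
# (each record is expected to contain "answer" and "question" fields).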
dataset = load_dataset("json", data_files={"train": "train.json"})
evalset = load_dataset("json", data_files={"eval": "eval.json"})

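# Convert raw answer/question pairs into tokenized model inputs and labels.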
def preprocess_function(examples):
    # Prefix each answer with the instruction prompt that forms the T5 input text.
    inputs = ["Generate a question for: " + (ans if isinstance(ans, str) else "Unknown") for ans in examples["answer"]]
    model_inputs = tokenizer(inputs, max_length=512, truncation=True, padding="max_length")

    # Tokenize the target questions as labels.
    labels = [q if isinstance(q, str) else "" for q in examples["question"]]
    labels = tokenizer(labels, max_length=128, truncation=True, padding="max_length")

    # Replace padding token ids in the labels with -100 so padded positions
    # are ignored by the cross-entropy loss.
    model_inputs["labels"] = [
        [(token if token != tokenizer.pad_token_id else -100) for token in label_ids]
        for label_ids in labels["input_ids"]
    ]
    return model_inputs

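# Apply the preprocessing to both splits in batches.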
tokenized_datasets = dataset.map(preprocess_function, batched=True)
tokenized_evalsets = evalset.map(preprocess_function, batched=True)

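# Training configuration: evaluate and checkpoint once per epoch.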
training_args = TrainingArguments(
    output_dir="./results",
    evaluation_strategy="epoch",
    save_strategy="epoch",
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    num_train_epochs=3,
    weight_decay=0.01,
    logging_dir="./logs",
)

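# Wrap the model, arguments, and tokenized datasets in the Trainer API.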
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_evalsets["eval"],
)

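# Run fine-tuning.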
trainer.train()

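# Persist the fine-tuned model and tokenizer so they can be reloaded for inference.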
output_dir = "./aq_model"
trainer.save_model(output_dir)
tokenizer.save_pretrained(output_dir)

print(f"Model saved to {output_dir}")
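
# Quick sanity check after training (a minimal sketch; the sample answer text
# below is illustrative and not taken from the training data).
model.eval()
sample = "Generate a question for: The Eiffel Tower is located in Paris."
input_ids = tokenizer(sample, return_tensors="pt").input_ids.to(model.device)
generated = model.generate(input_ids, max_length=64)
print(tokenizer.decode(generated[0], skip_special_tokens=True))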