# Fine-tune a small GPT-2 checkpoint on WikiText-2 with the Hugging Face Trainer.
import os
import logging

from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    Trainer,
    TrainingArguments,
    DataCollatorForLanguageModeling,
)
from datasets import load_dataset

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

model_name = "sshleifer/tiny-gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)

# GPT-2 tokenizers ship without a pad token; reuse the EOS token so that
# fixed-length padding (and the data collator) behave correctly.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

model = AutoModelForCausalLM.from_pretrained(model_name)

# WikiText-2 (raw) is small enough for a quick demonstration run.
dataset = load_dataset("wikitext", "wikitext-2-raw-v1", split="train")


def tokenize_function(examples):
    # Truncate/pad every example to a short fixed length to keep the demo fast.
    return tokenizer(
        examples["text"],
        truncation=True,
        max_length=32,
        padding="max_length",
    )


tokenized_dataset = dataset.map(tokenize_function, batched=True, remove_columns=["text"])

# mlm=False selects causal language modeling: the collator builds labels from
# input_ids (masking padding positions) instead of applying BERT-style masking.
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

training_args = TrainingArguments(
    output_dir="./gpt2-finetuned",
    overwrite_output_dir=True,
    num_train_epochs=1,
    per_device_train_batch_size=8,
    save_steps=500,
    save_total_limit=2,
    logging_steps=100,
    prediction_loss_only=True,
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset,
    data_collator=data_collator,
)

logger.info("Starting training...")
trainer.train()

# Save the fine-tuned weights and tokenizer together so the output directory
# can be reloaded later with from_pretrained.
model.save_pretrained("./gpt2-finetuned")
tokenizer.save_pretrained("./gpt2-finetuned")
logger.info("Training complete and model saved.")
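
# Optional sanity check (a minimal sketch, not part of the original training
# script): reload the saved directory and generate a short continuation to
# confirm the saved artifacts are usable. The prompt text and generation
# settings below are illustrative assumptions.
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="./gpt2-finetuned",
    tokenizer="./gpt2-finetuned",
)
sample = generator("The history of science", max_new_tokens=20, do_sample=False)
logger.info("Sample generation: %s", sample[0]["generated_text"])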