import logging
import math

import torch
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    Trainer,
    TrainingArguments,
    DataCollatorForLanguageModeling,
    get_cosine_schedule_with_warmup,
)
from datasets import load_dataset

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

model_name = "sshleifer/tiny-gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
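
# GPT-2 tokenizers ship without a pad token, and padding="max_length" below
# needs one. Reusing the EOS token avoids adding a new embedding row (a new
# [PAD] token would also require model.resize_token_embeddings).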
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

model = AutoModelForCausalLM.from_pretrained(model_name)
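
# Load the raw WikiText-2 training split from the Hugging Face Hub.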
dataset = load_dataset("wikitext", "wikitext-2-raw-v1", split="train")
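
# Tokenize to a fixed length: truncation plus max_length padding keeps every
# batch rectangular. max_length=32 keeps the demo cheap; real runs would use
# a larger context window.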
def tokenize_function(examples):
    return tokenizer(
        examples["text"],
        truncation=True,
        max_length=32,
        padding="max_length",
    )

tokenized_dataset = dataset.map(tokenize_function, batched=True, remove_columns=["text"])
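
# mlm=False turns this into a causal-LM collator: labels are a copy of
# input_ids with pad positions set to -100 so padding is ignored by the loss.
# (Because pad reuses EOS here, genuine EOS tokens are masked as well.)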
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
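
# A deliberately small run: one epoch, a checkpoint every 1000 steps, and at
# most two checkpoints kept on disk.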
training_args = TrainingArguments(
    output_dir="./gpt2-finetuned",
    overwrite_output_dir=True,
    num_train_epochs=1,
    per_device_train_batch_size=8,
    save_steps=1000,
    save_total_limit=2,
    logging_steps=100,
    prediction_loss_only=True,
)
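
# The scheduler needs the total optimizer-step count up front. This estimate
# assumes a single device and no gradient accumulation.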
num_update_steps_per_epoch = math.ceil(len(tokenized_dataset) / training_args.per_device_train_batch_size)
max_train_steps = int(training_args.num_train_epochs * num_update_steps_per_epoch)
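
# AdamW at a learning rate in the usual range for fine-tuning GPT-2-class
# models.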
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5, weight_decay=0.1)
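
# Linear warmup over the first 100 steps, then cosine decay toward zero.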
scheduler = get_cosine_schedule_with_warmup(
    optimizer,
    num_warmup_steps=100,
    num_training_steps=max_train_steps,
)
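
# Passing optimizers=(optimizer, scheduler) overrides the Trainer's default
# AdamW + linear-decay setup with the pair built above.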
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset,
    data_collator=data_collator,
    optimizers=(optimizer, scheduler),
)

logger.info("Starting training...")
trainer.train()
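
# Save the weights and tokenizer together so the directory can be reloaded
# with from_pretrained() or a pipeline.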
model.save_pretrained("./gpt2-finetuned")
tokenizer.save_pretrained("./gpt2-finetuned")
logger.info("Training complete and model saved.")
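
# Optional sanity check, sketched on the assumption that the run above has
# finished: reload the saved directory and generate a short continuation.
# The prompt and max_new_tokens values are illustrative only.
from transformers import pipeline

generator = pipeline("text-generation", model="./gpt2-finetuned", tokenizer="./gpt2-finetuned")
logger.info(generator("The history of", max_new_tokens=20)[0]["generated_text"])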