import argparse
import os

import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader

import evaluate
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
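# This example fine-tunes "bert-base-cased" on the GLUE MRPC task with Accelerate. The training
# loop below is wrapped with `find_executable_batch_size`, which retries it with a smaller batch
# size whenever an out-of-memory error is raised, so OOM failures do not interrupt the run.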
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset,
    using "bert-base-cased" as the tokenizer.

    Args:
        accelerator (`Accelerator`):
            An `Accelerator` object
        batch_size (`int`, *optional*):
            The batch size for the train and validation DataLoaders.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
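        # max_length=None falls back to the model's maximum input length (truncation still applies)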
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
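    # Tokenize on the main process first so the other processes can reuse the cached dataset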
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )
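    # Rename the "label" column to "labels", the keyword argument expected by transformers models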
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
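        # On TPU, pad everything to a fixed length so XLA does not recompile for every new batch shape;
        # otherwise pad dynamically to the longest sequence in the batch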
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
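    # Instantiate the train and validation dataloaders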
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
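# For testing only: swap in lightweight mocked dataloaders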
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders
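# Builds the dataloaders, model, optimizer and scheduler, then runs the training and evaluation loops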
def training_function(config, args):
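    # For testing only: keep the run short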
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
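    # Initialize the accelerator, which handles device placement and mixed precision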
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
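    # Pull the hyperparameters out of the config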
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")
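    # The inner training loop takes the batch size as its only argument and builds everything that
    # depends on it (dataloaders, model, optimizer) inside. The decorator runs the loop and, on a
    # CUDA out-of-memory error, halves the batch size and retries until the loop fits in memory.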
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
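        # Reuse the Accelerator object from the enclosing scope instead of creating a new one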
        nonlocal accelerator
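        # Drop references held from any previous (failed) attempt and clear the device cache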
        accelerator.free_memory()
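        # Re-seed so weight initialization and shuffling stay reproducible across retries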
        set_seed(seed)
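        # Instantiate the model after seeding so the newly initialized classification head is reproducible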
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
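        # `prepare` below also handles device placement; the explicit `to` is kept for illustration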
        model = model.to(accelerator.device)
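        # Instantiate the optimizer and build the dataloaders for the current candidate batch size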
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
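        # Instantiate the learning-rate scheduler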
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )
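        # Prepare everything with the accelerator; unpack in the same order the objects were passed in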
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
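        # Training loop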
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
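                # The prepared dataloader already moves each batch to the right device; this call is redundant but harmless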
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
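                # Gather predictions and labels across processes, dropping the samples duplicated to pad the last batch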
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
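            # accelerator.print prints only on the main process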
            accelerator.print(f"epoch {epoch}:", eval_metric)
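    # Call the decorated loop with no arguments; the decorator supplies the batch size to try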
    inner_training_loop()
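# Command-line entry point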
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default="no",
        choices=["no", "fp16", "bf16"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()