| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | import argparse |
| | import os |
| | import re |
| |
|
| | import numpy as np |
| | import torch |
| | from torch.optim.lr_scheduler import OneCycleLR |
| | from torch.utils.data import DataLoader, Dataset |
| |
|
| | import PIL |
| | from accelerate import Accelerator |
| | from timm import create_model |
| | from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor |
| |
|
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| |
|
| | |
def extract_label(fname):
    """Extract the class label from a pet-image file name.

    File names follow the pattern ``<label>_<index>.jpg``, e.g.
    ``great_pyrenees_107.jpg`` -> ``great_pyrenees``.

    Args:
        fname: Path (or bare file name) of the image.

    Returns:
        The label portion of the file name.

    Raises:
        ValueError: If the file name does not match the expected pattern
            (the original code raised an opaque ``AttributeError`` here).
    """
    # os.path.basename handles both "/" and the OS separator, unlike
    # splitting on os.path.sep alone.
    stem = os.path.basename(fname)
    match = re.search(r"^(.*)_\d+\.jpg$", stem)
    if match is None:
        raise ValueError(f"file name {stem!r} does not match '<label>_<n>.jpg'")
    return match.group(1)
| |
|
| |
|
class PetsDataset(Dataset):
    """Map-style dataset over pet image files.

    Each item is a dict with keys ``"image"`` (the transformed PIL image or
    tensor) and ``"label"`` (the raw label string, or its integer id when a
    ``label_to_id`` mapping is supplied).
    """

    def __init__(self, file_names, image_transform=None, label_to_id=None):
        # Store references only; images are loaded lazily in __getitem__.
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        path = self.file_names[idx]
        # Force RGB so grayscale/palette files still yield 3 channels.
        image = PIL.Image.open(path).convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(path)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
| |
|
| |
|
def training_function(config, args):
    """Run the full fine-tuning loop for a pets image classifier.

    Fine-tunes only the classifier head of a pretrained timm ``resnet50d``
    on the ``.jpg`` files found in ``args.data_dir`` and prints the eval
    accuracy after each epoch.

    Args:
        config: Dict with keys "lr", "num_epochs", "seed", "batch_size",
            "image_size".
        args: Parsed CLI namespace; uses ``data_dir``, ``cpu`` and
            ``mixed_precision``.
    """
    # The accelerator owns device placement and mixed-precision handling.
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Pull hyper-parameters out of the config dict; a scalar image_size is
    # promoted to a (H, W) pair.
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Collect all image file names (non-recursive, .jpg only).
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label vocabulary: sorted unique labels -> contiguous ids,
    # so the mapping is deterministic across runs.
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Seed all RNGs before the split so the train/eval partition is
    # reproducible for a given seed.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # 80/20 random train/eval split over file indices.
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # Training uses RandomResizedCrop as a light augmentation.
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # Evaluation uses a deterministic Resize only.
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Pretrained backbone; the head is re-created for len(label_to_id) classes.
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # Move the model to the accelerator's device *before* building the
    # on-device normalization tensors below.
    model = model.to(accelerator.device)

    # Freeze the backbone: only the classifier head receives gradients.
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # Normalization statistics from the timm model config, reshaped to
    # (1, C, 1, 1) so they broadcast over (batch, C, H, W) image batches.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Start at lr/25; OneCycleLR ramps the rate up to max_lr=lr and back.
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # One-cycle schedule, stepped once per training batch (see loop below).
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Hand everything to accelerate so it can wrap the objects for the
    # active setup (CPU / single GPU / distributed, mixed precision).
    # The unpacking order must match the argument order.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model.
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # Explicit device move; NOTE(review): a prepared dataloader
            # typically places batches on device already, so this looks
            # like a no-op safety net — confirm before removing.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()

        model.eval()
        accurate = 0
        num_elems = 0
        for _, batch in enumerate(eval_dataloader):
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            # Gather predictions and labels from all processes for metrics.
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # accelerator.print only emits on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
| |
|
| |
|
def main():
    """Entry point: parse CLI flags, assemble the config, launch training."""
    arg_parser = argparse.ArgumentParser(description="Simple example of training script.")
    arg_parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    mixed_precision_help = (
        "Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU."
    )
    arg_parser.add_argument(
        "--mixed_precision", type=str, default="no", choices=["no", "fp16", "bf16"], help=mixed_precision_help
    )
    checkpointing_help = (
        "Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch."
    )
    arg_parser.add_argument("--checkpointing_steps", type=str, default=None, help=checkpointing_help)
    arg_parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parsed_args = arg_parser.parse_args()
    # Fixed hyper-parameters for this example run.
    hyperparameters = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(hyperparameters, parsed_args)


if __name__ == "__main__":
    main()
| |
|