import json
import logging
import os
import sys
from dataclasses import dataclass, field
from functools import partial

import albumentations as A
import evaluate
import numpy as np
import torch
from albumentations.pytorch import ToTensorV2
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from torch import nn

import transformers
from transformers import (
    AutoConfig,
    AutoImageProcessor,
    AutoModelForSemanticSegmentation,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
)
from transformers.utils import check_min_version
from transformers.utils.versions import require_version


| | """ Finetuning any 🤗 Transformers model supported by AutoModelForSemanticSegmentation for semantic segmentation leveraging the Trainer API.""" |
| |
|
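# A minimal sketch of an invocation (illustrative: the file name, `--output_dir`, and the
# extra flags are assumptions; the model and dataset names are this script's defaults):
#
#   python run_semantic_segmentation.py \
#       --model_name_or_path nvidia/mit-b0 \
#       --dataset_name segments/sidewalk-semantic \
#       --output_dir ./segformer_outputs \
#       --remove_unused_columns False \
#       --do_train \
#       --do_eval \
#       --do_reduce_labels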
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.57.0.dev0")

require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt")


def reduce_labels_transform(labels: np.ndarray, **kwargs) -> np.ndarray:
    """Replace the `0` label with 255, then reduce all other labels by 1.

    Example:
        Initial class labels: 0 - background; 1 - road; 2 - car;
        Transformed class labels: 255 - background; 0 - road; 1 - car;

    **kwargs are required to use this function with albumentations.
    """
    labels[labels == 0] = 255
    labels = labels - 1
    labels[labels == 254] = 255  # 0/255 inputs became 254 after the shift; map them back to the 255 ignore value
    return labels
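
# For instance (illustrative): reduce_labels_transform(np.array([0, 1, 2, 255])) returns
# array([255, 0, 1, 255]): background becomes the ignore index, the other labels shift down by 1.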


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify
    them on the command line.
    """

    dataset_name: str | None = field(
        default="segments/sidewalk-semantic",
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: str | None = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_val_split: float | None = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: int | None = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: int | None = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    do_reduce_labels: bool | None = field(
        default=False,
        metadata={"help": "Whether or not to reduce all labels by 1 and replace background by 255."},
    )

    def __post_init__(self):
        if self.dataset_name is None:
            raise ValueError("You must specify a dataset name from the hub.")


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="nvidia/mit-b0",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: str | None = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: str | None = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str | None = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    token: str | None = field(
        default=None,
        metadata={
            "help": (
                "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
                "generated when running `hf auth login` (stored in `~/.huggingface`)."
            )
        },
    )
    trust_remote_code: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to trust the execution of code from datasets/models defined on the Hub."
                " This option should only be set to `True` for repositories you trust and in which you have read the"
                " code, as it will execute code present on the Hub on your local machine."
            )
        },
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
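    # A JSON config passed as the sole argument could look like this (a sketch; the keys are
    # fields of the dataclasses above, the values are illustrative):
    #   {
    #       "model_name_or_path": "nvidia/mit-b0",
    #       "dataset_name": "segments/sidewalk-semantic",
    #       "output_dir": "./segformer_outputs",
    #       "do_train": true,
    #       "do_eval": true
    #   }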
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Load dataset
    # In distributed training, the load_dataset function guarantees that only one local process can
    # concurrently download the dataset.
    dataset = load_dataset(
        data_args.dataset_name, cache_dir=model_args.cache_dir, trust_remote_code=model_args.trust_remote_code
    )
    # Rename column names to standardized names (only "image" and "label" need to be present)
    if "pixel_values" in dataset["train"].column_names:
        dataset = dataset.rename_columns({"pixel_values": "image"})
    if "annotation" in dataset["train"].column_names:
        dataset = dataset.rename_columns({"annotation": "label"})
    # If we don't have a validation split, split off a percentage of train as validation
    data_args.train_val_split = None if "validation" in dataset else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    if data_args.dataset_name == "scene_parse_150":
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
    else:
        repo_id = data_args.dataset_name
        filename = "id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset")))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: str(k) for k, v in id2label.items()}
    # Load the mean IoU metric from the evaluate package
    metric = evaluate.load("mean_iou", cache_dir=model_args.cache_dir)
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary mapping string to float.
    @torch.no_grad()
    def compute_metrics(eval_pred):
        logits, labels = eval_pred
        logits_tensor = torch.from_numpy(logits)
        # scale the logits to the size of the label
        logits_tensor = nn.functional.interpolate(
            logits_tensor,
            size=labels.shape[-2:],
            mode="bilinear",
            align_corners=False,
        ).argmax(dim=1)

        pred_labels = logits_tensor.detach().cpu().numpy()
        metrics = metric.compute(
            predictions=pred_labels,
            references=labels,
            num_labels=len(id2label),
            ignore_index=0,
            reduce_labels=image_processor.do_reduce_labels,
        )
        # add per category metrics as individual key-value pairs
        per_category_accuracy = metrics.pop("per_category_accuracy").tolist()
        per_category_iou = metrics.pop("per_category_iou").tolist()

        metrics.update({f"accuracy_{id2label[i]}": v for i, v in enumerate(per_category_accuracy)})
        metrics.update({f"iou_{id2label[i]}": v for i, v in enumerate(per_category_iou)})

        return metrics
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        label2id=label2id,
        id2label=id2label,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )
    model = AutoModelForSemanticSegmentation.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        do_reduce_labels=data_args.do_reduce_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )
    # Define transforms to be applied to each image and target.
    if "shortest_edge" in image_processor.size:
        # We instead set the target size as (shortest_edge, shortest_edge) to avoid resizing to a huge size.
        height, width = image_processor.size["shortest_edge"], image_processor.size["shortest_edge"]
    else:
        height, width = image_processor.size["height"], image_processor.size["width"]
    train_transforms = A.Compose(
        [
            A.Lambda(
                name="reduce_labels",
                mask=reduce_labels_transform if data_args.do_reduce_labels else None,
                p=1.0,
            ),
            # pad image with 255, because it is ignored by loss
            A.PadIfNeeded(min_height=height, min_width=width, border_mode=0, value=255, p=1.0),
            A.RandomCrop(height=height, width=width, p=1.0),
            A.HorizontalFlip(p=0.5),
            A.Normalize(mean=image_processor.image_mean, std=image_processor.image_std, max_pixel_value=255.0, p=1.0),
            ToTensorV2(),
        ]
    )
    val_transforms = A.Compose(
        [
            A.Lambda(
                name="reduce_labels",
                mask=reduce_labels_transform if data_args.do_reduce_labels else None,
                p=1.0,
            ),
            A.Resize(height=height, width=width, p=1.0),
            A.Normalize(mean=image_processor.image_mean, std=image_processor.image_std, max_pixel_value=255.0, p=1.0),
            ToTensorV2(),
        ]
    )
    def preprocess_batch(example_batch, transforms: A.Compose):
        pixel_values = []
        labels = []
        for image, target in zip(example_batch["image"], example_batch["label"]):
            transformed = transforms(image=np.array(image.convert("RGB")), mask=np.array(target))
            pixel_values.append(transformed["image"])
            labels.append(transformed["mask"])

        encoding = {}
        encoding["pixel_values"] = torch.stack(pixel_values).to(torch.float)
        encoding["labels"] = torch.stack(labels).to(torch.long)

        return encoding
    # A preprocess function for `set_transform` should take only one argument (the example batch),
    # so we use `partial` to bind the appropriate set of transforms.
    preprocess_train_batch_fn = partial(preprocess_batch, transforms=train_transforms)
    preprocess_val_batch_fn = partial(preprocess_batch, transforms=val_transforms)
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(preprocess_train_batch_fn)
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(preprocess_val_batch_fn)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        processing_class=image_processor,
        data_collator=default_data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "dataset": data_args.dataset_name,
        "tags": ["image-segmentation", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)

if __name__ == "__main__":
    main()