| """ |
| Training a CLIP like dual encoder models using text and vision encoders in the library. |
| |
| The script can be used to train CLIP like models for languages other than English by using |
| a text encoder pre-trained in the desired language. Currently this script supports the following vision |
| and text models: |
| Vision models: ViT(https://huggingface.co/models?filter=vit), CLIP (https://huggingface.co/models?filter=clip) |
| Text models: BERT, ROBERTa (https://huggingface.co/models?filter=fill-mask) |
| """ |

import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import torch
from datasets import load_dataset
from PIL import Image
from torchvision.io import ImageReadMode, read_image
from torchvision.transforms import CenterCrop, ConvertImageDtype, Normalize, Resize
from torchvision.transforms.functional import InterpolationMode

import transformers
from transformers import (
    AutoImageProcessor,
    AutoModel,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed.
check_min_version("4.52.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/contrastive-image-text/requirements.txt")


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    image_processor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    token: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
                "generated when running `huggingface-cli login` (stored in `~/.huggingface`)."
            )
        },
    )
    trust_remote_code: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to trust the execution of code from datasets/models defined on the Hub."
                " This option should only be set to `True` for repositories you trust and in which you have read the"
                " code, as it will execute code present on the Hub on your local machine."
            )
        },
    )
    freeze_vision_model: bool = field(
        default=False, metadata={"help": "Whether to freeze the vision model parameters or not."}
    )
    freeze_text_model: bool = field(
        default=False, metadata={"help": "Whether to freeze the text model parameters or not."}
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    data_dir: Optional[str] = field(default=None, metadata={"help": "The data directory containing input files."})
    image_column: Optional[str] = field(
        default="image_path",
        metadata={"help": "The name of the column in the datasets containing the full image file paths."},
    )
    caption_column: Optional[str] = field(
        default="caption",
        metadata={"help": "The name of the column in the datasets containing the image captions."},
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a jsonlines file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file (a jsonlines file)."},
    )
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )

    def __post_init__(self):
        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
            raise ValueError("Need either a dataset name or a training/validation file.")
        else:
            if self.train_file is not None:
                extension = self.train_file.split(".")[-1]
                assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            if self.validation_file is not None:
                extension = self.validation_file.split(".")[-1]
                assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


dataset_name_mapping = {
    "image_caption_dataset.py": ("image_path", "caption"),
}
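
# The mapping above supplies default (image, caption) column names for known dataset loading scripts.
# A hypothetical entry for a dataset with differently named columns could be added as:
#   dataset_name_mapping["my_captions.py"] = ("file_name", "text")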


# We use torchvision for faster image pre-processing. The transforms are implemented as nn.Module,
# so we jit them to run faster.
class Transform(torch.nn.Module):
    def __init__(self, image_size, mean, std):
        super().__init__()
        self.transforms = torch.nn.Sequential(
            Resize([image_size], interpolation=InterpolationMode.BICUBIC),
            CenterCrop(image_size),
            # `read_image` yields uint8 tensors in [0, 255]; convert to float in [0.0, 1.0] before normalizing.
            ConvertImageDtype(torch.float),
            Normalize(mean, std),
        )

    def forward(self, x) -> torch.Tensor:
        """`x` should be a uint8 `torch.Tensor` of shape (C, H, W), e.g. as returned by `read_image`."""
        with torch.no_grad():
            x = self.transforms(x)
        return x
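
# A usage sketch (illustrative size/mean/std; main() below derives them from the loaded configs):
#
#   transform = torch.jit.script(Transform(224, [0.481, 0.458, 0.408], [0.269, 0.261, 0.276]))
#   pixel_values = transform(read_image("photo.jpg", mode=ImageReadMode.RGB))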


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    input_ids = torch.tensor([example["input_ids"] for example in examples], dtype=torch.long)
    attention_mask = torch.tensor([example["attention_mask"] for example in examples], dtype=torch.long)
    return {
        "pixel_values": pixel_values,
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        # `return_loss=True` makes CLIP-like models compute and return the contrastive loss.
        "return_loss": True,
    }
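
# For a batch of two examples, the collator returns roughly (shapes assume 224x224 images and
# max_seq_length=128, both of which depend on the loaded configs):
#   {"pixel_values": float32 (2, 3, 224, 224), "input_ids": int64 (2, 128),
#    "attention_mask": int64 (2, 128), "return_loss": True}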


def main():
    # 1. Parse input arguments
    # See all the possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them.
    # The information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_clip", model_args, data_args)

    # 2. Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log a small summary on each process:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # 3. Detecting last checkpoint and eventually continue from last checkpoint
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # 4. Load dataset
    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files this script will use the first column for the full image path and the second column for
    # the captions (unless you specify column names with the `image_column` and `caption_column` arguments).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            keep_in_memory=False,
            data_dir=data_args.data_dir,
            token=model_args.token,
            trust_remote_code=model_args.trust_remote_code,
        )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
            extension = data_args.train_file.split(".")[-1]
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
            extension = data_args.validation_file.split(".")[-1]
        dataset = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            token=model_args.token,
        )
        # See more about loading any type of standard or custom dataset (from files, python dict, pandas
        # DataFrame, etc.) at https://huggingface.co/docs/datasets/.

    # 5. Load pretrained model, tokenizer, and image processor
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.tokenizer_name,
            cache_dir=model_args.cache_dir,
            use_fast=model_args.use_fast_tokenizer,
            token=model_args.token,
            trust_remote_code=model_args.trust_remote_code,
        )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.model_name_or_path,
            cache_dir=model_args.cache_dir,
            use_fast=model_args.use_fast_tokenizer,
            token=model_args.token,
            trust_remote_code=model_args.trust_remote_code,
        )
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    # Load the image processor; in this script we only use it to get the mean and std for normalization.
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )

    model = AutoModel.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )
    config = model.config
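
    # Note: `AutoModel` expects a ready dual-encoder checkpoint (e.g. a CLIP model). To pair a separate vision
    # and text encoder, as described in the module docstring, one can first build and save such a checkpoint
    # (the model identifiers below are illustrative):
    #
    #   from transformers import VisionTextDualEncoderModel
    #   dual = VisionTextDualEncoderModel.from_vision_text_pretrained(
    #       "openai/clip-vit-base-patch32", "FacebookAI/roberta-base"
    #   )
    #   dual.save_pretrained("clip-roberta")  # then pass --model_name_or_path clip-roberta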

    def _freeze_params(module):
        for param in module.parameters():
            param.requires_grad = False

    if model_args.freeze_vision_model:
        _freeze_params(model.vision_model)

    if model_args.freeze_text_model:
        _freeze_params(model.text_model)

    # Set seed for the torch dataloaders.
    set_seed(training_args.seed)

    # 6. Get the column names for input/target.
    if training_args.do_train:
        column_names = dataset["train"].column_names
    elif training_args.do_eval:
        column_names = dataset["validation"].column_names
    else:
        logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
        return

    dataset_columns = dataset_name_mapping.get(data_args.dataset_name, None)
    if data_args.image_column is None:
        image_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
    else:
        image_column = data_args.image_column
        if image_column not in column_names:
            raise ValueError(
                f"`--image_column` value '{data_args.image_column}' needs to be one of: {', '.join(column_names)}"
            )
    if data_args.caption_column is None:
        caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
    else:
        caption_column = data_args.caption_column
        if caption_column not in column_names:
            raise ValueError(
                f"`--caption_column` value '{data_args.caption_column}' needs to be one of: {', '.join(column_names)}"
            )

    # 7. Preprocessing the datasets.
    # Initialize torchvision transforms and jit them for faster processing.
    image_transformations = Transform(
        config.vision_config.image_size, image_processor.image_mean, image_processor.image_std
    )
    image_transformations = torch.jit.script(image_transformations)

    # We need to tokenize input captions and transform the images.
    def tokenize_captions(examples):
        captions = list(examples[caption_column])
        text_inputs = tokenizer(captions, max_length=data_args.max_seq_length, padding="max_length", truncation=True)
        examples["input_ids"] = text_inputs.input_ids
        examples["attention_mask"] = text_inputs.attention_mask
        return examples

    def transform_images(examples):
        images = [read_image(image_file, mode=ImageReadMode.RGB) for image_file in examples[image_column]]
        examples["pixel_values"] = [image_transformations(image) for image in images]
        return examples

    def filter_corrupt_images(examples):
        """Remove examples whose image files cannot be opened."""
        valid_images = []
        for image_file in examples[image_column]:
            try:
                Image.open(image_file)
                valid_images.append(True)
            except Exception:
                valid_images.append(False)
        return valid_images
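
    # Note: `PIL.Image.open` is lazy and only reads the file header, so this filter cheaply drops missing or
    # unreadable files; a file with a valid header but truncated data may still fail later in `read_image`.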

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = dataset["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))

        train_dataset = train_dataset.filter(
            filter_corrupt_images, batched=True, num_proc=data_args.preprocessing_num_workers
        )
        train_dataset = train_dataset.map(
            function=tokenize_captions,
            batched=True,
            remove_columns=[col for col in column_names if col != image_column],
            num_proc=data_args.preprocessing_num_workers,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on train dataset",
        )

        # Transform images on the fly, as doing it on the whole dataset takes too much space.
        train_dataset.set_transform(transform_images)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = dataset["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))

        eval_dataset = eval_dataset.filter(
            filter_corrupt_images, batched=True, num_proc=data_args.preprocessing_num_workers
        )
        eval_dataset = eval_dataset.map(
            function=tokenize_captions,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            remove_columns=[col for col in column_names if col != image_column],
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on validation dataset",
        )

        # Transform images on the fly, as doing it on the whole dataset takes too much space.
        eval_dataset.set_transform(transform_images)

    # 8. Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        data_collator=collate_fn,
    )

    # 9. Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
        image_processor.save_pretrained(training_args.output_dir)
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # 10. Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # 11. Write training stats and push to hub.
    finetuned_from = model_args.model_name_or_path
    # If `model_name_or_path` is a local directory, don't set `finetuned_from`: the model card expects a valid
    # model id on the Hub.
    if os.path.isdir(finetuned_from):
        finetuned_from = None
    kwargs = {"finetuned_from": finetuned_from, "tasks": "contrastive-image-text-modeling"}
    if data_args.dataset_name is not None:
        kwargs["dataset_tags"] = data_args.dataset_name
        if data_args.dataset_config_name is not None:
            kwargs["dataset_args"] = data_args.dataset_config_name
            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
        else:
            kwargs["dataset"] = data_args.dataset_name

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()