import copy
import gc
import glob
import inspect
import json
import os
import os.path
import pickle
import random
import sys
import tempfile
import unittest
import unittest.mock as mock
import warnings
from pathlib import Path
from typing import Dict, List, Tuple

import numpy as np
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from pytest import mark
from requests.exceptions import HTTPError

import transformers
from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForSequenceClassification,
    PretrainedConfig,
    is_torch_available,
    logging,
)
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import (
    MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES,
    MODEL_FOR_BACKBONE_MAPPING_NAMES,
    MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES,
    MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES,
    MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES,
    MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    MODEL_MAPPING_NAMES,
)
from transformers.testing_utils import (
    TOKEN,
    USER,
    CaptureLogger,
    TestCasePlus,
    is_pt_flax_cross_test,
    is_pt_tf_cross_test,
    is_staging_test,
    require_accelerate,
    require_safetensors,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    require_usr_bin_time,
    slow,
    torch_device,
)
from transformers.utils import (
    CONFIG_NAME,
    GENERATION_CONFIG_NAME,
    SAFE_WEIGHTS_INDEX_NAME,
    SAFE_WEIGHTS_NAME,
    WEIGHTS_INDEX_NAME,
    WEIGHTS_NAME,
    is_accelerate_available,
    is_flax_available,
    is_tf_available,
    is_torch_fx_available,
)
from transformers.utils.generic import ModelOutput


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig, NoSuperInitConfig  # noqa E402


if is_accelerate_available():
    from accelerate.utils import compute_module_sizes


if is_torch_available():
    import torch
    from test_module.custom_modeling import CustomModel, NoSuperInitModel
    from torch import nn

    from transformers import (
        BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        MODEL_MAPPING,
        AdaptiveEmbedding,
        AutoModelForCausalLM,
        AutoTokenizer,
        BertConfig,
        BertModel,
        CLIPTextModel,
        PreTrainedModel,
        T5Config,
        T5ForConditionalGeneration,
    )
    from transformers.modeling_utils import shard_checkpoint

    class BaseModel(PreTrainedModel):
        config_class = PretrainedConfig

        def __init__(self, config):
            super().__init__(config)
            self.linear = nn.Linear(4, 5)
            self.linear_2 = nn.Linear(5, 6)

        def forward(self, x):
            return self.linear_2(self.linear(x))

    class ModelWithHead(PreTrainedModel):
        base_model_prefix = "base"
        config_class = PretrainedConfig

        def _init_weights(self, module):
            pass

        def __init__(self, config):
            super().__init__(config)
            self.base = BaseModel(config)
            # `linear` is a common name between the base model and the head on purpose
            self.linear = nn.Linear(6, 3)
            self.linear2 = nn.Linear(3, 5)

        def forward(self, x):
            return self.linear2(self.linear(self.base(x)))


if is_tf_available():
    import tensorflow as tf

if is_flax_available():
    import jax.numpy as jnp

    from transformers.modeling_flax_pytorch_utils import (
        convert_pytorch_state_dict_to_flax,
        load_flax_weights_in_pytorch_model,
    )

if is_torch_fx_available():
    from transformers.utils.fx import symbolic_trace


def _config_zero_init(config):
    configs_no_init = copy.deepcopy(config)
    for key in configs_no_init.__dict__.keys():
        if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
            setattr(configs_no_init, key, 1e-10)
        if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
            no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
            setattr(configs_no_init, key, no_init_subconfig)
    return configs_no_init
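
# A minimal sketch of the effect (hypothetical values): a config built with
# `BertConfig(initializer_range=0.02)` comes back from `_config_zero_init` with
# `initializer_range == 1e-10`, so freshly initialized weights land at ~0 and
# `test_initialization` below can flag any parameter a model forgot to initialize.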


TINY_T5 = "patrickvonplaten/t5-tiny-random"
TINY_BERT_FOR_TOKEN_CLASSIFICATION = "hf-internal-testing/tiny-bert-for-token-classification"


def _mock_init_weights(self, module):
    for name, param in module.named_parameters(recurse=False):
        # Fill every parameter with a deterministic value derived from the first letter of its name
        value = ord(name[0].lower()) - 110
        param.data.fill_(value)
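
# For example, a parameter named "weight" is filled with ord("w") - 110 == 9 and one
# named "bias" with ord("b") - 110 == -12, so two independently "initialized" copies
# of the same architecture come out bit-identical and can be compared exactly in the
# fast-init tests below.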


def _mock_all_init_weights(self):
    # Prune heads if needed
    if self.config.pruned_heads:
        self.prune_heads(self.config.pruned_heads)

    import transformers.modeling_utils

    if transformers.modeling_utils._init_weights:
        for module in self.modules():
            module._is_hf_initialized = False

        # Initialize weights
        self.apply(self._initialize_weights)

        # Tie weights should be skipped when not initializing all weights
        # since from_pretrained(...) calls tie_weights anyways
        self.tie_weights()


@require_torch
class ModelTesterMixin:
    model_tester = None
    all_model_classes = ()
    all_generative_model_classes = ()
    fx_compatible = False
    test_torchscript = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = False
    test_head_masking = True
    test_mismatched_shapes = True
    test_missing_keys = True
    test_model_parallel = False
    is_encoder_decoder = False
    has_attentions = True
    model_split_percents = [0.5, 0.7, 0.9]
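
    # A minimal sketch (hypothetical names) of how this mixin is typically subclassed
    # in a per-model test file:
    #
    #     class BertModelTest(ModelTesterMixin, unittest.TestCase):
    #         all_model_classes = (BertModel, BertForMaskedLM) if is_torch_available() else ()
    #
    #         def setUp(self):
    #             # BertModelTester provides prepare_config_and_inputs_for_common()
    #             self.model_tester = BertModelTester(self)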
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        elif model_class.__name__ in get_values(MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES):
            inputs_dict.pop("attention_mask")

        if return_labels:
            if model_class.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class.__name__ in [
                *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES),
                *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES),
            ]:
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class.__name__ in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES),
                *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES),
                *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES),
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES),
                *get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class.__name__ in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES),
                *get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES),
                *get_values(MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES),
                *get_values(MODEL_FOR_MASKED_LM_MAPPING_NAMES),
                *get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
            elif model_class.__name__ in get_values(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES):
                num_patches = self.model_tester.image_size // self.model_tester.patch_size
                inputs_dict["bool_masked_pos"] = torch.zeros(
                    (self.model_tester.batch_size, num_patches**2), dtype=torch.long, device=torch_device
                )
            elif model_class.__name__ in get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES):
                batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape
                inputs_dict["labels"] = torch.zeros(
                    [self.model_tester.batch_size, height, width], device=torch_device
                ).long()

        return inputs_dict

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_save_load(out1, out2):
            # make sure we don't have nans
            out_2 = out2.cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            out_1 = out1.cpu().numpy()
            out_1[np.isnan(out_1)] = 0
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                first = model(**self._prepare_for_class(inputs_dict, model_class))[0]

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)

                # the config file (and the generation config file, if it can generate) should be saved
                self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME)))
                self.assertEqual(
                    model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME))
                )

                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                with torch.no_grad():
                    second = model(**self._prepare_for_class(inputs_dict, model_class))[0]

            if isinstance(first, tuple) and isinstance(second, tuple):
                for tensor1, tensor2 in zip(first, second):
                    check_save_load(tensor1, tensor2)
            else:
                check_save_load(first, second)

    def test_from_pretrained_no_checkpoint(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            state_dict = model.state_dict()

            new_model = model_class.from_pretrained(
                pretrained_model_name_or_path=None, config=config, state_dict=state_dict
            )
            for p1, p2 in zip(model.parameters(), new_model.parameters()):
                self.assertTrue(torch.equal(p1, p2))

    def test_save_load_keys_to_ignore_on_save(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            _keys_to_ignore_on_save = getattr(model, "_keys_to_ignore_on_save", None)
            if _keys_to_ignore_on_save is None:
                continue

            # check the keys are in the original state_dict
            for k in _keys_to_ignore_on_save:
                self.assertIn(k, model.state_dict().keys(), "\n".join(model.state_dict().keys()))

            # check that certain keys didn't get saved with the model
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                output_model_file = os.path.join(tmpdirname, WEIGHTS_NAME)
                state_dict_saved = torch.load(output_model_file)
                for k in _keys_to_ignore_on_save:
                    self.assertNotIn(k, state_dict_saved.keys(), "\n".join(state_dict_saved.keys()))

                # Test we can load the state dict in the model, necessary for the checkpointing API in Trainer.
                load_result = model.load_state_dict(state_dict_saved, strict=False)
                self.assertTrue(
                    len(load_result.missing_keys) == 0
                    or set(load_result.missing_keys) == set(model._keys_to_ignore_on_save)
                )
                self.assertTrue(len(load_result.unexpected_keys) == 0)

    def test_gradient_checkpointing_backward_compatibility(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            if not model_class.supports_gradient_checkpointing:
                continue

            config.gradient_checkpointing = True
            model = model_class(config)
            self.assertTrue(model.is_gradient_checkpointing)

    def test_gradient_checkpointing_enable_disable(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            if not model_class.supports_gradient_checkpointing:
                continue

            # at init, the model should have gradient checkpointing disabled
            model = model_class(config)
            self.assertFalse(model.is_gradient_checkpointing)

            # check enable works
            model.gradient_checkpointing_enable()
            self.assertTrue(model.is_gradient_checkpointing)

            # check disable works
            model.gradient_checkpointing_disable()
            self.assertFalse(model.is_gradient_checkpointing)

    def test_save_load_fast_init_from_base(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if config.__class__ not in MODEL_MAPPING:
            return
        base_class = MODEL_MAPPING[config.__class__]

        if isinstance(base_class, tuple):
            base_class = base_class[0]

        for model_class in self.all_model_classes:
            if model_class == base_class:
                continue

            # make a copy of the model class so the modifications below don't break future tests
            class CopyClass(model_class):
                pass

            model_class_copy = CopyClass

            # make sure that all keys are expected for the test
            model_class_copy._keys_to_ignore_on_load_missing = []

            # make init deterministic, but make sure that
            # non-initialized weights throw errors nevertheless
            model_class_copy._init_weights = _mock_init_weights
            model_class_copy.init_weights = _mock_all_init_weights

            model = base_class(config)
            state_dict = model.state_dict()

            # this will often delete a single weight of a multi-weight module
            # to test an edge case
            random_key_to_del = random.choice(list(state_dict.keys()))
            del state_dict[random_key_to_del]

            # check that certain keys didn't get saved with the model
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                torch.save(state_dict, os.path.join(tmpdirname, "pytorch_model.bin"))

                model_fast_init = model_class_copy.from_pretrained(tmpdirname)
                model_slow_init = model_class_copy.from_pretrained(tmpdirname, _fast_init=False)
                # fast init and slow init should produce exactly the same weights

                for key in model_fast_init.state_dict().keys():
                    if isinstance(model_slow_init.state_dict()[key], torch.BoolTensor):
                        max_diff = (model_slow_init.state_dict()[key] ^ model_fast_init.state_dict()[key]).sum().item()
                    else:
                        max_diff = (model_slow_init.state_dict()[key] - model_fast_init.state_dict()[key]).sum().item()
                    self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_save_load_fast_init_to_base(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if config.__class__ not in MODEL_MAPPING:
            return
        base_class = MODEL_MAPPING[config.__class__]

        if isinstance(base_class, tuple):
            base_class = base_class[0]

        for model_class in self.all_model_classes:
            if model_class == base_class:
                continue

            # make a copy of the base class so the modifications below don't break future tests
            class CopyClass(base_class):
                pass

            base_class_copy = CopyClass

            # make sure that all keys are expected for the test
            base_class_copy._keys_to_ignore_on_load_missing = []

            # make init deterministic, but make sure that
            # non-initialized weights throw errors nevertheless
            base_class_copy._init_weights = _mock_init_weights
            base_class_copy.init_weights = _mock_all_init_weights

            model = model_class(config)
            state_dict = model.state_dict()

            # this will often delete a single weight of a multi-weight module
            # to test an edge case
            random_key_to_del = random.choice(list(state_dict.keys()))
            del state_dict[random_key_to_del]

            # check that certain keys didn't get saved with the model
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.config.save_pretrained(tmpdirname)
                torch.save(state_dict, os.path.join(tmpdirname, "pytorch_model.bin"))

                model_fast_init = base_class_copy.from_pretrained(tmpdirname)
                model_slow_init = base_class_copy.from_pretrained(tmpdirname, _fast_init=False)

                for key in model_fast_init.state_dict().keys():
                    if isinstance(model_slow_init.state_dict()[key], torch.BoolTensor):
                        max_diff = torch.max(
                            model_slow_init.state_dict()[key] ^ model_fast_init.state_dict()[key]
                        ).item()
                    else:
                        max_diff = torch.max(
                            torch.abs(model_slow_init.state_dict()[key] - model_fast_init.state_dict()[key])
                        ).item()
                    self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_determinism(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_determinism(first, second):
            out_1 = first.cpu().numpy()
            out_2 = second.cpu().numpy()
            out_1 = out_1[~np.isnan(out_1)]
            out_2 = out_2[~np.isnan(out_2)]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                first = model(**self._prepare_for_class(inputs_dict, model_class))[0]
                second = model(**self._prepare_for_class(inputs_dict, model_class))[0]

            if isinstance(first, tuple) and isinstance(second, tuple):
                for tensor1, tensor2 in zip(first, second):
                    check_determinism(tensor1, tensor2)
            else:
                check_determinism(first, second)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            if model.config.is_encoder_decoder:
                expected_arg_names = [
                    "input_ids",
                    "attention_mask",
                    "decoder_input_ids",
                    "decoder_attention_mask",
                ]
                expected_arg_names.extend(
                    ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"]
                    if all(name in arg_names for name in ["head_mask", "decoder_head_mask", "cross_attn_head_mask"])
                    else ["encoder_outputs"]
                )
                self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
            else:
                expected_arg_names = ["input_ids"]
                self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_attention_outputs(self):
        if not self.has_attentions:
            self.skipTest(reason="Model does not output attentions")

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        chunk_length = getattr(self.model_tester, "chunk_length", None)
        if chunk_length is not None and hasattr(self.model_tester, "num_hashes"):
            encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also works using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )
            out_len = len(outputs)

            if self.is_encoder_decoder:
                correct_outlen = 5

                # loss is at first position
                if "labels" in inputs_dict:
                    correct_outlen += 1  # loss is added to beginning
                # Question Answering models return start_logits and end_logits
                if model_class.__name__ in [
                    *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES),
                    *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES),
                ]:
                    correct_outlen += 1  # start_logits and end_logits instead of only 1 output
                if "past_key_values" in outputs:
                    correct_outlen += 1  # past_key_values have been returned

                self.assertEqual(out_len, correct_outlen)

                # decoder attentions
                decoder_attentions = outputs.decoder_attentions
                self.assertIsInstance(decoder_attentions, (list, tuple))
                self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(decoder_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
                )

                # cross attentions
                cross_attentions = outputs.cross_attentions
                self.assertIsInstance(cross_attentions, (list, tuple))
                self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(cross_attentions[0].shape[-3:]),
                    [
                        self.model_tester.num_attention_heads,
                        decoder_seq_length,
                        encoder_key_length,
                    ],
                )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, "num_hidden_states_types"):
                added_hidden_states = self.model_tester.num_hidden_states_types
            elif self.is_encoder_decoder:
                added_hidden_states = 2
            else:
                added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            if chunk_length is not None:
                self.assertListEqual(
                    list(self_attentions[0].shape[-4:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length],
                )
            else:
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_torchscript_simple(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self._create_and_check_torchscript(config, inputs_dict)

    @slow
    def test_torchscript_output_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_attentions = True
        self._create_and_check_torchscript(config, inputs_dict)

    @slow
    def test_torchscript_output_hidden_state(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        self._create_and_check_torchscript(config, inputs_dict)

    # This is copied from `torch/testing/_internal/jit_utils.py::clear_class_registry`
    def clear_torch_jit_class_registry(self):
        torch._C._jit_clear_class_registry()
        torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore()
        # torch 1.8 has no `_clear_class_state` in `torch.jit._state`
        if hasattr(torch.jit._state, "_clear_class_state"):
            torch.jit._state._clear_class_state()

    def _create_and_check_torchscript(self, config, inputs_dict):
        if not self.test_torchscript:
            return

        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        configs_no_init.torchscript = True
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class)

            main_input_name = model_class.main_input_name

            try:
                if model.config.is_encoder_decoder:
                    # disable cache: past_key_values are not traceable as positional inputs
                    model.config.use_cache = False
                    main_input = inputs[main_input_name]
                    attention_mask = inputs["attention_mask"]
                    decoder_input_ids = inputs["decoder_input_ids"]
                    decoder_attention_mask = inputs["decoder_attention_mask"]
                    model(main_input, attention_mask, decoder_input_ids, decoder_attention_mask)
                    traced_model = torch.jit.trace(
                        model, (main_input, attention_mask, decoder_input_ids, decoder_attention_mask)
                    )
                elif "bbox" in inputs and "image" in inputs:  # LayoutLMv2 requires additional inputs
                    input_ids = inputs["input_ids"]
                    bbox = inputs["bbox"]
                    image = inputs["image"].tensor
                    model(input_ids, bbox, image)
                    traced_model = torch.jit.trace(
                        model, (input_ids, bbox, image), check_trace=False
                    )  # when the traced model is checked, an error is produced due to name mangling
                else:
                    main_input = inputs[main_input_name]
                    model(main_input)
                    traced_model = torch.jit.trace(model, main_input)
            except RuntimeError:
                self.fail("Couldn't trace module.")

            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")

                try:
                    torch.jit.save(traced_model, pt_file_name)
                except Exception:
                    self.fail("Couldn't save module.")

                try:
                    loaded_model = torch.jit.load(pt_file_name)
                except Exception:
                    self.fail("Couldn't load module.")

            model.to(torch_device)
            model.eval()

            loaded_model.to(torch_device)
            loaded_model.eval()

            model_state_dict = model.state_dict()
            loaded_model_state_dict = loaded_model.state_dict()

            non_persistent_buffers = {}
            for key in loaded_model_state_dict.keys():
                if key not in model_state_dict.keys():
                    non_persistent_buffers[key] = loaded_model_state_dict[key]

            loaded_model_state_dict = {
                key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
            }

            self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))

            model_buffers = list(model.buffers())
            for non_persistent_buffer in non_persistent_buffers.values():
                found_buffer = False
                for i, model_buffer in enumerate(model_buffers):
                    if torch.equal(non_persistent_buffer, model_buffer):
                        found_buffer = True
                        break

                self.assertTrue(found_buffer)
                model_buffers.pop(i)

            models_equal = True
            for layer_name, p1 in model_state_dict.items():
                if layer_name in loaded_model_state_dict:
                    p2 = loaded_model_state_dict[layer_name]
                    if p1.data.ne(p2.data).sum() > 0:
                        models_equal = False

            self.assertTrue(models_equal)

            # Avoid memory leak. Without this, each call increases RAM usage by ~20MB.
            # Even with this call, there are still memory leaks of about ~0.04MB.
            self.clear_torch_jit_class_registry()

    def test_torch_fx(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self._create_and_check_torch_fx_tracing(config, inputs_dict)

    def test_torch_fx_output_loss(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self._create_and_check_torch_fx_tracing(config, inputs_dict, output_loss=True)

    def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False):
        if not is_torch_fx_available() or not self.fx_compatible:
            return

        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        configs_no_init.return_dict = False

        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss)

            try:
                if model.config.is_encoder_decoder:
                    # disable cache: past_key_values are not traceable
                    model.config.use_cache = False
                    labels = inputs.get("labels", None)
                    input_names = [
                        "attention_mask",
                        "decoder_attention_mask",
                        "decoder_input_ids",
                        "input_features",
                        "input_ids",
                        "input_values",
                    ]
                    if labels is not None:
                        input_names.append("labels")

                    filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names}
                    input_names = list(filtered_inputs.keys())

                    model_output = model(**filtered_inputs)

                    traced_model = symbolic_trace(model, input_names)
                    traced_output = traced_model(**filtered_inputs)
                else:
                    input_names = [
                        "attention_mask",
                        "bbox",
                        "input_features",
                        "input_ids",
                        "input_values",
                        "pixel_values",
                        "token_type_ids",
                        "visual_feats",
                        "visual_pos",
                    ]

                    labels = inputs.get("labels", None)
                    start_positions = inputs.get("start_positions", None)
                    end_positions = inputs.get("end_positions", None)
                    if labels is not None:
                        input_names.append("labels")
                    if start_positions is not None:
                        input_names.append("start_positions")
                    if end_positions is not None:
                        input_names.append("end_positions")

                    filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names}
                    input_names = list(filtered_inputs.keys())

                    if model.__class__.__name__ in set(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values()) and (
                        not hasattr(model.config, "problem_type") or model.config.problem_type is None
                    ):
                        model.config.problem_type = "single_label_classification"

                    traced_model = symbolic_trace(model, input_names)
                    traced_output = traced_model(**filtered_inputs)
                    model_output = model(**filtered_inputs)

            except Exception as e:
                self.fail(f"Couldn't trace module: {e}")

            def flatten_output(output):
                flatten = []
                for x in output:
                    if isinstance(x, (tuple, list)):
                        flatten += flatten_output(x)
                    elif not isinstance(x, torch.Tensor):
                        continue
                    else:
                        flatten.append(x)
                return flatten

            model_output = flatten_output(model_output)
            traced_output = flatten_output(traced_output)
            num_outputs = len(model_output)

            for i in range(num_outputs):
                self.assertTrue(
                    torch.allclose(model_output[i], traced_output[i]),
                    f"traced {i}th output doesn't match model {i}th output for {model_class}",
                )

            # Test that the model can be serialized and restored properly
            with tempfile.TemporaryDirectory() as tmp_dir_name:
                pkl_file_name = os.path.join(tmp_dir_name, "model.pkl")
                try:
                    with open(pkl_file_name, "wb") as f:
                        pickle.dump(traced_model, f)
                    with open(pkl_file_name, "rb") as f:
                        loaded = pickle.load(f)
                except Exception as e:
                    self.fail(f"Couldn't serialize / deserialize the traced model: {e}")

                loaded_output = loaded(**filtered_inputs)
                loaded_output = flatten_output(loaded_output)

                for i in range(num_outputs):
                    self.assertTrue(
                        torch.allclose(model_output[i], loaded_output[i]),
                        f"serialized model {i}th output doesn't match model {i}th output for {model_class}",
                    )

            # Avoid memory leak. Without this, each call increases RAM usage by ~20MB.
            # Even with this call, there are still memory leaks of about ~0.04MB.
            self.clear_torch_jit_class_registry()

    def test_headmasking(self):
        if not self.test_head_masking:
            return

        # fix the random inputs for this test, then restore non-deterministic seeding
        global_rng.seed(42)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        global_rng.seed()

        inputs_dict["output_attentions"] = True
        config.output_hidden_states = True
        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()

            # Prepare head_mask
            # Set require_grad after having prepared the tensor to avoid error (leaf variable has been moved into the graph interior)
            head_mask = torch.ones(
                self.model_tester.num_hidden_layers,
                self.model_tester.num_attention_heads,
                device=torch_device,
            )
            head_mask[0, 0] = 0
            head_mask[-1, :-1] = 0
            head_mask.requires_grad_(requires_grad=True)
            inputs = self._prepare_for_class(inputs_dict, model_class).copy()
            inputs["head_mask"] = head_mask
            if model.config.is_encoder_decoder:
                signature = inspect.signature(model.forward)
                arg_names = [*signature.parameters.keys()]
                if "decoder_head_mask" in arg_names:  # necessary differentiation because of T5 model
                    inputs["decoder_head_mask"] = head_mask
                if "cross_attn_head_mask" in arg_names:
                    inputs["cross_attn_head_mask"] = head_mask
            outputs = model(**inputs, return_dict=True)

            # Test that we can get a gradient back for importance score computation
            output = sum(t.sum() for t in outputs[0])
            output = output.sum()
            output.backward()
            multihead_outputs = head_mask.grad

            self.assertIsNotNone(multihead_outputs)
            self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers)

            def check_attentions_validity(attentions):
                # Remove NaN
                for t in attentions:
                    # Check we don't have more than 25% nans (arbitrary)
                    self.assertLess(torch.sum(torch.isnan(t)), t.numel() / 4)
                # remove them (the test is less complete)
                attentions = [t.masked_fill(torch.isnan(t), 0.0) for t in attentions]

                self.assertAlmostEqual(attentions[0][..., 0, :, :].flatten().sum().item(), 0.0)
                self.assertNotEqual(attentions[0][..., -1, :, :].flatten().sum().item(), 0.0)
                if len(attentions) > 2:  # encoder-decoder models have only 2 layers in each module
                    self.assertNotEqual(attentions[1][..., 0, :, :].flatten().sum().item(), 0.0)
                self.assertAlmostEqual(attentions[-1][..., -2, :, :].flatten().sum().item(), 0.0)
                self.assertNotEqual(attentions[-1][..., -1, :, :].flatten().sum().item(), 0.0)

            if model.config.is_encoder_decoder:
                check_attentions_validity(outputs.encoder_attentions)
                check_attentions_validity(outputs.decoder_attentions)
                check_attentions_validity(outputs.cross_attentions)
            else:
                check_attentions_validity(outputs.attentions)

    def test_head_pruning(self):
        if not self.test_pruning:
            return

        for model_class in self.all_model_classes:
            (
                config,
                inputs_dict,
            ) = self.model_tester.prepare_config_and_inputs_for_common()

            if "head_mask" in inputs_dict:
                del inputs_dict["head_mask"]

            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config=config)
            model.to(torch_device)
            model.eval()
            heads_to_prune = {
                0: list(range(1, self.model_tester.num_attention_heads)),
                -1: [0],
            }
            model.prune_heads(heads_to_prune)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)

    def test_head_pruning_save_load_from_pretrained(self):
        if not self.test_pruning:
            return

        for model_class in self.all_model_classes:
            (
                config,
                inputs_dict,
            ) = self.model_tester.prepare_config_and_inputs_for_common()

            if "head_mask" in inputs_dict:
                del inputs_dict["head_mask"]

            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config=config)
            model.to(torch_device)
            model.eval()
            heads_to_prune = {
                0: list(range(1, self.model_tester.num_attention_heads)),
                -1: [0],
            }
            model.prune_heads(heads_to_prune)

            with tempfile.TemporaryDirectory() as temp_dir_name:
                model.save_pretrained(temp_dir_name)
                model = model_class.from_pretrained(temp_dir_name)
                model.to(torch_device)

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]
            self.assertEqual(attentions[0].shape[-3], 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)

    def test_head_pruning_save_load_from_config_init(self):
        if not self.test_pruning:
            return

        for model_class in self.all_model_classes:
            (
                config,
                inputs_dict,
            ) = self.model_tester.prepare_config_and_inputs_for_common()

            if "head_mask" in inputs_dict:
                del inputs_dict["head_mask"]

            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False

            heads_to_prune = {
                0: list(range(1, self.model_tester.num_attention_heads)),
                -1: [0],
            }
            config.pruned_heads = heads_to_prune

            model = model_class(config=config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1)

    def test_head_pruning_integration(self):
        if not self.test_pruning:
            return

        for model_class in self.all_model_classes:
            (
                config,
                inputs_dict,
            ) = self.model_tester.prepare_config_and_inputs_for_common()

            if "head_mask" in inputs_dict:
                del inputs_dict["head_mask"]

            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False

            heads_to_prune = {0: [0], 1: [1, 2]}
            config.pruned_heads = heads_to_prune

            model = model_class(config=config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)

            with tempfile.TemporaryDirectory() as temp_dir_name:
                model.save_pretrained(temp_dir_name)
                model = model_class.from_pretrained(temp_dir_name)
                model.to(torch_device)

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads)
            self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)

            heads_to_prune = {0: [0], 2: [1, 2]}
            model.prune_heads(heads_to_prune)

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1)
            self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[2].shape[-3], self.model_tester.num_attention_heads - 2)
            self.assertEqual(attentions[3].shape[-3], self.model_tester.num_attention_heads)

            self.assertDictEqual(model.config.pruned_heads, {0: [0], 1: [1, 2], 2: [1, 2]})

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            if hasattr(self.model_tester, "encoder_seq_length"):
                seq_length = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1:
                    seq_length = seq_length * self.model_tester.chunk_length
            else:
                seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

            if config.is_encoder_decoder:
                hidden_states = outputs.decoder_hidden_states

                self.assertIsInstance(hidden_states, (list, tuple))
                self.assertEqual(len(hidden_states), expected_num_layers)
                seq_len = getattr(self.model_tester, "seq_length", None)
                decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)

                self.assertListEqual(
                    list(hidden_states[0].shape[-2:]),
                    [decoder_seq_length, self.model_tester.hidden_size],
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)

        output = outputs[0]

        if config.is_encoder_decoder:
            # Seq2Seq models
            encoder_hidden_states = outputs.encoder_hidden_states[0]
            encoder_hidden_states.retain_grad()

            decoder_hidden_states = outputs.decoder_hidden_states[0]
            decoder_hidden_states.retain_grad()

            if self.has_attentions:
                encoder_attentions = outputs.encoder_attentions[0]
                encoder_attentions.retain_grad()

                decoder_attentions = outputs.decoder_attentions[0]
                decoder_attentions.retain_grad()

                cross_attentions = outputs.cross_attentions[0]
                cross_attentions.retain_grad()

            output.flatten()[0].backward(retain_graph=True)

            self.assertIsNotNone(encoder_hidden_states.grad)
            self.assertIsNotNone(decoder_hidden_states.grad)

            if self.has_attentions:
                self.assertIsNotNone(encoder_attentions.grad)
                self.assertIsNotNone(decoder_attentions.grad)
                self.assertIsNotNone(cross_attentions.grad)
        else:
            # Encoder-/Decoder-only models
            hidden_states = outputs.hidden_states[0]
            hidden_states.retain_grad()

            if self.has_attentions:
                attentions = outputs.attentions[0]
                attentions.retain_grad()

            output.flatten()[0].backward(retain_graph=True)

            self.assertIsNotNone(hidden_states.grad)

            if self.has_attentions:
                self.assertIsNotNone(attentions.grad)

    def test_feed_forward_chunking(self):
        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            torch.manual_seed(0)
            config = copy.deepcopy(original_config)
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            hidden_states_no_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]

            torch.manual_seed(0)
            config.chunk_size_feed_forward = 1
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            hidden_states_with_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]
            self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3))

    def test_resize_position_vector_embeddings(self):
        if not self.test_resize_position_embeddings:
            return

        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            config = copy.deepcopy(original_config)
            model = model_class(config)
            model.to(torch_device)

            if self.model_tester.is_training is False:
                model.eval()

            max_position_embeddings = config.max_position_embeddings

            # Retrieve the embeddings and clone them
            if model.config.is_encoder_decoder:
                encoder_model_embed, decoder_model_embed = model.get_position_embeddings()
                encoder_cloned_embeddings = encoder_model_embed.weight.clone()
                decoder_cloned_embeddings = decoder_model_embed.weight.clone()
            else:
                model_embed = model.get_position_embeddings()
                cloned_embeddings = model_embed.weight.clone()

            # Check that resizing the position embeddings with a larger max_position_embeddings
            # increases the model's position embeddings size
            model.resize_position_embeddings(max_position_embeddings + 10)
            self.assertEqual(model.config.max_position_embeddings, max_position_embeddings + 10)

            # Check that it actually resizes the embeddings matrix
            if model.config.is_encoder_decoder:
                encoder_model_embed, decoder_model_embed = model.get_position_embeddings()
                self.assertEqual(encoder_model_embed.weight.shape[0], encoder_cloned_embeddings.shape[0] + 10)
                self.assertEqual(decoder_model_embed.weight.shape[0], decoder_cloned_embeddings.shape[0] + 10)
            else:
                model_embed = model.get_position_embeddings()
                self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)

            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that resizing the position embeddings with a smaller max_position_embeddings
            # decreases the model's max_position_embeddings
            model.resize_position_embeddings(max_position_embeddings - 5)
            self.assertEqual(model.config.max_position_embeddings, max_position_embeddings - 5)

            # Check that it actually resizes the embeddings matrix
            if model.config.is_encoder_decoder:
                encoder_model_embed, decoder_model_embed = model.get_position_embeddings()
                self.assertEqual(encoder_model_embed.weight.shape[0], encoder_cloned_embeddings.shape[0] - 5)
                self.assertEqual(decoder_model_embed.weight.shape[0], decoder_cloned_embeddings.shape[0] - 5)
            else:
                model_embed = model.get_position_embeddings()
                self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 5)

            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that adding and removing tokens has not modified the first part of the embedding matrix.
            models_equal = True

            if model.config.is_encoder_decoder:
                for p1, p2 in zip(encoder_cloned_embeddings, encoder_model_embed.weight):
                    if p1.data.ne(p2.data).sum() > 0:
                        models_equal = False
                for p1, p2 in zip(decoder_cloned_embeddings, decoder_model_embed.weight):
                    if p1.data.ne(p2.data).sum() > 0:
                        models_equal = False
            else:
                for p1, p2 in zip(cloned_embeddings, model_embed.weight):
                    if p1.data.ne(p2.data).sum() > 0:
                        models_equal = False

            self.assertTrue(models_equal)

    def test_resize_tokens_embeddings(self):
        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.test_resize_embeddings:
            return

        for model_class in self.all_model_classes:
            config = copy.deepcopy(original_config)
            model = model_class(config)
            model.to(torch_device)

            if self.model_tester.is_training is False:
                model.eval()

            model_vocab_size = config.vocab_size
            # Retrieve the embeddings and clone them
            model_embed = model.resize_token_embeddings(model_vocab_size)
            cloned_embeddings = model_embed.weight.clone()

            # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
            model_embed = model.resize_token_embeddings(model_vocab_size + 10)
            self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
            # Check that it actually resizes the embeddings matrix
            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
            model_embed = model.resize_token_embeddings(model_vocab_size - 15)
            self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
            # Check that it actually resizes the embeddings matrix
            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)

            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            # Input ids should be clamped to the maximum size of the vocabulary
            inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1)

            # make sure that decoder_input_ids are clamped as well
            if "decoder_input_ids" in inputs_dict:
                inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that adding and removing tokens has not modified the first part of the embedding matrix.
            models_equal = True
            for p1, p2 in zip(cloned_embeddings, model_embed.weight):
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)

    def test_resize_embeddings_untied(self):
        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.test_resize_embeddings:
            return

        original_config.tie_word_embeddings = False

        # if the model cannot untie its embeddings -> leave test
        if original_config.tie_word_embeddings:
            return

        for model_class in self.all_model_classes:
            config = copy.deepcopy(original_config)
            model = model_class(config).to(torch_device)

            # if no output embeddings -> leave test
            if model.get_output_embeddings() is None:
                continue

            # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
            model_vocab_size = config.vocab_size
            model.resize_token_embeddings(model_vocab_size + 10)
            self.assertEqual(model.config.vocab_size, model_vocab_size + 10)
            output_embeds = model.get_output_embeddings()
            self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
            # Check bias if present
            if output_embeds.bias is not None:
                self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
            model.resize_token_embeddings(model_vocab_size - 15)
            self.assertEqual(model.config.vocab_size, model_vocab_size - 15)
            # Check that it actually resizes the embeddings matrix
            output_embeds = model.get_output_embeddings()
            self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
            # Check bias if present
            if output_embeds.bias is not None:
                self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
            # Input ids should be clamped to the maximum size of the vocabulary
            inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1)
            if "decoder_input_ids" in inputs_dict:
                inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Embedding, AdaptiveEmbedding))
            model.set_input_embeddings(nn.Embedding(10, 10))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_model_main_input_name(self):
        for model_class in self.all_model_classes:
            model_signature = inspect.signature(getattr(model_class, "forward"))
            # The main input is the name of the argument after `self`
            observed_main_input_name = list(model_signature.parameters.keys())[1]
            self.assertEqual(model_class.main_input_name, observed_main_input_name)

    def test_correct_missing_keys(self):
        if not self.test_missing_keys:
            return
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            base_model_prefix = model.base_model_prefix

            if hasattr(model, base_model_prefix):
                extra_params = {k: v for k, v in model.named_parameters() if not k.startswith(base_model_prefix)}
                extra_params.update({k: v for k, v in model.named_buffers() if not k.startswith(base_model_prefix)})
                # Some models define this as None
                if model._keys_to_ignore_on_load_missing:
                    for key in model._keys_to_ignore_on_load_missing:
                        extra_params.pop(key, None)

                if not extra_params:
                    # In that case, we *are* on a head model, but every
                    # single key is not an actual parameter, and this is
                    # tested in `test_tied_model_weights_key_ignore`.
                    continue

                with tempfile.TemporaryDirectory() as temp_dir_name:
                    model.base_model.save_pretrained(temp_dir_name)
                    model, loading_info = model_class.from_pretrained(temp_dir_name, output_loading_info=True)
                    self.assertGreater(len(loading_info["missing_keys"]), 0, model.__class__.__name__)

    def test_tie_model_weights(self):
        if not self.test_torchscript:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_same_values(layer_1, layer_2):
            equal = True
            for p1, p2 in zip(layer_1.weight, layer_2.weight):
                if p1.data.ne(p2.data).sum() > 0:
                    equal = False
            return equal

        for model_class in self.all_model_classes:
            config.torchscript = True
            model_not_tied = model_class(config)
            if model_not_tied.get_output_embeddings() is None:
                continue

            config_tied = copy.deepcopy(config)
            config_tied.torchscript = False
            model_tied = model_class(config_tied)
            params_tied = list(model_tied.parameters())
            # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(check_same_values(embeddings, decoding))

            # Check that after modification, they remain the same.
            # embeddings.weight.data.div_(2)
            # # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(embeddings.weight.shape, decoding.weight.shape)
            # self.assertTrue(check_same_values(embeddings, decoding))

            # Check that after modification, they remain the same.
            # decoding.weight.data.div_(4)
            # # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(embeddings.weight.shape, decoding.weight.shape)
            # self.assertTrue(check_same_values(embeddings, decoding))

            # Check that after resize they remain tied.
            model_tied.resize_token_embeddings(config.vocab_size + 10)
            params_tied_2 = list(model_tied.parameters())
            self.assertEqual(len(params_tied_2), len(params_tied))

            # decoding.weight.data.mul_(20)
            # # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(model.transformer.wte.weight.shape, model.lm_head.weight.shape)
            # self.assertTrue(check_same_values(model.transformer.wte, model.lm_head))
| |
|
| | def test_tied_model_weights_key_ignore(self): |
| | config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() |
| | for model_class in self.all_model_classes: |
| | model_tied = model_class(config) |
| | with tempfile.TemporaryDirectory() as d: |
| | model_tied.save_pretrained(d) |
| |
|
| | # Nuke the saved weights: with an empty state dict on disk, every parameter
| | # should be reported as missing on reload.
| | with open(os.path.join(d, "pytorch_model.bin"), "wb") as f: |
| | torch.save({}, f) |
| | model_reloaded, infos = model_class.from_pretrained(d, output_loading_info=True) |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | prefix = f"{model_reloaded.base_model_prefix}." |
| | params = dict(model_reloaded.named_parameters()) |
| | params.update(dict(model_reloaded.named_buffers())) |
| | # Strip the base-model prefix so the names are comparable to the reported missing keys.
| | param_names = {k[len(prefix) :] if k.startswith(prefix) else k for k in params.keys()} |
| |
|
| | missing_keys = set(infos["missing_keys"]) |
| |
|
| | extra_missing = missing_keys - param_names |
| | # Any reported missing key that is not an actual parameter or buffer should
| | # have been listed in the model's `keys_to_ignore`.
| |
|
| | self.assertEqual( |
| | extra_missing, |
| | set(), |
| | f"This model {model_class.__name__} might be missing some `keys_to_ignore`: {extra_missing}", |
| | ) |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | def test_model_outputs_equivalence(self): |
| | config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() |
| |
|
| | def set_nan_tensor_to_zero(t): |
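| | # `NaN != NaN` is the only case where a value differs from itself, so this
| | # boolean-mask assignment zeroes out NaNs in place, e.g.
| | # set_nan_tensor_to_zero(torch.tensor([1.0, float("nan")])) -> tensor([1., 0.]).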
| | t[t != t] = 0 |
| | return t |
| |
|
| | def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): |
| | with torch.no_grad(): |
| | tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) |
| | dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() |
| |
|
| | def recursive_check(tuple_object, dict_object): |
| | if isinstance(tuple_object, (List, Tuple)): |
| | for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): |
| | recursive_check(tuple_iterable_value, dict_iterable_value) |
| | elif isinstance(tuple_object, Dict): |
| | for tuple_iterable_value, dict_iterable_value in zip( |
| | tuple_object.values(), dict_object.values() |
| | ): |
| | recursive_check(tuple_iterable_value, dict_iterable_value) |
| | elif tuple_object is None: |
| | return |
| | else: |
| | self.assertTrue( |
| | torch.allclose( |
| | set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 |
| | ), |
| | msg=( |
| | "Tuple and dict output are not equal. Difference:" |
| | f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" |
| | f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" |
| | f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." |
| | ), |
| | ) |
| |
|
| | recursive_check(tuple_output, dict_output) |
| |
|
| | for model_class in self.all_model_classes: |
| | model = model_class(config) |
| | model.to(torch_device) |
| | model.eval() |
| |
|
| | tuple_inputs = self._prepare_for_class(inputs_dict, model_class) |
| | dict_inputs = self._prepare_for_class(inputs_dict, model_class) |
| | check_equivalence(model, tuple_inputs, dict_inputs) |
| |
|
| | tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) |
| | dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) |
| | check_equivalence(model, tuple_inputs, dict_inputs) |
| |
|
| | tuple_inputs = self._prepare_for_class(inputs_dict, model_class) |
| | dict_inputs = self._prepare_for_class(inputs_dict, model_class) |
| | check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) |
| |
|
| | tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) |
| | dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) |
| | check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) |
| |
|
| | if self.has_attentions: |
| | tuple_inputs = self._prepare_for_class(inputs_dict, model_class) |
| | dict_inputs = self._prepare_for_class(inputs_dict, model_class) |
| | check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) |
| |
|
| | tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) |
| | dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) |
| | check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) |
| |
|
| | tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) |
| | dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) |
| | check_equivalence( |
| | model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} |
| | ) |
| |
|
| | |
| | |
| | def _make_attention_mask_non_null(self, inputs_dict): |
| | """Make sure no sequence has all zeros as attention mask""" |
| |
|
| | for k in ["attention_mask", "encoder_attention_mask", "decoder_attention_mask"]: |
| | if k in inputs_dict: |
| | attention_mask = inputs_dict[k] |
| |
|
| | # Put a `1` at the start of each sequence so that no mask is all zeros, which
| | # also keeps the mask valid when combined with causal attention masks.
| | attention_mask = torch.cat( |
| | [torch.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], dim=-1 |
| | ) |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | inputs_dict[k] = attention_mask |
| |
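| | # Illustrative sketch (not part of the original test): for a mask like
| | # [[0, 0, 1], [1, 0, 1]], the concatenation above yields [[1, 0, 1], [1, 0, 1]],
| | # i.e. only the first column is forced to 1 and the rest is left untouched.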
|
| | |
| | |
| | def _postprocessing_to_ignore_test_cases(self, tf_outputs, pt_outputs, model_class): |
| | """For temporarily ignoring some failed test cases (issues to be fixed)""" |
| |
|
| | tf_keys = {k for k, v in tf_outputs.items() if v is not None} |
| | pt_keys = {k for k, v in pt_outputs.items() if v is not None} |
| |
|
| | key_differences = tf_keys.symmetric_difference(pt_keys) |
| |
|
| | if model_class.__name__ in [ |
| | "FlaubertWithLMHeadModel", |
| | "FunnelForPreTraining", |
| | "ElectraForPreTraining", |
| | "XLMWithLMHeadModel", |
| | "TransfoXLLMHeadModel", |
| | ]: |
| | for k in key_differences: |
| | if k in ["loss", "losses"]: |
| | tf_keys.discard(k) |
| | pt_keys.discard(k) |
| | elif model_class.__name__.startswith("GPT2"): |
| | # The TF and PT GPT2 models return `past_key_values` in different structures, so skip comparing it.
| | tf_keys.discard("past_key_values") |
| | pt_keys.discard("past_key_values") |
| |
|
| | # Rebuild the outputs from the remaining keys.
| | new_tf_outputs = type(tf_outputs)(**{k: tf_outputs[k] for k in tf_keys}) |
| | new_pt_outputs = type(pt_outputs)(**{k: pt_outputs[k] for k in pt_keys}) |
| |
|
| | return new_tf_outputs, new_pt_outputs |
| |
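| | # Sketch of the filtering above (hypothetical values): if TF returns keys
| | # {"loss", "logits"} while PT returns {"logits"} for one of the listed classes,
| | # the symmetric difference {"loss"} is discarded from both key sets before the
| | # outputs are rebuilt and compared.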
|
| | |
| | def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): |
| | """Check the outputs from PyTorch and TensorFlow models are close enough. Checks are done in a recursive way. |
| | |
| | Args: |
| | model_class: The class of the model that is currently being tested. For example, `TFBertModel`,
| | `TFBertForMaskedLM`, `TFBertForSequenceClassification`, etc. Mainly used for providing more
| | informative error messages.
| | name (`str`): The name of the output. For example, `output.hidden_states`, `output.attentions`, etc.
| | attributes (`Tuple[str]`): The names of the output's elements if the output is a tuple/list, with each
| | element being a named field in the output.
| | """ |
| |
|
| | self.assertEqual(type(name), str) |
| | if attributes is not None: |
| | self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") |
| |
|
| | # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`).
| | if isinstance(tf_outputs, ModelOutput): |
| | self.assertTrue( |
| | isinstance(pt_outputs, ModelOutput), |
| | f"{name}: `pt_outputs` should an instance of `ModelOutput` when `tf_outputs` is", |
| | ) |
| |
|
| | # Temporarily skip some known mismatched outputs
| | # (see `_postprocessing_to_ignore_test_cases` above).
| | tf_outputs, pt_outputs = self._postprocessing_to_ignore_test_cases(tf_outputs, pt_outputs, model_class) |
| |
|
| | tf_keys = [k for k, v in tf_outputs.items() if v is not None] |
| | pt_keys = [k for k, v in pt_outputs.items() if v is not None] |
| |
|
| | self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch") |
| |
|
| | # Convert to the tuple case, appending each key to `name`.
| | attributes = tuple([f"{name}.{k}" for k in tf_keys]) |
| | self.check_pt_tf_outputs( |
| | tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes |
| | ) |
| |
|
| | |
| | elif type(tf_outputs) in [tuple, list]: |
| | self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch") |
| | self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch") |
| |
|
| | if attributes is not None: |
| | # Case 1: each output has an assigned name (e.g. the tuple form of a `ModelOutput`).
| | self.assertEqual( |
| | len(attributes), |
| | len(tf_outputs), |
| | f"{name}: The tuple `attributes` should have the same length as `tf_outputs`", |
| | ) |
| | else: |
| | # Case 2: the outputs have no assigned names (e.g. per-layer hidden states); add an index to `name`.
| | attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))]) |
| |
|
| | for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes): |
| | self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr) |
| |
|
| | elif isinstance(tf_outputs, tf.Tensor): |
| | self.assertTrue( |
| | isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should be a tensor when `tf_outputs` is"
| | ) |
| |
|
| | tf_outputs = tf_outputs.numpy() |
| | pt_outputs = pt_outputs.detach().to("cpu").numpy() |
| |
|
| | self.assertEqual( |
| | tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch" |
| | ) |
| |
|
| | # Promote NumPy scalars to arrays so that the NaN replacement below works.
| | if np.isscalar(tf_outputs): |
| | tf_outputs = np.array([tf_outputs]) |
| | pt_outputs = np.array([pt_outputs]) |
| |
|
| | tf_nans = np.isnan(tf_outputs) |
| | pt_nans = np.isnan(pt_outputs) |
| |
|
| | pt_outputs[tf_nans] = 0 |
| | tf_outputs[tf_nans] = 0 |
| | pt_outputs[pt_nans] = 0 |
| | tf_outputs[pt_nans] = 0 |
| |
|
| | max_diff = np.amax(np.abs(tf_outputs - pt_outputs)) |
| | self.assertLessEqual(max_diff, tol, f"{name}: Difference between PyTorch and TF is {max_diff} (>= {tol}).") |
| | else: |
| | raise ValueError( |
| | "`tf_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `tf.Tensor`. Got" |
| | f" {type(tf_outputs)} instead." |
| | ) |
| |
|
| | def prepare_tf_inputs_from_pt_inputs(self, pt_inputs_dict): |
| | tf_inputs_dict = {} |
| | for key, tensor in pt_inputs_dict.items(): |
| | |
| | if isinstance(tensor, bool):
| | tf_inputs_dict[key] = tensor
| | elif key in ("input_values", "pixel_values", "input_features"):
| | tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32)
| | # Any other floating-point tensor becomes `tf.float32`, integer tensors become `tf.int32`.
| | elif tensor.is_floating_point(): |
| | tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32) |
| | else: |
| | tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.int32) |
| |
|
| | return tf_inputs_dict |
| |
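| | # A minimal sketch of the conversion above (hypothetical tensors): an integer
| | # tensor such as torch.ones(2, 3, dtype=torch.long) comes out as a tf.int32
| | # tensor, while float tensors and the whitelisted keys ("input_values",
| | # "pixel_values", "input_features") come out as tf.float32.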
|
| | def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict): |
| | tf_inputs_dict = self.prepare_tf_inputs_from_pt_inputs(pt_inputs_dict) |
| |
|
| | |
| | pt_inputs_dict = { |
| | k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items() |
| | } |
| |
|
| | |
| | pt_model.to(torch_device) |
| |
|
| | |
| | pt_model.eval() |
| |
|
| | with torch.no_grad(): |
| | pt_outputs = pt_model(**pt_inputs_dict) |
| | tf_outputs = tf_model(tf_inputs_dict) |
| |
|
| | # TF models usually return the loss as a per-sample tensor rather than a scalar.
| | # Reduce it to a scalar here so it can be compared with the PT loss.
| | tf_loss = getattr(tf_outputs, "loss", None) |
| | if tf_loss is not None: |
| | tf_outputs.loss = tf.math.reduce_mean(tf_loss) |
| |
|
| | self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(pt_model)) |
| |
|
| | @is_pt_tf_cross_test |
| | def test_pt_tf_model_equivalence(self): |
| | import transformers |
| |
|
| | for model_class in self.all_model_classes: |
| | config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() |
| |
|
| | tf_model_class_name = "TF" + model_class.__name__ |
| | if not hasattr(transformers, tf_model_class_name): |
| | # transformers does not have a TF version of this model yet, so skip it.
| | continue
| |
|
| | # Compare all outputs: hidden states and, when available, attentions.
| | config.output_hidden_states = True |
| | config.output_attentions = self.has_attentions |
| |
|
| | # Make sure no sequence has an all-zero attention mask; otherwise some tests fail
| | # because PT and TF use different large negative values for masked positions.
| | self._make_attention_mask_non_null(inputs_dict) |
| |
|
| | tf_model_class = getattr(transformers, tf_model_class_name) |
| |
|
| | pt_model = model_class(config) |
| | tf_model = tf_model_class(config) |
| |
|
| | pt_inputs_dict = self._prepare_for_class(inputs_dict, model_class) |
| | pt_inputs_dict_with_labels = self._prepare_for_class( |
| | inputs_dict, |
| | model_class, |
| | # Not all models accept `labels` in the forward pass (yet).
| | return_labels="labels" in inspect.signature(model_class.forward).parameters,
| | ) |
| |
|
| | # Only forward inputs that actually exist in the TF call signature.
| | tf_input_keys = set(inspect.signature(tf_model.call).parameters.keys()) |
| |
|
| | # Remove all head masks.
| | tf_input_keys.discard("head_mask") |
| | tf_input_keys.discard("cross_attn_head_mask") |
| | tf_input_keys.discard("decoder_head_mask") |
| |
|
| | pt_inputs_dict = {k: v for k, v in pt_inputs_dict.items() if k in tf_input_keys} |
| | pt_inputs_dict_with_labels = {k: v for k, v in pt_inputs_dict_with_labels.items() if k in tf_input_keys} |
| |
|
| | # For some models (e.g. base models) no labels are returned; set the dict to
| | # `None` to avoid checking the same inputs twice.
| | if set(pt_inputs_dict_with_labels.keys()).symmetric_difference(pt_inputs_dict.keys()): |
| | pt_inputs_dict_with_labels = None |
| |
|
| | # Check that we can load a PT model into TF (and vice versa) with the
| | # model => model conversion functions.
| | tf_inputs_dict = self.prepare_tf_inputs_from_pt_inputs(pt_inputs_dict) |
| | tf_model = transformers.load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=tf_inputs_dict) |
| | pt_model = transformers.load_tf2_model_in_pytorch_model(pt_model, tf_model) |
| |
|
| | # Check without `labels`.
| | self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict) |
| | # Check with `labels` (when supported).
| | if pt_inputs_dict_with_labels: |
| | self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict_with_labels) |
| |
|
| | # Check that we can load a PT model into TF (and vice versa) with the
| | # checkpoint => model conversion functions.
| | with tempfile.TemporaryDirectory() as tmpdirname: |
| | pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin") |
| | torch.save(pt_model.state_dict(), pt_checkpoint_path) |
| | tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path) |
| |
|
| | tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5") |
| | tf_model.save_weights(tf_checkpoint_path) |
| | pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path) |
| |
|
| | # Check without `labels`.
| | self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict) |
| | # Check with `labels` (when supported).
| | if pt_inputs_dict_with_labels: |
| | self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict_with_labels) |
| |
|
| | def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): |
| | diff = np.abs((a - b)).max() |
| | self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") |
| |
|
| | def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): |
| | """ |
| | Args: |
| | model_class: The class of the model that is currently being tested. For example, ..., etc.
| | Currently unused, but it could make debugging easier and faster.
| | 
| | name: A string specifying what `fx_outputs`/`pt_outputs` represent in the model outputs.
| | Currently unused, but in the future, we could use this information to make the error messages
| | clearer by giving the name(s) of the output tensor(s) with large difference(s) between PT and Flax.
| | """ |
| |
|
| | self.assertEqual(type(name), str) |
| | if attributes is not None: |
| | self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") |
| |
|
| | # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`).
| | if isinstance(fx_outputs, ModelOutput): |
| | self.assertTrue( |
| | isinstance(pt_outputs, ModelOutput), |
| | f"{name}: `pt_outputs` should an instance of `ModelOutput` when `fx_outputs` is", |
| | ) |
| |
|
| | fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) |
| | pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) |
| |
|
| | self.assertEqual(fx_keys, pt_keys, f"{name}: Output keys differ between Flax and PyTorch") |
| |
|
| | |
| | |
| | attributes = tuple([f"{name}.{k}" for k in fx_keys]) |
| | self.check_pt_flax_outputs( |
| | fx_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes |
| | ) |
| |
|
| | |
| | elif type(fx_outputs) in [tuple, list]: |
| | self.assertEqual( |
| | type(fx_outputs), type(pt_outputs), f"{name}: Output types differ between Flax and PyTorch" |
| | ) |
| | self.assertEqual( |
| | len(fx_outputs), len(pt_outputs), f"{name}: Output lengths differ between Flax and PyTorch" |
| | ) |
| |
|
| | if attributes is not None: |
| | # Case 1: each output has an assigned name (e.g. the tuple form of a `ModelOutput`).
| | self.assertEqual( |
| | len(attributes), |
| | len(fx_outputs), |
| | f"{name}: The tuple `attributes` should have the same length as `fx_outputs`", |
| | ) |
| | else: |
| | # Case 2: the outputs have no assigned names (e.g. per-layer hidden states); add an index to `name`.
| | attributes = tuple([f"{name}_{idx}" for idx in range(len(fx_outputs))]) |
| |
|
| | for fx_output, pt_output, attr in zip(fx_outputs, pt_outputs, attributes): |
| | self.check_pt_flax_outputs(fx_output, pt_output, model_class, tol=tol, name=attr) |
| |
|
| | elif isinstance(fx_outputs, jnp.ndarray): |
| | self.assertTrue( |
| | isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should be a tensor when `fx_outputs` is"
| | ) |
| |
|
| | # `np.array` makes a copy (unlike `np.asarray`), which matters because the
| | # arrays are modified in place below.
| | fx_outputs = np.array(fx_outputs) |
| | pt_outputs = pt_outputs.detach().to("cpu").numpy() |
| |
|
| | self.assertEqual( |
| | fx_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between Flax and PyTorch" |
| | ) |
| |
|
| | # Promote NumPy scalars to arrays so that the NaN replacement below works.
| | if np.isscalar(fx_outputs): |
| | fx_outputs = np.array([fx_outputs]) |
| | pt_outputs = np.array([pt_outputs]) |
| |
|
| | fx_nans = np.isnan(fx_outputs) |
| | pt_nans = np.isnan(pt_outputs) |
| |
|
| | pt_outputs[fx_nans] = 0 |
| | fx_outputs[fx_nans] = 0 |
| | pt_outputs[pt_nans] = 0 |
| | fx_outputs[pt_nans] = 0 |
| |
|
| | max_diff = np.amax(np.abs(fx_outputs - pt_outputs)) |
| | self.assertLessEqual( |
| | max_diff, tol, f"{name}: Difference between PyTorch and Flax is {max_diff} (>= {tol})." |
| | ) |
| | else: |
| | raise ValueError( |
| | "`fx_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `jnp.ndarray`. Got" |
| | f" {type(fx_outputs)} instead." |
| | ) |
| |
|
| | @is_pt_flax_cross_test |
| | def test_equivalence_pt_to_flax(self): |
| | config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() |
| |
|
| | for model_class in self.all_model_classes: |
| | with self.subTest(model_class.__name__): |
| | fx_model_class_name = "Flax" + model_class.__name__ |
| |
|
| | if not hasattr(transformers, fx_model_class_name): |
| | # transformers does not have a Flax version of this model yet, so skip it.
| | continue
| |
|
| | # Compare all outputs: hidden states and, when available, attentions.
| | config.output_hidden_states = True |
| | config.output_attentions = self.has_attentions |
| |
|
| | fx_model_class = getattr(transformers, fx_model_class_name) |
| |
|
| | |
| | pt_model = model_class(config).eval() |
| | # Flax models don't use the `use_cache` option and don't return the cache by
| | # default, so disable `use_cache` for the PyTorch model as well.
| | pt_model.config.use_cache = False |
| |
|
| | |
| | fx_model = fx_model_class(config, dtype=jnp.float32) |
| |
|
| | |
| | fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() |
| |
|
| | |
| | pt_inputs = self._prepare_for_class(inputs_dict, model_class) |
| |
|
| | |
| | pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} |
| |
|
| | |
| | pt_inputs = { |
| | k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() |
| | } |
| |
|
| | |
| | fx_inputs = {k: np.array(v) for k, v in pt_inputs.items() if torch.is_tensor(v)} |
| |
|
| | fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) |
| | fx_model.params = fx_state |
| |
|
| | |
| | pt_model.to(torch_device) |
| |
|
| | with torch.no_grad(): |
| | pt_outputs = pt_model(**pt_inputs) |
| | fx_outputs = fx_model(**fx_inputs) |
| |
|
| | fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) |
| | pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) |
| |
|
| | self.assertEqual(fx_keys, pt_keys) |
| | self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) |
| |
|
| | with tempfile.TemporaryDirectory() as tmpdirname: |
| | pt_model.save_pretrained(tmpdirname) |
| | fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, from_pt=True) |
| |
|
| | fx_outputs_loaded = fx_model_loaded(**fx_inputs) |
| |
|
| | fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None]) |
| | pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) |
| |
|
| | self.assertEqual(fx_keys, pt_keys) |
| | self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class) |
| |
|
| | @is_pt_flax_cross_test |
| | def test_equivalence_flax_to_pt(self): |
| | config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() |
| |
|
| | for model_class in self.all_model_classes: |
| | with self.subTest(model_class.__name__): |
| | fx_model_class_name = "Flax" + model_class.__name__ |
| |
|
| | if not hasattr(transformers, fx_model_class_name): |
| | # transformers does not have a Flax version of this model yet, so skip it.
| | continue
| |
|
| | # Compare all outputs: hidden states and, when available, attentions.
| | config.output_hidden_states = True |
| | config.output_attentions = self.has_attentions |
| |
|
| | fx_model_class = getattr(transformers, fx_model_class_name) |
| |
|
| | |
| | pt_model = model_class(config).eval() |
| | # Flax models don't use the `use_cache` option and don't return the cache by
| | # default, so disable `use_cache` for the PyTorch model as well.
| | pt_model.config.use_cache = False |
| |
|
| | |
| | fx_model = fx_model_class(config, dtype=jnp.float32) |
| |
|
| | |
| | fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() |
| |
|
| | |
| | pt_inputs = self._prepare_for_class(inputs_dict, model_class) |
| |
|
| | |
| | pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} |
| |
|
| | |
| | pt_inputs = { |
| | k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() |
| | } |
| |
|
| | |
| | fx_inputs = {k: np.array(v) for k, v in pt_inputs.items() if torch.is_tensor(v)} |
| |
|
| | pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) |
| |
|
| | # Make sure the weights are tied in the PyTorch model after loading the Flax params.
| | pt_model.tie_weights() |
| |
|
| | |
| | pt_model.to(torch_device) |
| |
|
| | with torch.no_grad(): |
| | pt_outputs = pt_model(**pt_inputs) |
| | fx_outputs = fx_model(**fx_inputs) |
| |
|
| | fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) |
| | pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) |
| |
|
| | self.assertEqual(fx_keys, pt_keys) |
| | self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) |
| |
|
| | with tempfile.TemporaryDirectory() as tmpdirname: |
| | fx_model.save_pretrained(tmpdirname) |
| | pt_model_loaded = model_class.from_pretrained(tmpdirname, from_flax=True) |
| |
|
| | |
| | pt_model_loaded.to(torch_device) |
| | pt_model_loaded.eval() |
| |
|
| | with torch.no_grad(): |
| | pt_outputs_loaded = pt_model_loaded(**pt_inputs) |
| |
|
| | fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) |
| | pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None]) |
| |
|
| | self.assertEqual(fx_keys, pt_keys) |
| | self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class) |
| |
|
| | def test_inputs_embeds(self): |
| | config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() |
| |
|
| | for model_class in self.all_model_classes: |
| | model = model_class(config) |
| | model.to(torch_device) |
| | model.eval() |
| |
|
| | inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) |
| |
|
| | if not self.is_encoder_decoder: |
| | input_ids = inputs["input_ids"] |
| | del inputs["input_ids"] |
| | else: |
| | encoder_input_ids = inputs["input_ids"] |
| | decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) |
| | del inputs["input_ids"] |
| | inputs.pop("decoder_input_ids", None) |
| |
|
| | wte = model.get_input_embeddings() |
| | if not self.is_encoder_decoder: |
| | inputs["inputs_embeds"] = wte(input_ids) |
| | else: |
| | inputs["inputs_embeds"] = wte(encoder_input_ids) |
| | inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) |
| |
|
| | with torch.no_grad(): |
| | model(**inputs)[0] |
| |
|
| | @require_torch_multi_gpu |
| | def test_multi_gpu_data_parallel_forward(self): |
| | config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() |
| |
|
| | # Some params should not be scattered by nn.DataParallel,
| | # so just remove them if they are present.
| | blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"] |
| | for k in blacklist_non_batched_params: |
| | inputs_dict.pop(k, None) |
| |
|
| | # Move input tensors to GPU 0.
| | for k, v in inputs_dict.items(): |
| | if torch.is_tensor(v): |
| | inputs_dict[k] = v.to(0) |
| |
|
| | for model_class in self.all_model_classes: |
| | model = model_class(config=config) |
| | model.to(0) |
| | model.eval() |
| |
|
| | # Wrap the model in nn.DataParallel.
| | model = nn.DataParallel(model) |
| | with torch.no_grad(): |
| | _ = model(**self._prepare_for_class(inputs_dict, model_class)) |
| |
|
| | @require_torch_multi_gpu |
| | def test_model_parallelization(self): |
| | if not self.test_model_parallel: |
| | return |
| |
|
| | |
| | def get_current_gpu_memory_use(): |
| | """returns a list of cuda memory allocations per GPU in MBs""" |
| |
|
| | per_device_memory = [] |
| | for device_id in range(torch.cuda.device_count()):
| | with torch.cuda.device(device_id):
| | per_device_memory.append(torch.cuda.memory_allocated() >> 20)
| |
|
| | return per_device_memory |
| |
|
| | |
| | config = self.model_tester.get_large_model_config() |
| |
|
| | for model_class in self.all_parallelizable_model_classes: |
| | torch.cuda.empty_cache() |
| |
|
| | # Retrieve the initial memory usage (this can be non-zero if CUDA kernels are preloaded).
| | memory_at_start = get_current_gpu_memory_use() |
| |
|
| | # Put the model on GPU 0 and take a memory snapshot.
| | model = model_class(config) |
| | model.to("cuda:0") |
| | memory_after_model_load = get_current_gpu_memory_use() |
| |
|
| | # The memory use on GPU 0 should be higher than it was initially.
| | self.assertGreater(memory_after_model_load[0], memory_at_start[0]) |
| |
|
| | del model |
| | gc.collect() |
| | torch.cuda.empty_cache() |
| |
|
| | # Take a fresh baseline snapshot before testing parallelization.
| | memory_at_start = get_current_gpu_memory_use() |
| |
|
| | # Parallelize the model across the GPUs and take another snapshot.
| | model = model_class(config) |
| | model.parallelize() |
| | memory_after_parallelization = get_current_gpu_memory_use() |
| |
|
| | # Every device in the device map should now use more memory than at the baseline.
| | for n in range(len(model.device_map.keys())): |
| | self.assertGreater(memory_after_parallelization[n], memory_at_start[n]) |
| |
|
| | # GPU 0 should use less memory than when the entire model lived on it.
| | self.assertLess(memory_after_parallelization[0], memory_after_model_load[0]) |
| |
|
| | # Conversely, GPU 1 should use more memory than before, since it now holds
| | # part of the model.
| | self.assertGreater(memory_after_parallelization[1], memory_after_model_load[1]) |
| |
|
| | del model |
| | gc.collect() |
| | torch.cuda.empty_cache() |
| |
|
| | @require_torch_multi_gpu |
| | def test_model_parallel_equal_results(self): |
| | if not self.test_model_parallel: |
| | return |
| |
|
| | config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() |
| |
|
| | for model_class in self.all_parallelizable_model_classes: |
| | inputs_dict = self._prepare_for_class(inputs_dict, model_class) |
| |
|
| | def cast_to_device(dictionary, device): |
| | output = {} |
| | for k, v in dictionary.items(): |
| | if isinstance(v, torch.Tensor): |
| | output[k] = v.to(device) |
| | else: |
| | output[k] = v |
| |
|
| | return output |
| |
|
| | model = model_class(config) |
| | output = model(**cast_to_device(inputs_dict, "cpu")) |
| |
|
| | model.parallelize() |
| |
|
| | parallel_output = model(**cast_to_device(inputs_dict, "cuda:0")) |
| |
|
| | for value, parallel_value in zip(output, parallel_output): |
| | if isinstance(value, torch.Tensor): |
| | self.assertTrue(torch.allclose(value, parallel_value.to("cpu"), atol=1e-7)) |
| | elif isinstance(value, (Tuple, List)): |
| | for value_, parallel_value_ in zip(value, parallel_value): |
| | self.assertTrue(torch.allclose(value_, parallel_value_.to("cpu"), atol=1e-7)) |
| |
|
| | @require_torch_multi_gpu |
| | def test_model_parallel_beam_search(self): |
| | if not self.test_model_parallel: |
| | return |
| |
|
| | all_generative_and_parallelizable_model_classes = tuple( |
| | set(self.all_generative_model_classes).intersection(self.all_parallelizable_model_classes) |
| | ) |
| |
|
| | config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() |
| |
|
| | for model_class in all_generative_and_parallelizable_model_classes: |
| | inputs_dict = self._prepare_for_class(inputs_dict, model_class) |
| | model = model_class(config) |
| |
|
| | def cast_to_device(dictionary, device): |
| | output = {} |
| | for k, v in dictionary.items(): |
| | if isinstance(v, torch.Tensor): |
| | output[k] = v.to(device) |
| | else: |
| | output[k] = v |
| |
|
| | return output |
| |
|
| | model.parallelize() |
| | model.generate(**cast_to_device(inputs_dict, "cuda:0"), num_beams=2) |
| |
|
| | def check_device_map_is_respected(self, model, device_map): |
| | for param_name, param in model.named_parameters(): |
| | # Device maps can be specified at module granularity, so walk up the
| | # parameter's name hierarchy until an entry is found.
| | while len(param_name) > 0 and param_name not in device_map: |
| | param_name = ".".join(param_name.split(".")[:-1]) |
| | if param_name not in device_map: |
| | raise ValueError("device map is incomplete, it does not contain any device for `param_name`.") |
| |
|
| | param_device = device_map[param_name] |
| | if param_device in ["cpu", "disk"]: |
| | self.assertEqual(param.device, torch.device("meta")) |
| | else: |
| | self.assertEqual(param.device, torch.device(param_device)) |
| |
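| | # A minimal sketch of the lookup above (hypothetical module names): with
| | # device_map = {"transformer.h": 0, "lm_head": "cpu"}, the parameter
| | # "transformer.h.3.attn.weight" is resolved by stripping trailing name
| | # components until "transformer.h" matches, so it is expected on device 0,
| | # while "lm_head.weight" resolves to "lm_head" and is expected on "meta"
| | # (the placeholder device used for CPU/disk-offloaded weights).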
|
| | @require_accelerate |
| | @mark.accelerate_tests |
| | @require_torch_gpu |
| | def test_disk_offload(self): |
| | config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() |
| |
|
| | for model_class in self.all_model_classes: |
| | if model_class._no_split_modules is None: |
| | continue |
| |
|
| | inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) |
| | model = model_class(config).eval() |
| | model = model.to(torch_device) |
| | torch.manual_seed(0) |
| | base_output = model(**inputs_dict_class) |
| |
|
| | model_size = compute_module_sizes(model)[""] |
| | max_size = int(self.model_split_percents[0] * model_size) |
| | with tempfile.TemporaryDirectory() as tmp_dir: |
| | model.cpu().save_pretrained(tmp_dir) |
| |
|
| | max_memory = {0: max_size, "cpu": max_size} |
| | with self.assertRaises(ValueError): |
| | # This errors out because no offload folder is provided for the weights placed on disk.
| | new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) |
| |
|
| | new_model = model_class.from_pretrained( |
| | tmp_dir, device_map="auto", max_memory=max_memory, offload_folder=tmp_dir |
| | ) |
| |
|
| | self.check_device_map_is_respected(new_model, new_model.hf_device_map) |
| | torch.manual_seed(0) |
| | new_output = new_model(**inputs_dict_class) |
| |
|
| | self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) |
| |
|
| | @require_accelerate |
| | @mark.accelerate_tests |
| | @require_torch_gpu |
| | def test_cpu_offload(self): |
| | config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() |
| |
|
| | for model_class in self.all_model_classes: |
| | if model_class._no_split_modules is None: |
| | continue |
| |
|
| | inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) |
| | model = model_class(config).eval() |
| | model = model.to(torch_device) |
| |
|
| | torch.manual_seed(0) |
| | base_output = model(**inputs_dict_class) |
| |
|
| | model_size = compute_module_sizes(model)[""] |
| | # Test several GPU memory budgets derived from the model size.
| | max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents] |
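| | # For example (hypothetical values), with model_split_percents = [0.5, 0.7]
| | # this tests GPU budgets of 50% and 70% of the total model size, with the
| | # remainder offloaded to CPU.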
| | with tempfile.TemporaryDirectory() as tmp_dir: |
| | model.cpu().save_pretrained(tmp_dir) |
| |
|
| | for max_size in max_gpu_sizes: |
| | max_memory = {0: max_size, "cpu": model_size * 2} |
| | new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) |
| | # Make sure part of the model actually ends up offloaded to the CPU.
| | self.assertSetEqual(set(new_model.hf_device_map.values()), {0, "cpu"}) |
| |
|
| | self.check_device_map_is_respected(new_model, new_model.hf_device_map) |
| |
|
| | torch.manual_seed(0) |
| | new_output = new_model(**inputs_dict_class) |
| |
|
| | self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) |
| |
|
| | @require_accelerate |
| | @mark.accelerate_tests |
| | @require_torch_multi_gpu |
| | def test_model_parallelism(self): |
| | config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() |
| |
|
| | for model_class in self.all_model_classes: |
| | if model_class._no_split_modules is None: |
| | continue |
| |
|
| | inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) |
| | model = model_class(config).eval() |
| | model = model.to(torch_device) |
| |
|
| | torch.manual_seed(0) |
| | base_output = model(**inputs_dict_class) |
| |
|
| | model_size = compute_module_sizes(model)[""] |
| | # Test several GPU memory budgets derived from the model size.
| | max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents] |
| | with tempfile.TemporaryDirectory() as tmp_dir: |
| | model.cpu().save_pretrained(tmp_dir) |
| |
|
| | for max_size in max_gpu_sizes: |
| | max_memory = {0: max_size, 1: model_size * 2, "cpu": model_size * 2} |
| | new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) |
| | # Make sure the model is actually split across the two GPUs.
| | self.assertSetEqual(set(new_model.hf_device_map.values()), {0, 1}) |
| |
|
| | self.check_device_map_is_respected(new_model, new_model.hf_device_map) |
| |
|
| | torch.manual_seed(0) |
| | new_output = new_model(**inputs_dict_class) |
| |
|
| | self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) |
| |
|
| | def test_problem_types(self): |
| | config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() |
| |
|
| | problem_types = [ |
| | {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, |
| | {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, |
| | {"title": "regression", "num_labels": 1, "dtype": torch.float}, |
| | ] |
| |
|
| | for model_class in self.all_model_classes: |
| | if model_class.__name__ not in [ |
| | *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES), |
| | *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), |
| | ]: |
| | continue |
| |
|
| | for problem_type in problem_types: |
| | with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"): |
| | config.problem_type = problem_type["title"] |
| | config.num_labels = problem_type["num_labels"] |
| |
|
| | model = model_class(config) |
| | model.to(torch_device) |
| | model.train() |
| |
|
| | inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) |
| |
|
| | if problem_type["num_labels"] > 1: |
| | inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"]) |
| |
|
| | inputs["labels"] = inputs["labels"].to(problem_type["dtype"]) |
| |
|
| | # This checks that we do not trigger the PyTorch warning "Using a target size that
| | # is different to the input size", which would be a symptom that the regression
| | # problem is set up incorrectly.
| | with warnings.catch_warnings(record=True) as warning_list: |
| | loss = model(**inputs).loss |
| | for w in warning_list: |
| | if "Using a target size that is different to the input size" in str(w.message): |
| | raise ValueError( |
| | f"Something is going wrong in the regression problem: intercepted {w.message}" |
| | ) |
| |
|
| | loss.backward() |
| |
|
| | def test_load_with_mismatched_shapes(self): |
| | if not self.test_mismatched_shapes: |
| | return |
| | config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() |
| |
|
| | for model_class in self.all_model_classes: |
| | if model_class.__name__ not in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES): |
| | continue |
| |
|
| | with self.subTest(msg=f"Testing {model_class}"): |
| | with tempfile.TemporaryDirectory() as tmp_dir: |
| | model = model_class(config) |
| | model.save_pretrained(tmp_dir) |
| |
|
| | # Loading fails when `ignore_mismatched_sizes=True` is not set.
| | with self.assertRaises(RuntimeError): |
| | new_model = AutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42) |
| | with self.assertRaises(RuntimeError): |
| | new_model_without_prefix = AutoModel.from_pretrained(tmp_dir, vocab_size=10) |
| |
|
| | logger = logging.get_logger("transformers.modeling_utils") |
| |
|
| | with CaptureLogger(logger) as cl: |
| | new_model = AutoModelForSequenceClassification.from_pretrained( |
| | tmp_dir, num_labels=42, ignore_mismatched_sizes=True |
| | ) |
| | self.assertIn("the shapes did not match", cl.out) |
| | new_model.to(torch_device) |
| | inputs = self._prepare_for_class(inputs_dict, model_class) |
| | logits = new_model(**inputs).logits |
| | self.assertEqual(logits.shape[1], 42) |
| |
|
| | with CaptureLogger(logger) as cl: |
| | new_model_without_prefix = AutoModel.from_pretrained( |
| | tmp_dir, vocab_size=10, ignore_mismatched_sizes=True |
| | ) |
| | self.assertIn("the shapes did not match", cl.out) |
| | input_ids = ids_tensor((2, 8), 10) |
| | new_model_without_prefix.to(torch_device) |
| | if self.is_encoder_decoder: |
| | new_model_without_prefix(input_ids, decoder_input_ids=input_ids) |
| | else: |
| | new_model_without_prefix(input_ids) |
| |
|
| |
|
| | global_rng = random.Random() |
| |
|
| |
|
| | def ids_tensor(shape, vocab_size, rng=None, name=None): |
| | # Creates a random integer tensor of the given shape with values in [0, vocab_size).
| | if rng is None: |
| | rng = global_rng |
| |
|
| | total_dims = 1 |
| | for dim in shape: |
| | total_dims *= dim |
| |
|
| | values = [] |
| | for _ in range(total_dims): |
| | values.append(rng.randint(0, vocab_size - 1)) |
| |
|
| | return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous() |
| |
|
| |
|
| | def random_attention_mask(shape, rng=None, name=None): |
| | attn_mask = ids_tensor(shape, vocab_size=2, rng=rng, name=name)
| | # Make sure that at least one token is attended to for each batch.
| | attn_mask[:, -1] = 1
| | return attn_mask |
| |
|
| |
|
| | def floats_tensor(shape, scale=1.0, rng=None, name=None): |
| | """Creates a random float32 tensor""" |
| | if rng is None: |
| | rng = global_rng |
| |
|
| | total_dims = 1 |
| | for dim in shape: |
| | total_dims *= dim |
| |
|
| | values = [] |
| | for _ in range(total_dims): |
| | values.append(rng.random() * scale) |
| |
|
| | return torch.tensor(data=values, dtype=torch.float, device=torch_device).view(shape).contiguous() |
| |
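| | # Usage sketch for the helpers above (shapes and sizes are arbitrary examples):
| | # input_ids = ids_tensor((2, 8), vocab_size=10) # int64 ids in [0, 10)
| | # mask = random_attention_mask((2, 8)) # 0/1 mask, last column forced to 1
| | # pixel_values = floats_tensor((2, 3, 4, 4), scale=1.0) # float32 values in [0, 1)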
|
| |
|
| | def check_models_equal(model1, model2): |
| | models_are_equal = True |
| | for model1_p, model2_p in zip(model1.parameters(), model2.parameters()): |
| | if model1_p.data.ne(model2_p.data).sum() > 0: |
| | models_are_equal = False |
| |
|
| | return models_are_equal |
| |
|
| |
|
| | @require_torch |
| | class ModelUtilsTest(TestCasePlus): |
| | @slow |
| | def test_model_from_pretrained(self): |
| | for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: |
| | config = BertConfig.from_pretrained(model_name) |
| | self.assertIsNotNone(config) |
| | self.assertIsInstance(config, PretrainedConfig) |
| |
|
| | model = BertModel.from_pretrained(model_name) |
| | model, loading_info = BertModel.from_pretrained(model_name, output_loading_info=True) |
| | self.assertIsNotNone(model) |
| | self.assertIsInstance(model, PreTrainedModel) |
| |
|
| | self.assertEqual(len(loading_info["missing_keys"]), 0) |
| | self.assertEqual(len(loading_info["unexpected_keys"]), 8) |
| | self.assertEqual(len(loading_info["mismatched_keys"]), 0) |
| | self.assertEqual(len(loading_info["error_msgs"]), 0) |
| |
|
| | config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True) |
| |
|
| | |
| | config.name_or_path = model_name |
| |
|
| | model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True) |
| | self.assertEqual(model.config.output_hidden_states, True) |
| | self.assertEqual(model.config, config) |
| |
|
| | def test_model_from_pretrained_subfolder(self): |
| | config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") |
| | model = BertModel(config) |
| |
|
| | subfolder = "bert" |
| | with tempfile.TemporaryDirectory() as tmp_dir: |
| | model.save_pretrained(os.path.join(tmp_dir, subfolder)) |
| |
|
| | with self.assertRaises(OSError): |
| | _ = BertModel.from_pretrained(tmp_dir) |
| |
|
| | model_loaded = BertModel.from_pretrained(tmp_dir, subfolder=subfolder) |
| |
|
| | self.assertTrue(check_models_equal(model, model_loaded)) |
| |
|
| | def test_model_from_pretrained_subfolder_sharded(self): |
| | config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") |
| | model = BertModel(config) |
| |
|
| | subfolder = "bert" |
| | with tempfile.TemporaryDirectory() as tmp_dir: |
| | model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB") |
| |
|
| | with self.assertRaises(OSError): |
| | _ = BertModel.from_pretrained(tmp_dir) |
| |
|
| | model_loaded = BertModel.from_pretrained(tmp_dir, subfolder=subfolder) |
| |
|
| | self.assertTrue(check_models_equal(model, model_loaded)) |
| |
|
| | def test_model_from_pretrained_hub_subfolder(self): |
| | subfolder = "bert" |
| | model_id = "hf-internal-testing/tiny-random-bert-subfolder" |
| | with self.assertRaises(OSError): |
| | _ = BertModel.from_pretrained(model_id) |
| |
|
| | model = BertModel.from_pretrained(model_id, subfolder=subfolder) |
| |
|
| | self.assertIsNotNone(model) |
| |
|
| | def test_model_from_pretrained_hub_subfolder_sharded(self): |
| | subfolder = "bert" |
| | model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder" |
| | with self.assertRaises(OSError): |
| | _ = BertModel.from_pretrained(model_id) |
| |
|
| | model = BertModel.from_pretrained(model_id, subfolder=subfolder) |
| |
|
| | self.assertIsNotNone(model) |
| |
|
| | def test_model_from_pretrained_with_different_pretrained_model_name(self): |
| | model = T5ForConditionalGeneration.from_pretrained(TINY_T5) |
| | self.assertIsNotNone(model) |
| |
|
| | logger = logging.get_logger("transformers.configuration_utils") |
| | with CaptureLogger(logger) as cl: |
| | BertModel.from_pretrained(TINY_T5) |
| | self.assertTrue("You are using a model of type t5 to instantiate a model of type bert" in cl.out) |
| |
|
| | def test_model_from_config_torch_dtype(self): |
| | # Test that a model can be instantiated with the dtype of the user's choice,
| | # as long as it is a floating-point dtype.
| |
|
| | config = T5Config.from_pretrained(TINY_T5) |
| | model = AutoModel.from_config(config) |
| | |
| | |
| | self.assertEqual(model.dtype, torch.float32) |
| |
|
| | model = AutoModel.from_config(config, torch_dtype=torch.float16) |
| | self.assertEqual(model.dtype, torch.float16) |
| |
|
| | |
| | with self.assertRaises(ValueError): |
| | model = AutoModel.from_config(config, torch_dtype=torch.int64) |
| |
|
| | def test_model_from_pretrained_torch_dtype(self): |
| | # Test that a model can be loaded with its dtype chosen either
| | # 1. explicitly via `from_pretrained`'s `torch_dtype` argument, or
| | # 2. by auto-discovery from the saved weights (`torch_dtype="auto"`),
| | # so that a model saved with `model.half()` can be loaded back in fp16.
| | model_path = self.get_auto_remove_tmp_dir() |
| |
|
| | |
| | model = T5ForConditionalGeneration.from_pretrained(TINY_T5) |
| | self.assertEqual(model.dtype, torch.float32) |
| |
|
| | def remove_torch_dtype(model_path): |
| | file = f"{model_path}/config.json" |
| | with open(file, "r", encoding="utf-8") as f: |
| | s = json.load(f) |
| | s.pop("torch_dtype") |
| | with open(file, "w", encoding="utf-8") as f: |
| | json.dump(s, f) |
| |
|
| | |
| | model.save_pretrained(model_path) |
| | model = T5ForConditionalGeneration.from_pretrained(model_path) |
| | self.assertEqual(model.dtype, torch.float32) |
| | |
| | model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") |
| | self.assertEqual(model.dtype, torch.float32) |
| | |
| | |
| | |
| | remove_torch_dtype(model_path) |
| | model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") |
| | self.assertEqual(model.dtype, torch.float32) |
| |
|
| | |
| | model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.float16) |
| | self.assertEqual(model.dtype, torch.float16) |
| |
|
| | |
| | model = model.half() |
| | model.save_pretrained(model_path) |
| | |
| | model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") |
| | self.assertEqual(model.config.torch_dtype, torch.float16) |
| | self.assertEqual(model.dtype, torch.float16) |
| | |
| | with open(f"{model_path}/config.json") as f: |
| | config_dict = json.load(f) |
| | self.assertEqual(config_dict["torch_dtype"], "float16") |
| | |
| | |
| | remove_torch_dtype(model_path) |
| | model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") |
| | self.assertEqual(model.dtype, torch.float16) |
| |
|
| | |
| | model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.float16) |
| | self.assertEqual(model.dtype, torch.float16) |
| |
|
| | |
| | |
| | model = AutoModel.from_pretrained(TINY_T5, torch_dtype="auto") |
| | |
| | |
| | self.assertNotEqual(model.config.torch_dtype, "auto") |
| | |
| | self.assertEqual(model.dtype, torch.float32) |
| | model = AutoModel.from_pretrained(TINY_T5, torch_dtype=torch.float16) |
| | self.assertEqual(model.dtype, torch.float16) |
| |
|
| | |
| | model = AutoModel.from_pretrained(TINY_BERT_FOR_TOKEN_CLASSIFICATION, torch_dtype="auto") |
| | self.assertEqual(model.dtype, torch.float32) |
| |
|
| | def test_no_super_init_config_and_model(self): |
| | config = NoSuperInitConfig(attribute=32) |
| | model = NoSuperInitModel(config) |
| |
|
| | with tempfile.TemporaryDirectory() as tmp_dir: |
| | model.save_pretrained(tmp_dir) |
| |
|
| | new_model = NoSuperInitModel.from_pretrained(tmp_dir) |
| |
|
| | for p1, p2 in zip(model.parameters(), new_model.parameters()): |
| | self.assertTrue(torch.equal(p1, p2)) |
| |
|
| | def test_shard_checkpoint(self): |
| | |
| | model = torch.nn.Sequential( |
| | torch.nn.Linear(100, 200, bias=False), |
| | torch.nn.Linear(200, 200, bias=False), |
| | torch.nn.Linear(200, 100, bias=False), |
| | torch.nn.Linear(100, 50, bias=False), |
| | ) |
| | state_dict = model.state_dict() |
| |
|
| | with self.subTest("No shard when max size is bigger than model size"): |
| | shards, index = shard_checkpoint(state_dict) |
| | self.assertIsNone(index) |
| | self.assertDictEqual(shards, {WEIGHTS_NAME: state_dict}) |
| |
|
| | with self.subTest("Test sharding, no weights bigger than max size"): |
| | shards, index = shard_checkpoint(state_dict, max_shard_size="300kB") |
| | # With a 300kB budget, the first two weights share a shard and the last two share another.
| | self.assertDictEqual( |
| | index, |
| | { |
| | "metadata": {"total_size": 340000}, |
| | "weight_map": { |
| | "0.weight": "pytorch_model-00001-of-00002.bin", |
| | "1.weight": "pytorch_model-00001-of-00002.bin", |
| | "2.weight": "pytorch_model-00002-of-00002.bin", |
| | "3.weight": "pytorch_model-00002-of-00002.bin", |
| | }, |
| | }, |
| | ) |
| |
|
| | shard1 = {"0.weight": state_dict["0.weight"], "1.weight": state_dict["1.weight"]} |
| | shard2 = {"2.weight": state_dict["2.weight"], "3.weight": state_dict["3.weight"]} |
| | self.assertDictEqual( |
| | shards, {"pytorch_model-00001-of-00002.bin": shard1, "pytorch_model-00002-of-00002.bin": shard2} |
| | ) |
| |
|
| | with self.subTest("Test sharding with weights bigger than max size"): |
| | shards, index = shard_checkpoint(state_dict, max_shard_size="100kB") |
| | # With a 100kB budget, each of the first two weights needs a shard of its own.
| | self.assertDictEqual( |
| | index, |
| | { |
| | "metadata": {"total_size": 340000}, |
| | "weight_map": { |
| | "0.weight": "pytorch_model-00001-of-00003.bin", |
| | "1.weight": "pytorch_model-00002-of-00003.bin", |
| | "2.weight": "pytorch_model-00003-of-00003.bin", |
| | "3.weight": "pytorch_model-00003-of-00003.bin", |
| | }, |
| | }, |
| | ) |
| |
|
| | shard1 = {"0.weight": state_dict["0.weight"]} |
| | shard2 = {"1.weight": state_dict["1.weight"]} |
| | shard3 = {"2.weight": state_dict["2.weight"], "3.weight": state_dict["3.weight"]} |
| | self.assertDictEqual( |
| | shards, |
| | { |
| | "pytorch_model-00001-of-00003.bin": shard1, |
| | "pytorch_model-00002-of-00003.bin": shard2, |
| | "pytorch_model-00003-of-00003.bin": shard3, |
| | }, |
| | ) |
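| | # The arithmetic behind the expectations above: each float32 weight takes 4 bytes,
| | # so 0.weight (200x100) is 80kB, 1.weight (200x200) is 160kB, 2.weight (100x200)
| | # is 80kB and 3.weight (50x100) is 20kB, i.e. 340,000 bytes in total. With
| | # max_shard_size="300kB", 0.weight + 1.weight (240kB) fit in the first shard but
| | # adding 2.weight would exceed the limit; with "100kB", 1.weight alone is already
| | # over the limit and therefore gets a shard of its own.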
| |
|
| | def test_checkpoint_sharding_local(self): |
| | model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") |
| |
|
| | with tempfile.TemporaryDirectory() as tmp_dir: |
| | # Reuse the same folder for every size to check that a new save erases the old checkpoint.
| | for max_size in ["50kB", "50kiB", "100kB", "100kiB", "200kB", "200kiB"]: |
| | model.save_pretrained(tmp_dir, max_shard_size=max_size) |
| |
|
| | # Collect each shard file and its size.
| | shard_to_size = {} |
| | for shard in os.listdir(tmp_dir): |
| | if shard.endswith(".bin"): |
| | shard_file = os.path.join(tmp_dir, shard) |
| | shard_to_size[shard_file] = os.path.getsize(shard_file) |
| |
|
| | index_file = os.path.join(tmp_dir, WEIGHTS_INDEX_NAME) |
| | # Check that there is an index file but no monolithic weight file.
| | self.assertTrue(os.path.isfile(index_file)) |
| | self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) |
| |
|
| | # A shard may exceed `max_size` only when it holds a single weight.
| | for shard_file, size in shard_to_size.items(): |
| | if max_size.endswith("kiB"): |
| | max_size_int = int(max_size[:-3]) * 2**10 |
| | else: |
| | max_size_int = int(max_size[:-2]) * 10**3 |
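| | # e.g. "50kB" parses to 50 * 10**3 = 50,000 bytes, while "50kiB" parses to
| | # 50 * 2**10 = 51,200 bytes.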
| | # Note: pickle adds some overhead, so a shard can end up slightly bigger than the
| | # requested size (the budget only counts parameter bytes).
| | if size >= max_size_int + 50000: |
| | state_dict = torch.load(shard_file) |
| | self.assertEqual(len(state_dict), 1) |
| |
|
| | # Check that the index and the shard files found on disk match.
| | with open(index_file, "r", encoding="utf-8") as f: |
| | index = json.loads(f.read()) |
| |
|
| | all_shards = set(index["weight_map"].values()) |
| | shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".bin")} |
| | self.assertSetEqual(all_shards, shards_found) |
| |
|
| | # Finally, check that the model can be reloaded from the sharded checkpoint.
| | new_model = BertModel.from_pretrained(tmp_dir) |
| | for p1, p2 in zip(model.parameters(), new_model.parameters()): |
| | self.assertTrue(torch.allclose(p1, p2)) |
| |
|
| | def test_checkpoint_sharding_from_hub(self): |
| | model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") |
| | # The sharded checkpoint holds the same weights as the reference unsharded model.
| | ref_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") |
| | for p1, p2 in zip(model.parameters(), ref_model.parameters()): |
| | self.assertTrue(torch.allclose(p1, p2)) |
| |
|
| | def test_checkpoint_variant_local(self): |
| | model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") |
| |
|
| | with tempfile.TemporaryDirectory() as tmp_dir: |
| | model.save_pretrained(tmp_dir, variant="v2") |
| |
|
| | weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) |
| |
|
| | weights_file = os.path.join(tmp_dir, weights_name) |
| | self.assertTrue(os.path.isfile(weights_file)) |
| | self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) |
| |
|
| | with self.assertRaises(EnvironmentError): |
| | _ = BertModel.from_pretrained(tmp_dir) |
| |
|
| | new_model = BertModel.from_pretrained(tmp_dir, variant="v2") |
| |
|
| | for p1, p2 in zip(model.parameters(), new_model.parameters()): |
| | self.assertTrue(torch.allclose(p1, p2)) |
| |
|
| | def test_checkpoint_variant_local_sharded(self): |
| | model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") |
| |
|
| | with tempfile.TemporaryDirectory() as tmp_dir: |
| | model.save_pretrained(tmp_dir, variant="v2", max_shard_size="50kB") |
| |
|
| | weights_index_name = ".".join(WEIGHTS_INDEX_NAME.split(".")[:-1] + ["v2"] + ["json"]) |
| | weights_index_file = os.path.join(tmp_dir, weights_index_name) |
| | self.assertTrue(os.path.isfile(weights_index_file)) |
| | self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_INDEX_NAME))) |
| |
|
| | for i in range(1, 6): |
| | weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + [f"v2-0000{i}-of-00006"] + ["bin"]) |
| | weights_name_file = os.path.join(tmp_dir, weights_name) |
| | self.assertTrue(os.path.isfile(weights_name_file)) |
| |
|
| | with self.assertRaises(EnvironmentError): |
| | _ = BertModel.from_pretrained(tmp_dir) |
| |
|
| | new_model = BertModel.from_pretrained(tmp_dir, variant="v2") |
| |
|
| | for p1, p2 in zip(model.parameters(), new_model.parameters()): |
| | self.assertTrue(torch.allclose(p1, p2)) |
| |
|
| | @require_safetensors |
| | def test_checkpoint_variant_local_safe(self): |
| | model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") |
| |
|
| | with tempfile.TemporaryDirectory() as tmp_dir: |
| | model.save_pretrained(tmp_dir, variant="v2", safe_serialization=True) |
| |
|
| | weights_name = ".".join(SAFE_WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["safetensors"]) |
| |
|
| | weights_file = os.path.join(tmp_dir, weights_name) |
| | self.assertTrue(os.path.isfile(weights_file)) |
| | self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) |
| |
|
| | with self.assertRaises(EnvironmentError): |
| | _ = BertModel.from_pretrained(tmp_dir) |
| |
|
| | new_model = BertModel.from_pretrained(tmp_dir, variant="v2") |
| |
|
| | for p1, p2 in zip(model.parameters(), new_model.parameters()): |
| | self.assertTrue(torch.allclose(p1, p2)) |
| |
|
| | @require_safetensors |
| | def test_checkpoint_variant_local_sharded_safe(self): |
| | model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") |
| |
|
| | with tempfile.TemporaryDirectory() as tmp_dir: |
| | model.save_pretrained(tmp_dir, variant="v2", max_shard_size="50kB", safe_serialization=True) |
| |
|
| | weights_index_name = ".".join(SAFE_WEIGHTS_INDEX_NAME.split(".")[:-1] + ["v2"] + ["json"]) |
| | weights_index_file = os.path.join(tmp_dir, weights_index_name) |
| | self.assertTrue(os.path.isfile(weights_index_file)) |
| | self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) |
| |
|
| | for i in range(1, 6): |
| | weights_name = ".".join(SAFE_WEIGHTS_NAME.split(".")[:-1] + [f"v2-0000{i}-of-00006"] + ["safetensors"]) |
| | weights_name_file = os.path.join(tmp_dir, weights_name) |
| | self.assertTrue(os.path.isfile(weights_name_file)) |
| |
|
| | with self.assertRaises(EnvironmentError): |
| | _ = BertModel.from_pretrained(tmp_dir) |
| |
|
| | new_model = BertModel.from_pretrained(tmp_dir, variant="v2") |
| |
|
| | for p1, p2 in zip(model.parameters(), new_model.parameters()): |
| | self.assertTrue(torch.allclose(p1, p2)) |
| |
|
| | def test_checkpoint_variant_hub(self): |
| | with tempfile.TemporaryDirectory() as tmp_dir: |
| | with self.assertRaises(EnvironmentError): |
| | _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir) |
| | model = BertModel.from_pretrained( |
| | "hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir, variant="v2" |
| | ) |
| | self.assertIsNotNone(model) |
| |
|
| | def test_checkpoint_variant_hub_sharded(self): |
| | with tempfile.TemporaryDirectory() as tmp_dir: |
| | with self.assertRaises(EnvironmentError): |
| | _ = BertModel.from_pretrained( |
| | "hf-internal-testing/tiny-random-bert-variant-sharded", cache_dir=tmp_dir |
| | ) |
| | model = BertModel.from_pretrained( |
| | "hf-internal-testing/tiny-random-bert-variant-sharded", cache_dir=tmp_dir, variant="v2" |
| | ) |
| | self.assertIsNotNone(model) |
| |
|
| | @require_safetensors |
| | def test_checkpoint_variant_hub_safe(self): |
| | with tempfile.TemporaryDirectory() as tmp_dir: |
| | with self.assertRaises(EnvironmentError): |
| | _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-variant-safe", cache_dir=tmp_dir) |
| | model = BertModel.from_pretrained( |
| | "hf-internal-testing/tiny-random-bert-variant-safe", cache_dir=tmp_dir, variant="v2" |
| | ) |
| | self.assertIsNotNone(model) |
| |
|
| | @require_safetensors |
| | def test_checkpoint_variant_hub_sharded_safe(self): |
| | with tempfile.TemporaryDirectory() as tmp_dir: |
| | with self.assertRaises(EnvironmentError): |
| | _ = BertModel.from_pretrained( |
| | "hf-internal-testing/tiny-random-bert-variant-sharded-safe", cache_dir=tmp_dir |
| | ) |
| | model = BertModel.from_pretrained( |
| | "hf-internal-testing/tiny-random-bert-variant-sharded-safe", cache_dir=tmp_dir, variant="v2" |
| | ) |
| | self.assertIsNotNone(model) |
| |
|
| | def test_checkpoint_variant_save_load(self): |
| | with tempfile.TemporaryDirectory() as tmp_dir: |
| | model = BertModel.from_pretrained( |
| | "hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir, variant="v2" |
| | ) |
| | weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) |
| |
|
| | model.save_pretrained(tmp_dir, variant="v2") |
| | # Saving with a variant creates the variant checkpoint.
| | self.assertTrue(os.path.isfile(os.path.join(tmp_dir, weights_name))) |
| |
|
| | model.save_pretrained(tmp_dir) |
| | # Saving without a variant must not delete the existing variant checkpoint.
| | weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) |
| | self.assertTrue(os.path.isfile(os.path.join(tmp_dir, weights_name))) |
| |
|
| | # And there should now also be a regular checkpoint.
| | self.assertTrue(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) |
| |
|
| | self.assertIsNotNone(model) |
| |
|
| | @require_accelerate |
| | @mark.accelerate_tests |
| | def test_from_pretrained_low_cpu_mem_usage_functional(self): |
| | # Test that `from_pretrained(..., low_cpu_mem_usage=True)` works with both
| | # sharded and unsharded checkpoints.
| |
|
| | mnames = [ |
| | "hf-internal-testing/tiny-random-bert-sharded", |
| | "hf-internal-testing/tiny-random-bert", |
| | ] |
| | for mname in mnames: |
| | _ = BertModel.from_pretrained(mname, low_cpu_mem_usage=True) |
| |
|
| | @require_usr_bin_time |
| | @require_accelerate |
| | @mark.accelerate_tests |
| | def test_from_pretrained_low_cpu_mem_usage_measured(self): |
| | # Test that `low_cpu_mem_usage=True` peaks at less CPU memory than the
| | # default loading path.
|
| | mname = "bert-base-cased" |
| |
|
| | preamble = "from transformers import AutoModel" |
| | one_liner_str = f'{preamble}; AutoModel.from_pretrained("{mname}", low_cpu_mem_usage=False)' |
| | max_rss_normal = self.python_one_liner_max_rss(one_liner_str) |
| | |
| |
|
| | one_liner_str = f'{preamble}; AutoModel.from_pretrained("{mname}", low_cpu_mem_usage=True)' |
| | max_rss_low_mem = self.python_one_liner_max_rss(one_liner_str) |
| | |
| |
|
| | diff_bytes = max_rss_normal - max_rss_low_mem |
| | diff_percent = diff_bytes / max_rss_low_mem |
| | |
| | |
| | |
| | |
| |
|
| | self.assertGreater( |
| | diff_percent, |
| | 0.15, |
| | "should use less CPU memory for low_cpu_mem_usage=True, " |
| | f"but got max_rss_normal={max_rss_normal} and max_rss_low_mem={max_rss_low_mem}", |
| | ) |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
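    # python_one_liner_max_rss (provided by TestCasePlus) reports the peak resident set size
    # of a subprocess. A minimal sketch of the same measurement, assuming GNU time is installed
    # at /usr/bin/time (hence the @require_usr_bin_time marker); %M prints max RSS in kilobytes:
    #
    #   /usr/bin/time -f %M python -c \
    #       'from transformers import AutoModel; AutoModel.from_pretrained("bert-base-cased", low_cpu_mem_usage=True)'
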
    @require_accelerate
    @mark.accelerate_tests
    @require_torch_multi_gpu
    @slow
    def test_model_parallelism_gpt2(self):
        device_map = {"transformer.wte": 0, "transformer.wpe": 0, "lm_head": 0, "transformer.ln_f": 1}
        for i in range(12):
            device_map[f"transformer.h.{i}"] = 0 if i <= 5 else 1

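        # The loop splits gpt2's 12 transformer blocks across two GPUs: transformer.h.0-5 on
        # device 0 and transformer.h.6-11 on device 1, while the embeddings and lm_head stay
        # on device 0 and the final layer norm on device 1, as pinned in the dict literal above.
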
        model = AutoModelForCausalLM.from_pretrained("gpt2", device_map=device_map)

        tokenizer = AutoTokenizer.from_pretrained("gpt2")
        inputs = tokenizer("Hello, my name is", return_tensors="pt")
        output = model.generate(inputs["input_ids"].to(0))

        # generate() defaults to greedy decoding here, so the completion is deterministic
        text_output = tokenizer.decode(output[0].tolist())
        self.assertEqual(text_output, "Hello, my name is John. I'm a writer, and I'm a writer. I'm")

    @require_accelerate
    @mark.accelerate_tests
    @require_torch_gpu
    def test_from_pretrained_disk_offload_task_model(self):
        model = AutoModel.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        device_map = {
            "transformer.wte": 0,
            "transformer.wpe": 0,
            "transformer.h.0": "cpu",
            "transformer.h.1": "cpu",
            "transformer.h.2": "cpu",
            "transformer.h.3": "disk",
            "transformer.h.4": "disk",
            "transformer.ln_f": 0,
            "lm_head": 0,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            inputs = torch.tensor([[1, 2, 3]]).to(0)

            model.save_pretrained(tmp_dir)
            new_model = AutoModelForCausalLM.from_pretrained(tmp_dir).to(0)
            outputs1 = new_model(inputs)

            offload_folder = os.path.join(tmp_dir, "offload")
            new_model_with_offload = AutoModelForCausalLM.from_pretrained(
                tmp_dir, device_map=device_map, offload_folder=offload_folder
            )
            outputs2 = new_model_with_offload(inputs)

            self.assertTrue(torch.allclose(outputs1.logits.cpu(), outputs2.logits.cpu()))

            # With state dict temp offload
            offload_folder = os.path.join(tmp_dir, "offload")
            new_model_with_offload = AutoModelForCausalLM.from_pretrained(
                tmp_dir,
                device_map=device_map,
                offload_folder=offload_folder,
                offload_state_dict=True,
            )
            outputs2 = new_model_with_offload(inputs)

            self.assertTrue(torch.allclose(outputs1.logits.cpu(), outputs2.logits.cpu()))

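    # Both offloaded loads must reproduce the plain GPU run's logits exactly: device_map spreads
    # modules across GPU, CPU and disk, and offload_state_dict=True additionally spills the
    # temporary CPU copy of the state dict to disk during loading, lowering peak host memory
    # without changing results.
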
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP request, emulating that the server is down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.request", return_value=response_mock) as mock_head:
            _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
            # Check that the mocked request was actually hit, i.e. loading really fell back to
            # the local cache instead of skipping the network path entirely.
            mock_head.assert_called()

    def test_load_from_one_file(self):
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get(
                    "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/pytorch_model.bin", f
                )

            config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            _ = BertModel.from_pretrained(tmp_file, config=config)
        finally:
            os.remove(tmp_file)

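    # A bare weights file carries no config.json, so from_pretrained() cannot infer the
    # architecture from it; that is why the config is fetched separately and passed in
    # explicitly above.
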
    def test_legacy_load_from_url(self):
        # This test covers deprecated behavior (loading directly from a raw URL)
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
        _ = BertModel.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/pytorch_model.bin", config=config
        )

    @require_safetensors
    def test_use_safetensors(self):
        # test that a helpful error is raised when no safetensors files are available
        with self.assertRaises(OSError) as env_error:
            AutoModel.from_pretrained("hf-internal-testing/tiny-random-RobertaModel", use_safetensors=True)

        self.assertTrue(
            "model.safetensors or model.safetensors.index.json and thus cannot be loaded with `safetensors`"
            in str(env_error.exception)
        )

        # test that loading fails with use_safetensors=False when only safetensors weights exist
        with self.assertRaises(OSError) as env_error:
            BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors", use_safetensors=False)

        self.assertTrue("does not appear to have a file named pytorch_model.bin" in str(env_error.exception))

        # test that only the pytorch weights are downloaded when use_safetensors=False
        # and both formats are available
        with tempfile.TemporaryDirectory() as tmp_dir:
            CLIPTextModel.from_pretrained(
                "hf-internal-testing/diffusers-stable-diffusion-tiny-all",
                subfolder="text_encoder",
                use_safetensors=False,
                cache_dir=tmp_dir,
            )

            all_downloaded_files = glob.glob(os.path.join(tmp_dir, "*", "snapshots", "*", "*", "*"))
            self.assertTrue(any(f.endswith("bin") for f in all_downloaded_files))
            self.assertFalse(any(f.endswith("safetensors") for f in all_downloaded_files))

        # test that only the safetensors weights are downloaded when use_safetensors=True
        # and both formats are available
        with tempfile.TemporaryDirectory() as tmp_dir:
            CLIPTextModel.from_pretrained(
                "hf-internal-testing/diffusers-stable-diffusion-tiny-all",
                subfolder="text_encoder",
                use_safetensors=True,
                cache_dir=tmp_dir,
            )

            all_downloaded_files = glob.glob(os.path.join(tmp_dir, "*", "snapshots", "*", "*", "*"))
            self.assertTrue(any(f.endswith("safetensors") for f in all_downloaded_files))
            self.assertFalse(any(f.endswith("bin") for f in all_downloaded_files))

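    # Summary of the resolution rules exercised above: use_safetensors=True requires safetensors
    # weights and errors out otherwise; use_safetensors=False forces the pickle-based .bin
    # weights; leaving it unset (not tested here) lets from_pretrained fall back between the
    # two formats depending on what the repo provides.
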
    @require_safetensors
    def test_safetensors_save_and_load(self):
        model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, safe_serialization=True)
            # No pytorch_model.bin file, only a model.safetensors
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME)))
            self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME)))

            new_model = BertModel.from_pretrained(tmp_dir)

            # Check models are equal
            for p1, p2 in zip(model.parameters(), new_model.parameters()):
                self.assertTrue(torch.allclose(p1, p2))

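    # safetensors stores raw tensor buffers plus a JSON header, so loading never unpickles
    # arbitrary Python objects the way torch.load does; these round-trip checks verify the
    # two formats stay numerically interchangeable.
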
    @require_safetensors
    def test_safetensors_load_from_hub(self):
        safetensors_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors")
        pytorch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Check models are equal
        for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()):
            self.assertTrue(torch.allclose(p1, p2))

    @require_safetensors
    def test_safetensors_save_and_load_sharded(self):
        model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="100kB")
            # No pytorch_model.bin index file, only a model.safetensors index
            self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_INDEX_NAME)))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)))
            # No regular (unsharded) weights file in either format
            self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME)))
            self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME)))

            new_model = BertModel.from_pretrained(tmp_dir)

            # Check models are equal
            for p1, p2 in zip(model.parameters(), new_model.parameters()):
                self.assertTrue(torch.allclose(p1, p2))

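    # max_shard_size="100kB" is deliberately tiny so that even this toy checkpoint is split
    # into several shard files tracked by the index; real checkpoints use a much larger
    # default shard size.
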
    @require_safetensors
    def test_safetensors_load_from_hub_sharded(self):
        safetensors_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded-safetensors")
        pytorch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded")

        # Check models are equal
        for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()):
            self.assertTrue(torch.allclose(p1, p2))

    def test_base_model_to_head_model_load(self):
        base_model = BaseModel(PretrainedConfig())
        with tempfile.TemporaryDirectory() as tmp_dir:
            base_model.save_pretrained(tmp_dir)

            # A base-model checkpoint can be loaded into a model with a head
            model = ModelWithHead.from_pretrained(tmp_dir)
            for p1, p2 in zip(model.base.parameters(), base_model.parameters()):
                self.assertTrue(torch.allclose(p1, p2))

            # It doesn't work if the state dict mixes keys of the head model and of the base model
            base_state_dict = base_model.state_dict()
            head_state_dict = model.state_dict()
            base_state_dict["linear2.weight"] = head_state_dict["linear2.weight"]
            base_state_dict["linear2.bias"] = head_state_dict["linear2.bias"]
            torch.save(base_state_dict, os.path.join(tmp_dir, WEIGHTS_NAME))

            with self.assertRaisesRegex(
                ValueError, "The state dictionary of the model you are trying to load is corrupted."
            ):
                _ = ModelWithHead.from_pretrained(tmp_dir)

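    # When a base checkpoint is loaded into a head model, the head weights are newly
    # initialized (and reported as missing keys); a checkpoint whose keys mix the prefixed
    # (head) and unprefixed (base) namespaces is ambiguous, hence the ValueError above.
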
    @require_torch_gpu
    @slow
    def test_pretrained_low_mem_new_config(self):
        # Check that low_cpu_mem_usage loading also works when the supplied config differs
        # from the checkpoint's (here a much larger gpt2 than the pretrained weights)
        model_ids = ["gpt2"]

        for model_id in model_ids:
            model_config = AutoConfig.from_pretrained(pretrained_model_name_or_path=model_id)
            model_config.n_layer = 48
            model_config.n_head = 25
            model_config.n_embd = 1600
            model = AutoModelForCausalLM.from_pretrained(
                pretrained_model_name_or_path=model_id,
                config=model_config,
                ignore_mismatched_sizes=True,
                torch_dtype=torch.float16,
                low_cpu_mem_usage=True,
            )
            model_ref = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=model_id)

            self.assertEqual(model.__class__.__name__, model_ref.__class__.__name__)


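# Note: the tests below are gated by @is_staging_test and are intended to run against the
# Hub staging endpoint (with TOKEN/USER from transformers.testing_utils), not production.
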
@require_torch
@is_staging_test
class ModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Clean up the repos created by the tests; ignore errors from repos that were
        # already deleted or never created.
        try:
            delete_repo(token=cls._token, repo_id="test-model")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-model")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = BertModel(config)
        model.push_to_hub("test-model", use_auth_token=self._token)

        new_model = BertModel.from_pretrained(f"{USER}/test-model")
        for p1, p2 in zip(model.parameters(), new_model.parameters()):
            self.assertTrue(torch.equal(p1, p2))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model", push_to_hub=True, use_auth_token=self._token)

        new_model = BertModel.from_pretrained(f"{USER}/test-model")
        for p1, p2 in zip(model.parameters(), new_model.parameters()):
            self.assertTrue(torch.equal(p1, p2))

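    # The two pushes above should be equivalent end-to-end: model.push_to_hub(...) uploads
    # directly, while save_pretrained(..., push_to_hub=True) first serializes to a local
    # directory and then uploads its contents to the same repo.
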
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = BertModel(config)
        model.push_to_hub("valid_org/test-model-org", use_auth_token=self._token)

        new_model = BertModel.from_pretrained("valid_org/test-model-org")
        for p1, p2 in zip(model.parameters(), new_model.parameters()):
            self.assertTrue(torch.equal(p1, p2))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, push_to_hub=True, use_auth_token=self._token, repo_id="valid_org/test-model-org"
            )

        new_model = BertModel.from_pretrained("valid_org/test-model-org")
        for p1, p2 in zip(model.parameters(), new_model.parameters()):
            self.assertTrue(torch.equal(p1, p2))

    def test_push_to_hub_dynamic_model(self):
        CustomConfig.register_for_auto_class()
        CustomModel.register_for_auto_class()

        config = CustomConfig(hidden_size=32)
        model = CustomModel(config)

        model.push_to_hub("test-dynamic-model", use_auth_token=self._token)
        # registering for auto classes records the custom module paths in config.auto_map
        self.assertDictEqual(
            config.auto_map,
            {"AutoConfig": "custom_configuration.CustomConfig", "AutoModel": "custom_modeling.CustomModel"},
        )

        new_model = AutoModel.from_pretrained(f"{USER}/test-dynamic-model", trust_remote_code=True)
        # Can't make an isinstance check because the new_model is from the CustomModel class of a dynamic module
        self.assertEqual(new_model.__class__.__name__, "CustomModel")
        for p1, p2 in zip(model.parameters(), new_model.parameters()):
            self.assertTrue(torch.equal(p1, p2))

        config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-model", trust_remote_code=True)
        new_model = AutoModel.from_config(config, trust_remote_code=True)
        self.assertEqual(new_model.__class__.__name__, "CustomModel")

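    # trust_remote_code=True is required here because the checkpoint's modeling code lives in
    # the repo itself (uploaded via register_for_auto_class), so loading it executes code
    # fetched from the Hub.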