text: string, length 7 to 1.24M
id: string, length 14 to 166
metadata: dict
__index_level_0__: int64, 0 to 519
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Dict, Union

import numpy as np

from transformers import is_torch_available, is_vision_available
from transformers.agents.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
from transformers.testing_utils import get_tests_dir, is_agent_test


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


AUTHORIZED_TYPES = ["text", "audio", "image", "any"]


def create_inputs(tool_inputs: Dict[str, Dict[Union[str, type], str]]):
    inputs = {}

    for input_name, input_desc in tool_inputs.items():
        input_type = input_desc["type"]

        if input_type == "text":
            inputs[input_name] = "Text input"
        elif input_type == "image":
            inputs[input_name] = Image.open(
                Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
            ).resize((512, 512))
        elif input_type == "audio":
            inputs[input_name] = np.ones(3000)
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_type(output):
    if isinstance(output, (str, AgentText)):
        return "text"
    elif isinstance(output, (Image.Image, AgentImage)):
        return "image"
    elif isinstance(output, (torch.Tensor, AgentAudio)):
        return "audio"
    else:
        raise TypeError(f"Invalid output: {output}")


@is_agent_test
class ToolTesterMixin:
    def test_inputs_output(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "output_type"))

        inputs = self.tool.inputs
        self.assertTrue(isinstance(inputs, dict))

        for _, input_spec in inputs.items():
            self.assertTrue("type" in input_spec)
            self.assertTrue("description" in input_spec)
            self.assertTrue(input_spec["type"] in AUTHORIZED_TYPES)
            self.assertTrue(isinstance(input_spec["description"], str))

        output_type = self.tool.output_type
        self.assertTrue(output_type in AUTHORIZED_TYPES)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "name"))
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "output_type"))

    def test_agent_type_output(self):
        inputs = create_inputs(self.tool.inputs)
        output = self.tool(**inputs)
        agent_type = AGENT_TYPE_MAPPING[self.tool.output_type]
        self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, expected_input in zip(inputs, self.tool.inputs.values()):
            input_type = expected_input["type"]
            _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        output_type = AGENT_TYPE_MAPPING[self.tool.output_type]

        # Should not raise an error
        output = self.tool(**inputs)
        self.assertTrue(isinstance(output, output_type))
transformers/tests/agents/test_tools_common.py/0
{ "file_path": "transformers/tests/agents/test_tools_common.py", "repo_id": "transformers", "token_count": 1461 }
440
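The ToolTesterMixin in the row above is meant to be mixed into a concrete test case that supplies self.tool. Below is a minimal sketch of that pattern, not taken from the dataset: it assumes the mixin is importable from the file above and that a tool named "translation" can be resolved through transformers' load_tool.

import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin  # assumes this sketch sits next to the file above


class TranslationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        # load_tool resolves a built-in tool by name; "translation" is only an example choice
        self.tool = load_tool("translation")
        self.tool.setup()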
# coding=utf-8
# Copyright 2020 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
import unittest

from transformers import AutoTokenizer, is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        EosTokenCriteria,
        MaxLengthCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        StopStringCriteria,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length

        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(all(criteria(input_ids, scores)))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(all(criteria(input_ids, scores)))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(all(criteria(input_ids, scores)))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(all(criteria(input_ids, scores)))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(all(criteria(input_ids, scores)))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(all(criteria(input_ids, scores)))

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(all(criteria(input_ids, scores)))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(all(criteria(input_ids, scores)))

    def test_eos_token_criteria(self):
        criteria = EosTokenCriteria(eos_token_id=0)

        input_ids, scores = self._get_tensors(5)
        input_ids[:, -1] = 0
        self.assertTrue(all(criteria(input_ids, scores)))

        input_ids, scores = self._get_tensors(5)
        input_ids[:2, -1] = 0
        input_ids[2, -1] = 1
        self.assertListEqual(criteria(input_ids, scores).tolist(), [True, True, False])

        input_ids, scores = self._get_tensors(5)
        input_ids[:, -1] = 1
        self.assertListEqual(criteria(input_ids, scores).tolist(), [False, False, False])

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)

    def test_stop_string_criteria(self):
        true_strings = [
            "<|im_start|><|im_end|>",
            "<|im_start|><|im_end|<|im_end|>",
            ">><|im_start|>>stop",
            "stop",
            "e nd",
        ]
        false_strings = [
            "<|im_start|><|im_end|",
            "<|im_start|><|im_end|<|im_end|",
            "<|im_end|><|im_start|>",
            "<|im_end|<>stop<|im_end|",
            "end",
            "en d",
            "eNd",
            "<|im_end|",
            "|im_end|>",
            "s",
        ]
        stop_strings = ["<|im_end|>", "stop", "e nd"]

        # Use a tokenizer that won't actually have special tokens for these
        tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
        tokenizer.pad_token_id = tokenizer.eos_token_id
        tokenizer.padding_side = "left"
        true_input_ids = tokenizer(true_strings, return_tensors="pt", padding="longest", add_special_tokens=False)
        false_input_ids = tokenizer(false_strings, return_tensors="pt", padding="longest", add_special_tokens=False)

        scores = None
        criteria = StopStringCriteria(tokenizer=tokenizer, stop_strings=stop_strings)
        for i in range(len(true_strings)):
            self.assertTrue(criteria(true_input_ids["input_ids"][i : i + 1], scores))
        for i in range(len(false_strings)):
            self.assertFalse(criteria(false_input_ids["input_ids"][i : i + 1], scores))

        # Now try it with a tokenizer where those are actually special tokens
        tokenizer = AutoTokenizer.from_pretrained("cognitivecomputations/dolphin-2.5-mixtral-8x7b")
        tokenizer.padding_side = "left"
        true_input_ids = tokenizer(true_strings, return_tensors="pt", padding="longest", add_special_tokens=False)
        false_input_ids = tokenizer(false_strings, return_tensors="pt", padding="longest", add_special_tokens=False)

        criteria = StopStringCriteria(tokenizer=tokenizer, stop_strings=stop_strings)
        for i in range(len(true_strings)):
            self.assertTrue(criteria(true_input_ids["input_ids"][i : i + 1], scores))
        for i in range(len(false_strings)):
            self.assertFalse(criteria(false_input_ids["input_ids"][i : i + 1], scores))

    def test_stop_string_matching_positions(self):
        stop_string = "stop"
        token_list = ["last", "top", "topper", "s", "p"]
        token_indices = list(range(len(token_list)))
        all_token_valid_positions, all_token_end_overlaps = StopStringCriteria._stop_string_get_matching_positions(
            token_list=token_list, token_indices=token_indices, stop_strings=[stop_string]
        )
        valid_positions = {
            token_list[idx]: positions for idx, positions in all_token_valid_positions[stop_string].items()
        }
        end_overlaps = {token_list[idx]: overlaps for idx, overlaps in all_token_end_overlaps[stop_string].items()}
        self.assertEqual(valid_positions, {"s": [3], "last": [2]})
        self.assertEqual(end_overlaps, {"top": [3], "topper": [3], "p": [1]})

    def test_stop_string_embedding_vecs(self):
        stop_string = "stop"
        token_list = ["last", "top", "topper", "s", "p"]
        token_indices = list(range(len(token_list)))
        embedding_vec, max_valid_positions, max_valid_end_lens = StopStringCriteria._stop_string_create_embedding_vec(
            token_list=token_list, token_indices=token_indices, stop_strings=[stop_string]
        )

        # Positions inside the stop string where the token matches (excluding end overlaps)
        valid_positions = embedding_vec[:, 0].tolist()
        self.assertEqual(valid_positions, [2, -1, -1, 3, -1])

        # Overlap lengths between end of stop string and start of token
        end_overlaps = embedding_vec[:, 1].tolist()
        self.assertEqual(end_overlaps, [-1, 3, 3, -1, 1])

        # Length of each token
        token_lengths = embedding_vec[:, 2].tolist()
        self.assertEqual(token_lengths, [len(token) for token in token_list])

    def test_single_letter_stop_string(self):
        true_strings = ["a", "baa", "abc"]  # "abc" is a single token
        false_strings = ["abbbbbbb", "b"]  # "abbbbbbb" is split into multiple tokens
        stop_strings = ["a"]
        tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
        tokenizer.pad_token_id = tokenizer.eos_token_id
        tokenizer.padding_side = "left"
        true_input_ids = tokenizer(true_strings, return_tensors="pt", padding="longest", add_special_tokens=False)
        false_input_ids = tokenizer(false_strings, return_tensors="pt", padding="longest", add_special_tokens=False)

        scores = None
        criteria = StopStringCriteria(tokenizer=tokenizer, stop_strings=stop_strings)
        for input_ids in true_input_ids["input_ids"]:
            self.assertTrue(criteria(input_ids.unsqueeze(0), scores))
        for input_ids in false_input_ids["input_ids"]:
            self.assertFalse(criteria(input_ids.unsqueeze(0), scores))

    def test_criterias_per_row(self):
        text = "They completed the challenging puzzle, revealing the hidden image at the end"
        stop_strings = ["end"]

        tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
        tokenizer.pad_token_id = tokenizer.eos_token_id
        inputs = tokenizer(text, return_tensors="pt", add_special_tokens=False)

        scores = None
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=20),
                StopStringCriteria(tokenizer=tokenizer, stop_strings=stop_strings),
            ]
        )

        # trigger stopping when at least one criteria is satisfied, one value per batch
        self.assertTrue(criteria(inputs["input_ids"], scores))

        # return False when neither is satisfied
        self.assertFalse(criteria(inputs["input_ids"][:, :-1], scores))

    def test_criterias_per_row_batched(self):
        text = [
            "They completed the challenging puzzle, revealing the hidden image at the end",
            "Today a dragon flew over France",
            "The aroma of freshly baked pizza filled the kitchen",
        ]
        stop_strings = ["end"]

        tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
        tokenizer.pad_token_id = tokenizer.eos_token_id
        tokenizer.padding_side = "left"
        inputs = tokenizer(text, return_tensors="pt", padding="longest", add_special_tokens=False)

        scores = None
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=20),
                StopStringCriteria(tokenizer=tokenizer, stop_strings=stop_strings),
            ]
        )

        # trigger stopping when at least one criteria is satisfied
        self.assertListEqual(criteria(inputs["input_ids"], scores).tolist(), [True, False, False])

        # False when neither is satisfied
        self.assertListEqual(criteria(inputs["input_ids"][:, :-1], scores).tolist(), [False, False, False])
transformers/tests/generation/test_stopping_criteria.py/0
{ "file_path": "transformers/tests/generation/test_stopping_criteria.py", "repo_id": "transformers", "token_count": 4605 }
441
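The criteria exercised in the row above are normally consumed through generate(). The sketch below is illustrative only; it assumes the openai-community/gpt2 checkpoint is reachable, and any causal LM would work the same way.

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import StoppingCriteriaList, StopStringCriteria

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")

inputs = tokenizer("They completed the challenging puzzle, revealing the hidden image at the", return_tensors="pt")
criteria = StoppingCriteriaList([StopStringCriteria(tokenizer=tokenizer, stop_strings=["end"])])

# Generation halts as soon as the decoded continuation contains the stop string "end"
output_ids = model.generate(**inputs, stopping_criteria=criteria, max_new_tokens=30)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))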
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Bark model.""" import copy import inspect import tempfile import unittest import pytest from transformers import ( BarkCoarseConfig, BarkConfig, BarkFineConfig, BarkSemanticConfig, is_torch_available, ) from transformers.models.bark.generation_configuration_bark import ( BarkCoarseGenerationConfig, BarkFineGenerationConfig, BarkSemanticGenerationConfig, ) from transformers.testing_utils import ( require_flash_attn, require_torch, require_torch_fp16, require_torch_gpu, slow, torch_device, ) from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ..encodec.test_modeling_encodec import EncodecModelTester if is_torch_available(): import torch from transformers import ( BarkCausalModel, BarkCoarseModel, BarkFineModel, BarkModel, BarkProcessor, BarkSemanticModel, ) class BarkSemanticModelTester: def __init__( self, parent, batch_size=3, # need batch_size != num_hidden_layers seq_length=4, is_training=False, # for now training is not supported use_input_mask=True, use_labels=True, vocab_size=33, output_vocab_size=33, hidden_size=16, num_hidden_layers=2, num_attention_heads=2, intermediate_size=15, dropout=0.1, window_size=256, initializer_range=0.02, n_codes_total=8, # for BarkFineModel n_codes_given=1, # for BarkFineModel ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.output_vocab_size = output_vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.window_size = window_size self.initializer_range = initializer_range self.bos_token_id = output_vocab_size - 1 self.eos_token_id = output_vocab_size - 1 self.pad_token_id = output_vocab_size - 1 self.n_codes_total = n_codes_total self.n_codes_given = n_codes_given self.is_encoder_decoder = False def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) inputs_dict = { "input_ids": input_ids, "head_mask": head_mask, "attention_mask": input_mask, } return config, inputs_dict def get_config(self): return BarkSemanticConfig( vocab_size=self.vocab_size, output_vocab_size=self.output_vocab_size, hidden_size=self.hidden_size, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, use_cache=True, bos_token_id=self.bos_token_id, 
eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, window_size=self.window_size, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 config.output_vocab_size = 300 return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = BarkSemanticModel(config=config).to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "logits" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) # test no attention_mask works outputs = model(input_ids, use_cache=True) _, past_key_values = outputs.to_tuple() output_from_no_past = model(next_input_ids)["logits"] output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) class BarkCoarseModelTester: def __init__( self, parent, batch_size=3, # need batch_size != num_hidden_layers seq_length=4, is_training=False, # for now training is not supported use_input_mask=True, use_labels=True, vocab_size=33, output_vocab_size=33, hidden_size=16, num_hidden_layers=2, num_attention_heads=2, intermediate_size=15, dropout=0.1, window_size=256, initializer_range=0.02, n_codes_total=8, # for BarkFineModel n_codes_given=1, # for BarkFineModel ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.output_vocab_size = output_vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.window_size = window_size self.initializer_range = initializer_range self.bos_token_id = output_vocab_size - 1 self.eos_token_id = output_vocab_size - 1 self.pad_token_id = output_vocab_size - 1 self.n_codes_total = n_codes_total self.n_codes_given = n_codes_given 
self.is_encoder_decoder = False def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) inputs_dict = { "input_ids": input_ids, "head_mask": head_mask, "attention_mask": input_mask, } return config, inputs_dict def get_config(self): return BarkCoarseConfig( vocab_size=self.vocab_size, output_vocab_size=self.output_vocab_size, hidden_size=self.hidden_size, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, window_size=self.window_size, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 config.output_vocab_size = 300 return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = BarkCoarseModel(config=config).to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "logits" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) # test no attention_mask works outputs = model(input_ids, use_cache=True) _, past_key_values = outputs.to_tuple() output_from_no_past = model(next_input_ids)["logits"] output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) class BarkFineModelTester: def __init__( self, parent, batch_size=3, # need batch_size != num_hidden_layers seq_length=4, is_training=False, # for now training is not supported use_input_mask=True, use_labels=True, vocab_size=33, output_vocab_size=33, hidden_size=16, num_hidden_layers=2, num_attention_heads=2, intermediate_size=15, dropout=0.1, window_size=256, initializer_range=0.02, n_codes_total=8, # for BarkFineModel 
n_codes_given=1, # for BarkFineModel ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.output_vocab_size = output_vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.window_size = window_size self.initializer_range = initializer_range self.bos_token_id = output_vocab_size - 1 self.eos_token_id = output_vocab_size - 1 self.pad_token_id = output_vocab_size - 1 self.n_codes_total = n_codes_total self.n_codes_given = n_codes_given self.is_encoder_decoder = False def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length, self.n_codes_total], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) # randint between self.n_codes_given - 1 and self.n_codes_total - 1 codebook_idx = ids_tensor((1,), self.n_codes_total - self.n_codes_given).item() + self.n_codes_given inputs_dict = { "codebook_idx": codebook_idx, "input_ids": input_ids, "head_mask": head_mask, "attention_mask": input_mask, } return config, inputs_dict def get_config(self): return BarkFineConfig( vocab_size=self.vocab_size, output_vocab_size=self.output_vocab_size, hidden_size=self.hidden_size, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, window_size=self.window_size, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 config.output_vocab_size = 300 return config def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = BarkFineModel(config=config).to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["logits"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "logits" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) # test no attention_mask works outputs = model(input_ids, use_cache=True) _, 
past_key_values = outputs.to_tuple() output_from_no_past = model(next_input_ids)["logits"] output_from_past = model(next_tokens, past_key_values=past_key_values)["logits"] random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) class BarkModelTester: def __init__( self, parent, semantic_kwargs=None, coarse_acoustics_kwargs=None, fine_acoustics_kwargs=None, codec_kwargs=None, is_training=False, # for now training is not supported ): if semantic_kwargs is None: semantic_kwargs = {} if coarse_acoustics_kwargs is None: coarse_acoustics_kwargs = {} if fine_acoustics_kwargs is None: fine_acoustics_kwargs = {} if codec_kwargs is None: codec_kwargs = {} self.parent = parent self.semantic_model_tester = BarkSemanticModelTester(parent, **semantic_kwargs) self.coarse_acoustics_model_tester = BarkCoarseModelTester(parent, **coarse_acoustics_kwargs) self.fine_acoustics_model_tester = BarkFineModelTester(parent, **fine_acoustics_kwargs) self.codec_model_tester = EncodecModelTester(parent, **codec_kwargs) self.is_training = is_training def get_config(self): return BarkConfig.from_sub_model_configs( self.semantic_model_tester.get_config(), self.coarse_acoustics_model_tester.get_config(), self.fine_acoustics_model_tester.get_config(), self.codec_model_tester.get_config(), ) def get_pipeline_config(self): config = self.get_config() # follow the `get_pipeline_config` of the sub component models config.semantic_config.vocab_size = 300 config.coarse_acoustics_config.vocab_size = 300 config.fine_acoustics_config.vocab_size = 300 config.semantic_config.output_vocab_size = 300 config.coarse_acoustics_config.output_vocab_size = 300 config.fine_acoustics_config.output_vocab_size = 300 return config @require_torch class BarkSemanticModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (BarkSemanticModel,) if is_torch_available() else () all_generative_model_classes = (BarkCausalModel,) if is_torch_available() else () is_encoder_decoder = False fx_compatible = False test_missing_keys = False test_pruning = False test_model_parallel = False # no model_parallel for now test_resize_embeddings = True def setUp(self): self.model_tester = BarkSemanticModelTester(self) self.config_tester = ConfigTester(self, config_class=BarkSemanticConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) input_ids 
= inputs["input_ids"] del inputs["input_ids"] wte = model.get_input_embeddings() inputs["input_embeds"] = wte(input_ids) with torch.no_grad(): model(**inputs)[0] # override as the input arg is called "input_embeds", not "inputs_embeds" def test_inputs_embeds_matches_input_ids(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) with torch.no_grad(): out_ids = model(**inputs)[0] input_ids = inputs["input_ids"] del inputs["input_ids"] wte = model.get_input_embeddings() inputs["input_embeds"] = wte(input_ids) with torch.no_grad(): out_embeds = model(**inputs)[0] self.assertTrue(torch.allclose(out_embeds, out_ids)) @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = self.all_generative_model_classes[0](config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) @require_torch class BarkCoarseModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): # Same tester as BarkSemanticModelTest, except for model_class and config_class all_model_classes = (BarkCoarseModel,) if is_torch_available() else () all_generative_model_classes = (BarkCausalModel,) if is_torch_available() else () is_encoder_decoder = False fx_compatible = False test_missing_keys = False test_pruning = False test_model_parallel = False # no model_parallel for now test_resize_embeddings = True def setUp(self): self.model_tester = BarkCoarseModelTester(self) self.config_tester = ConfigTester(self, config_class=BarkCoarseConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) input_ids = inputs["input_ids"] del inputs["input_ids"] wte = model.get_input_embeddings() inputs["input_embeds"] = wte(input_ids) with torch.no_grad(): model(**inputs)[0] # override as the input arg is called "input_embeds", not "inputs_embeds" def test_inputs_embeds_matches_input_ids(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) with torch.no_grad(): out_ids = model(**inputs)[0] input_ids = inputs["input_ids"] del inputs["input_ids"] wte = 
model.get_input_embeddings() inputs["input_embeds"] = wte(input_ids) with torch.no_grad(): out_embeds = model(**inputs)[0] self.assertTrue(torch.allclose(out_embeds, out_ids)) @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = self.all_generative_model_classes[0](config).eval().to(torch_device) model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) @require_torch class BarkFineModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (BarkFineModel,) if is_torch_available() else () is_encoder_decoder = False fx_compatible = False test_missing_keys = False test_pruning = False # no model_parallel for now test_model_parallel = False # torchscript disabled for now because forward with an int test_torchscript = False test_resize_embeddings = True def setUp(self): self.model_tester = BarkFineModelTester(self) self.config_tester = ConfigTester(self, config_class=BarkFineConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) input_ids = inputs["input_ids"] del inputs["input_ids"] wte = model.get_input_embeddings()[inputs_dict["codebook_idx"]] inputs["input_embeds"] = wte(input_ids[:, :, inputs_dict["codebook_idx"]]) with torch.no_grad(): model(**inputs)[0] @unittest.skip(reason="FineModel relies on codebook idx and does not return same logits") def test_inputs_embeds_matches_input_ids(self): pass @require_torch_fp16 def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] # take first codebook channel model = self.all_model_classes[0](config).eval().to(torch_device) model.half() # toy generation_configs semantic_generation_config = BarkSemanticGenerationConfig(semantic_vocab_size=0) coarse_generation_config = BarkCoarseGenerationConfig(n_coarse_codebooks=config.n_codes_given) fine_generation_config = BarkFineGenerationConfig( max_fine_history_length=config.block_size // 2, max_fine_input_length=config.block_size, n_fine_codebooks=config.n_codes_total, ) codebook_size = config.vocab_size - 1 model.generate( input_ids, history_prompt=None, temperature=None, semantic_generation_config=semantic_generation_config, coarse_generation_config=coarse_generation_config, fine_generation_config=fine_generation_config, codebook_size=codebook_size, ) model.generate( input_ids, history_prompt=None, temperature=0.7, semantic_generation_config=semantic_generation_config, coarse_generation_config=coarse_generation_config, fine_generation_config=fine_generation_config, codebook_size=codebook_size, ) def test_forward_signature(self): config, _ = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["codebook_idx", "input_ids"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_model_get_set_embeddings(self): # one embedding layer per codebook config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings()[0], (torch.nn.Embedding)) model.set_input_embeddings( torch.nn.ModuleList([torch.nn.Embedding(10, 10) for _ in range(config.n_codes_total)]) ) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x[0], torch.nn.Linear)) def test_resize_tokens_embeddings(self): # resizing tokens_embeddings of a ModuleList original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: self.skipTest(reason="test_resize_embeddings is False") for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size # Retrieve the embeddings and clone theme model_embed_list = model.resize_token_embeddings(model_vocab_size) cloned_embeddings_list = [model_embed.weight.clone() for model_embed in model_embed_list] # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed_list = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix for each codebook for model_embed, cloned_embeddings in zip(model_embed_list, cloned_embeddings_list): self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed_list = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) for model_embed, cloned_embeddings in zip(model_embed_list, cloned_embeddings_list): self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. 
# only check for the first embedding matrix models_equal = True for p1, p2 in zip(cloned_embeddings_list[0], model_embed_list[0].weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_embeddings_untied(self): # resizing tokens_embeddings of a ModuleList original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: self.skipTest(reason="test_resize_embeddings is False") original_config.tie_word_embeddings = False for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds_list = model.get_output_embeddings() for output_embeds in output_embeds_list: self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds_list = model.get_output_embeddings() for output_embeds in output_embeds_list: self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(reason="Model does not support flash_attention_2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.bfloat16) model.to(torch_device) dummy_input = inputs_dict["input_ids"][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: dummy_attention_mask = dummy_attention_mask[:1] dummy_attention_mask[:, 1:] = 1 dummy_attention_mask[:, :1] = 0 outputs = model(inputs_dict["codebook_idx"], dummy_input, 
output_hidden_states=True) outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True) logits = outputs.hidden_states[-1] logits_fa = outputs_fa.hidden_states[-1] assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) other_inputs = {"output_hidden_states": True} if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(inputs_dict["codebook_idx"], dummy_input, **other_inputs) outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, **other_inputs) logits = outputs.hidden_states[-1] logits_fa = outputs_fa.hidden_states[-1] assert torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2) # check with inference + dropout model.train() _ = model_fa(inputs_dict["codebook_idx"], dummy_input, **other_inputs) @require_flash_attn @require_torch_gpu @pytest.mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence_right_padding(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(reason="Model does not support flash_attention_2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, ) model.to(torch_device) dummy_input = inputs_dict["input_ids"][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: dummy_attention_mask = dummy_attention_mask[:1] dummy_attention_mask[:, :-1] = 1 dummy_attention_mask[:, -1:] = 0 outputs = model(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True) outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, output_hidden_states=True) logits = outputs.hidden_states[-1] logits_fa = outputs_fa.hidden_states[-1] assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) other_inputs = { "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(inputs_dict["codebook_idx"], dummy_input, **other_inputs) outputs_fa = model_fa(inputs_dict["codebook_idx"], dummy_input, **other_inputs) logits = outputs.hidden_states[-1] logits_fa = outputs_fa.hidden_states[-1] assert torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2) @require_torch class BarkModelIntegrationTests(unittest.TestCase): @cached_property def model(self): return BarkModel.from_pretrained("suno/bark").to(torch_device) @cached_property def processor(self): return BarkProcessor.from_pretrained("suno/bark") @cached_property def inputs(self): input_ids = self.processor("In the light of the moon, a little egg lay on a leaf", voice_preset="en_speaker_6") input_ids = input_ids.to(torch_device) return input_ids @cached_property def semantic_generation_config(self): semantic_generation_config = BarkSemanticGenerationConfig(**self.model.generation_config.semantic_config) return semantic_generation_config @cached_property def coarse_generation_config(self): coarse_generation_config = BarkCoarseGenerationConfig(**self.model.generation_config.coarse_acoustics_config) return coarse_generation_config @cached_property def 
fine_generation_config(self): fine_generation_config = BarkFineGenerationConfig(**self.model.generation_config.fine_acoustics_config) return fine_generation_config @slow def test_generate_semantic(self): input_ids = self.inputs # check first ids expected_output_ids = [7363, 321, 41, 1461, 6915, 952, 326, 41, 41, 927,] # fmt: skip # greedy decoding with torch.no_grad(): output_ids = self.model.semantic.generate( **input_ids, do_sample=False, temperature=1.0, semantic_generation_config=self.semantic_generation_config, ) self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids) @slow def test_generate_semantic_early_stop(self): input_ids = self.inputs min_eos_p = 0.01 # check first ids expected_output_ids = [7363, 321, 41, 1461, 6915, 952, 326, 41, 41, 927,] # fmt: skip # Should be able to read min_eos_p from kwargs with torch.no_grad(): torch.manual_seed(0) output_ids_without_min_eos_p = self.model.semantic.generate( **input_ids, do_sample=False, temperature=0.9, semantic_generation_config=self.semantic_generation_config, ) torch.manual_seed(0) output_ids_kwargs = self.model.semantic.generate( **input_ids, do_sample=False, temperature=0.9, semantic_generation_config=self.semantic_generation_config, min_eos_p=min_eos_p, ) self.assertListEqual(output_ids_without_min_eos_p[0, : len(expected_output_ids)].tolist(), expected_output_ids) self.assertLess(len(output_ids_kwargs[0, :].tolist()), len(output_ids_without_min_eos_p[0, :].tolist())) # Should be able to read min_eos_p from the semantic generation config self.semantic_generation_config.min_eos_p = min_eos_p with torch.no_grad(): torch.manual_seed(0) output_ids = self.model.semantic.generate( **input_ids, do_sample=False, temperature=0.9, semantic_generation_config=self.semantic_generation_config, ) self.assertEqual(output_ids.shape, output_ids_kwargs.shape) self.assertLess(len(output_ids[0, :].tolist()), len(output_ids_without_min_eos_p[0, :].tolist())) self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids) @slow def test_generate_coarse(self): input_ids = self.inputs history_prompt = input_ids["history_prompt"] # check first ids expected_output_ids = [11018, 11391, 10651, 11418, 10857, 11620, 10642, 11366, 10312, 11528, 10531, 11516, 10474, 11051, 10524, 11051, ] # fmt: skip with torch.no_grad(): output_ids = self.model.semantic.generate( **input_ids, do_sample=False, temperature=1.0, semantic_generation_config=self.semantic_generation_config, ) output_ids = self.model.coarse_acoustics.generate( output_ids, history_prompt=history_prompt, do_sample=False, temperature=1.0, semantic_generation_config=self.semantic_generation_config, coarse_generation_config=self.coarse_generation_config, codebook_size=self.model.generation_config.codebook_size, ) self.assertListEqual(output_ids[0, : len(expected_output_ids)].tolist(), expected_output_ids) @slow def test_generate_fine(self): input_ids = self.inputs history_prompt = input_ids["history_prompt"] # fmt: off expected_output_ids = [ [1018, 651, 857, 642, 312, 531, 474, 524, 524, 776,], [367, 394, 596, 342, 504, 492, 27, 27, 822, 822,], [961, 955, 221, 955, 955, 686, 939, 939, 479, 176,], [638, 365, 218, 944, 853, 363, 639, 22, 884, 456,], [302, 912, 524, 38, 174, 209, 879, 23, 910, 227,], [440, 673, 861, 666, 372, 558, 49, 172, 232, 342,], [244, 358, 123, 356, 586, 520, 499, 877, 542, 637,], [806, 685, 905, 848, 803, 810, 921, 208, 625, 203,], ] # fmt: on with torch.no_grad(): output_ids = self.model.semantic.generate( 
**input_ids, do_sample=False, temperature=1.0, semantic_generation_config=self.semantic_generation_config, ) output_ids = self.model.coarse_acoustics.generate( output_ids, history_prompt=history_prompt, do_sample=False, temperature=1.0, semantic_generation_config=self.semantic_generation_config, coarse_generation_config=self.coarse_generation_config, codebook_size=self.model.generation_config.codebook_size, ) # greedy decoding output_ids = self.model.fine_acoustics.generate( output_ids, history_prompt=history_prompt, temperature=None, semantic_generation_config=self.semantic_generation_config, coarse_generation_config=self.coarse_generation_config, fine_generation_config=self.fine_generation_config, codebook_size=self.model.generation_config.codebook_size, ) self.assertListEqual(output_ids[0, :, : len(expected_output_ids[0])].tolist(), expected_output_ids) @slow def test_generate_end_to_end(self): input_ids = self.inputs with torch.no_grad(): self.model.generate(**input_ids) self.model.generate(**{key: val for (key, val) in input_ids.items() if key != "history_prompt"}) @slow def test_generate_end_to_end_with_args(self): input_ids = self.inputs with torch.no_grad(): self.model.generate(**input_ids, do_sample=True, temperature=0.6, penalty_alpha=0.6) self.model.generate(**input_ids, do_sample=True, temperature=0.6, num_beams=4) @slow def test_generate_batching(self): args = {"do_sample": False, "temperature": None} s1 = "I love HuggingFace" s2 = "In the light of the moon, a little egg lay on a leaf" voice_preset = "en_speaker_6" input_ids = self.processor([s1, s2], voice_preset=voice_preset).to(torch_device) # generate in batch outputs, audio_lengths = self.model.generate(**input_ids, **args, return_output_lengths=True) # generate one-by-one s1 = self.processor(s1, voice_preset=voice_preset).to(torch_device) s2 = self.processor(s2, voice_preset=voice_preset).to(torch_device) output1 = self.model.generate(**s1, **args) output2 = self.model.generate(**s2, **args) # up until the coarse acoustic model (included), results are the same # the fine acoustic model introduces small differences # first verify if same length (should be the same because it's decided in the coarse model) self.assertEqual(tuple(audio_lengths), (output1.shape[1], output2.shape[1])) # then assert almost equal self.assertTrue(torch.allclose(outputs[0, : audio_lengths[0]], output1.squeeze(), atol=2e-3)) self.assertTrue(torch.allclose(outputs[1, : audio_lengths[1]], output2.squeeze(), atol=2e-3)) # now test single input with return_output_lengths = True outputs, _ = self.model.generate(**s1, **args, return_output_lengths=True) self.assertTrue((outputs == output1).all().item()) @slow def test_generate_end_to_end_with_sub_models_args(self): input_ids = self.inputs with torch.no_grad(): torch.manual_seed(0) self.model.generate( **input_ids, do_sample=False, temperature=1.0, coarse_do_sample=True, coarse_temperature=0.7 ) output_ids_without_min_eos_p = self.model.generate( **input_ids, do_sample=True, temperature=0.9, coarse_do_sample=True, coarse_temperature=0.7, fine_temperature=0.3, ) output_ids_with_min_eos_p = self.model.generate( **input_ids, do_sample=True, temperature=0.9, coarse_temperature=0.7, fine_temperature=0.3, min_eos_p=0.1, ) self.assertLess( len(output_ids_with_min_eos_p[0, :].tolist()), len(output_ids_without_min_eos_p[0, :].tolist()) ) @require_torch_gpu @slow def test_generate_end_to_end_with_offload(self): input_ids = self.inputs with torch.no_grad(): # standard generation output_with_no_offload = 
self.model.generate(**input_ids, do_sample=False, temperature=1.0) torch.cuda.empty_cache() memory_before_offload = torch.cuda.memory_allocated() model_memory_footprint = self.model.get_memory_footprint() # activate cpu offload self.model.enable_cpu_offload() memory_after_offload = torch.cuda.memory_allocated() # checks if the model have been offloaded # CUDA memory usage after offload should be near 0, leaving room to small differences room_for_difference = 1.1 self.assertGreater( (memory_before_offload - model_memory_footprint) * room_for_difference, memory_after_offload ) # checks if device is the correct one self.assertEqual(self.model.device.type, torch_device) # checks if hooks exist self.assertTrue(hasattr(self.model.semantic, "_hf_hook")) # output with cpu offload output_with_offload = self.model.generate(**input_ids, do_sample=False, temperature=1.0) # checks if same output self.assertListAlmostEqual(output_with_no_offload.squeeze().tolist(), output_with_offload.squeeze().tolist()) def assertListAlmostEqual(self, list1, list2, tol=1e-6): self.assertEqual(len(list1), len(list2)) for a, b in zip(list1, list2): self.assertAlmostEqual(a, b, delta=tol)
transformers/tests/models/bark/test_modeling_bark.py/0
{ "file_path": "transformers/tests/models/bark/test_modeling_bark.py", "repo_id": "transformers", "token_count": 24372 }
442
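For orientation, the Bark integration tests in the row above reduce to the end-to-end call sketched here. The sketch is not taken from the dataset and assumes the suno/bark-small checkpoint can be downloaded (the full suno/bark checkpoint used in the tests behaves the same way).

import torch

from transformers import BarkModel, BarkProcessor

processor = BarkProcessor.from_pretrained("suno/bark-small")
model = BarkModel.from_pretrained("suno/bark-small")

inputs = processor("In the light of the moon, a little egg lay on a leaf", voice_preset="en_speaker_6")
with torch.no_grad():
    audio = model.generate(**inputs)

# audio is a float waveform tensor; Bark exposes its sampling rate on the generation config
print(audio.shape, model.generation_config.sample_rate)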
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from transformers import AutoTokenizer, BertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import ( CaptureLogger, require_torch, require_torch_accelerator, require_torch_sdpa, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLMHeadModel, BertModel, logging, ) class BertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, 
input_mask, sequence_labels, token_labels, choice_labels def get_config(self): """ Returns a tiny configuration by default. """ return BertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = BertModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = BertLMHeadModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_model_for_causal_lm_as_decoder( self, config, 
input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = BertLMHeadModel(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, encoder_hidden_states=encoder_hidden_states, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = BertLMHeadModel(config=config).to(torch_device).eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForNextSentencePrediction(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) def 
create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = BertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = BertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = BertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class BertModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( BertModel, BertLMHeadModel, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = (BertLMHeadModel,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": BertModel, "fill-mask": BertForMaskedLM, "question-answering": BertForQuestionAnswering, "text-classification": BertForSequenceClassification, "text-generation": BertLMHeadModel, "token-classification": BertForTokenClassification, "zero-shot": BertForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True model_split_percents = [0.5, 0.8, 0.9] # special case for ForPreTraining 
model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["next_sentence_label"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = BertModelTester(self) self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) @unittest.skip(reason="Generate needs input ids") def test_inputs_embeds_matches_input_ids_with_generate(self): # generate only works with input ids for bertforcausalLM pass def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_causal_lm_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_for_causal_lm_as_decoder(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_decoder_model_past_with_large_inputs_relative_pos_emb(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() config_and_inputs[0].position_embedding_type = "relative_key" self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_next_sequence_prediction(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_warning_if_padding_and_no_attention_mask(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.model_tester.prepare_config_and_inputs() # Set pad tokens in the input_ids input_ids[0, 0] = config.pad_token_id # Check for warnings if the attention_mask is missing. logger = logging.get_logger("transformers.modeling_utils") # clear cache so we can test the warning is emitted (from `warning_once`). logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: model = BertModel(config=config) model.to(torch_device) model.eval() model(input_ids, attention_mask=None, token_type_ids=token_type_ids) self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out) @slow def test_model_from_pretrained(self): model_name = "google-bert/bert-base-uncased" model = BertModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow @require_torch_accelerator def test_torchscript_device_change(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == BertForMultipleChoice: self.skipTest(reason="BertForMultipleChoice behaves incorrectly in JIT environments.") config.torchscript = True model = model_class(config=config) inputs_dict = self._prepare_for_class(inputs_dict, model_class) traced_model = torch.jit.trace( model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(traced_model, os.path.join(tmp, "bert.pt")) loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device) loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device)) # This test was copied from the common test_eager_matches_sdpa_generate(), but without low_cpu_mem_usage=True. # TODO: Remove this and use the parent method (in common tests) once BERT supports low_cpu_mem_usage=True. 
@require_torch_sdpa @slow def test_eager_matches_sdpa_generate(self): max_new_tokens = 30 if len(self.all_generative_model_classes) == 0: self.skipTest(f"{self.__class__.__name__} tests a model that does support generate: skipping this test") for model_class in self.all_generative_model_classes: if not model_class._supports_sdpa: self.skipTest(f"{model_class.__name__} does not support SDPA") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) model_sdpa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, # low_cpu_mem_usage=True, ).to(torch_device) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") model_eager = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, # low_cpu_mem_usage=True, attn_implementation="eager", ).to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: raise ValueError("The eager model should not have SDPA attention layers") has_sdpa = False for name, submodule in model_sdpa.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: has_sdpa = True break if not has_sdpa: raise ValueError("The SDPA model should have SDPA attention layers") # Just test that a large cache works as expected res_eager = model_eager.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False ) res_sdpa = model_sdpa.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False ) self.assertTrue(torch.allclose(res_eager, res_sdpa)) @require_torch class BertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = BertModel.from_pretrained("google-bert/bert-base-uncased") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor([[[0.4249, 0.1008, 0.7531], [0.3771, 0.1188, 0.7467], [0.4152, 0.1098, 0.7108]]]) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)) @slow def test_inference_no_head_relative_embedding_key(self): model = BertModel.from_pretrained("zhiheng-huang/bert-base-uncased-embedding-relative-key") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[0.0756, 0.3142, 
-0.5128], [0.3761, 0.3462, -0.5477], [0.2052, 0.3760, -0.1240]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)) @slow def test_inference_no_head_relative_embedding_key_query(self): model = BertModel.from_pretrained("zhiheng-huang/bert-base-uncased-embedding-relative-key-query") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[0.6496, 0.3784, 0.8203], [0.8148, 0.5656, 0.2636], [-0.0681, 0.5597, 0.7045]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)) def test_sdpa_ignored_mask(self): pkv = [] model = BertModel.from_pretrained("hf-internal-testing/tiny-random-BertModel", attn_implementation="eager") model_sdpa = BertModel.from_pretrained("hf-internal-testing/tiny-random-BertModel", attn_implementation="sdpa") model = model.eval() model_sdpa = model_sdpa.eval() for _ in range(model.config.num_hidden_layers): num_heads = model.config.num_attention_heads head_dim = model.config.hidden_size // model.config.num_attention_heads pkv.append([torch.rand(1, num_heads, 3, head_dim), torch.rand(1, num_heads, 3, head_dim)]) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel") inp = tokenizer("I am in Paris and", return_tensors="pt") del inp["attention_mask"] with torch.no_grad(): res_eager = model(**inp) res_sdpa = model_sdpa(**inp) self.assertTrue( torch.allclose(res_eager.last_hidden_state, res_sdpa.last_hidden_state, atol=1e-5, rtol=1e-4) ) # Case where query length != kv_length. res_eager = model(**inp, past_key_values=pkv) res_sdpa = model_sdpa(**inp, past_key_values=pkv) self.assertTrue( torch.allclose(res_eager.last_hidden_state, res_sdpa.last_hidden_state, atol=1e-5, rtol=1e-4) )
transformers/tests/models/bert/test_modeling_bert.py/0
{ "file_path": "transformers/tests/models/bert/test_modeling_bert.py", "repo_id": "transformers", "token_count": 14904 }
443
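A minimal, self-contained sketch of the cached-decoding consistency check that create_and_check_decoder_model_past_large_inputs above exercises: feeding only the new tokens together with past_key_values should reproduce the tail of a full forward pass. The config sizes reuse the tester's values; the weights are randomly initialised, so only the numerical agreement matters, not the outputs themselves.

import torch
from transformers import BertConfig, BertLMHeadModel

config = BertConfig(
    vocab_size=99,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=37,
    is_decoder=True,
)
model = BertLMHeadModel(config).eval()

prefix = torch.randint(0, config.vocab_size, (1, 5))
next_tokens = torch.randint(0, config.vocab_size, (1, 3))
full_mask = torch.ones(1, 8, dtype=torch.long)  # covers prefix + new tokens

with torch.no_grad():
    # First pass caches the key/value states of the 5-token prefix.
    cached = model(prefix, use_cache=True)
    # Incremental pass: only the 3 new tokens, plus the cached states and the full mask.
    incremental = model(next_tokens, attention_mask=full_mask, past_key_values=cached.past_key_values)
    # Reference: one pass over the concatenated 8-token sequence.
    full = model(torch.cat([prefix, next_tokens], dim=-1), attention_mask=full_mask)

# Logits for the last three positions should agree up to numerical noise.
assert torch.allclose(incremental.logits, full.logits[:, -3:], atol=1e-3)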
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the Blenderbot small tokenizer.""" import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "facebook/blenderbot_small-90M" tokenizer_class = BlenderbotSmallTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""] self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "adapt act apte" output_text = "adapt act apte" return input_text, output_text def test_full_blenderbot_small_tokenizer(self): tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map) text = "adapt act apte" bpe_tokens = ["adapt", "act", "ap@@", "te"] tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] input_bpe_tokens = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) def test_special_tokens_small_tok(self): tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M") assert tok("sam").input_ids == [1384] src_text = "I am a small frog." encoded = tok([src_text], padding=False, truncation=False)["input_ids"] decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def test_empty_word_small_tok(self): tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M") src_text = "I am a small frog ." src_text_dot = "." encoded = tok(src_text)["input_ids"] encoded_dot = tok(src_text_dot)["input_ids"] assert encoded[-1] == encoded_dot[0]
transformers/tests/models/blenderbot_small/test_tokenization_blenderbot_small.py/0
{ "file_path": "transformers/tests/models/blenderbot_small/test_tokenization_blenderbot_small.py", "repo_id": "transformers", "token_count": 1515 }
444
# coding=utf-8 # Copyright 2023 The Intel Labs Team Authors, The Microsoft Research Team Authors and HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class BridgeTowerImageProcessingTester(unittest.TestCase): def __init__( self, parent, do_resize: bool = True, size: Dict[str, int] = None, size_divisor: int = 32, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, do_center_crop: bool = True, image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073], image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711], do_pad: bool = True, batch_size=7, min_resolution=30, max_resolution=400, num_channels=3, ): super().__init__() self.parent = parent self.do_resize = do_resize self.size = size if size is not None else {"shortest_edge": 288} self.size_divisor = size_divisor self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.do_center_crop = do_center_crop self.image_mean = image_mean self.image_std = image_std self.do_pad = do_pad self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to BridgeTowerImageProcessor, assuming do_resize is set to True with a scalar size and size_divisor. 
""" if not batched: size = self.size["shortest_edge"] image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size elif isinstance(image, np.ndarray): h, w = image.shape[0], image.shape[1] else: h, w = image.shape[1], image.shape[2] scale = size / min(w, h) if h < w: newh, neww = size, scale * w else: newh, neww = scale * h, size max_size = int((1333 / 800) * size) if max(newh, neww) > max_size: scale = max_size / max(newh, neww) newh = newh * scale neww = neww * scale newh, neww = int(newh + 0.5), int(neww + 0.5) expected_height, expected_width = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return self.num_channels, height, width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class BridgeTowerImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = BridgeTowerImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "size_divisor"))
transformers/tests/models/bridgetower/test_image_processing_bridgetower.py/0
{ "file_path": "transformers/tests/models/bridgetower/test_image_processing_bridgetower.py", "repo_id": "transformers", "token_count": 2598 }
445
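The BridgeTower tester above re-derives the processor's output resolution by hand: scale the shorter side to shortest_edge, cap the longer side at int(1333 / 800 * shortest_edge), round, then floor both sides to a multiple of size_divisor. A standalone helper making that geometry explicit (defaults mirror the tester: shortest_edge=288, size_divisor=32; the real processor may apply further steps such as center cropping or padding):

def expected_bridgetower_size(height, width, shortest_edge=288, size_divisor=32):
    # Scale so the shorter image side matches `shortest_edge`.
    scale = shortest_edge / min(height, width)
    if height < width:
        new_h, new_w = shortest_edge, scale * width
    else:
        new_h, new_w = scale * height, shortest_edge
    # Cap the longer side, preserving the aspect ratio.
    max_size = int((1333 / 800) * shortest_edge)
    if max(new_h, new_w) > max_size:
        rescale = max_size / max(new_h, new_w)
        new_h, new_w = new_h * rescale, new_w * rescale
    new_h, new_w = int(new_h + 0.5), int(new_w + 0.5)
    # Floor both sides to a multiple of `size_divisor`.
    return new_h // size_divisor * size_divisor, new_w // size_divisor * size_divisor

print(expected_bridgetower_size(480, 640))  # (288, 384)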
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch CLIPSeg model.""" import inspect import os import tempfile import unittest import numpy as np import requests import transformers from transformers import CLIPSegConfig, CLIPSegProcessor, CLIPSegTextConfig, CLIPSegVisionConfig from transformers.testing_utils import ( is_flax_available, is_pt_flax_cross_test, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegTextModel, CLIPSegVisionModel from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES if is_vision_available(): from PIL import Image if is_flax_available(): import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) class CLIPSegVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return CLIPSegVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = CLIPSegVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 
for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class CLIPSegVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as CLIPSeg does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (CLIPSegVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = CLIPSegVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=CLIPSegVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="CLIPSeg does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip def test_training(self): pass @unittest.skip def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="CLIPSegVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="CLIPSegVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): model_name = "CIDAS/clipseg-rd64-refined" model = CLIPSegVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class CLIPSegTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, 
intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return CLIPSegTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = CLIPSegTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class CLIPSegTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (CLIPSegTextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False model_split_percents = [0.5, 0.8, 0.9] def setUp(self): self.model_tester = CLIPSegTextModelTester(self) self.config_tester = ConfigTester(self, config_class=CLIPSegTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip def test_training(self): pass @unittest.skip def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass 
@unittest.skip(reason="CLIPSeg does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="CLIPSegTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="CLIPSegTextModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @slow def test_model_from_pretrained(self): model_name = "CIDAS/clipseg-rd64-refined" model = CLIPSegTextModel.from_pretrained(model_name) self.assertIsNotNone(model) class CLIPSegModelTester: def __init__( self, parent, text_kwargs=None, vision_kwargs=None, is_training=True, # This should respect the `num_hidden_layers` in `CLIPSegVisionModelTester` extract_layers=(1,), ): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = CLIPSegTextModelTester(parent, **text_kwargs) self.vision_model_tester = CLIPSegVisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training self.extract_layers = extract_layers def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return CLIPSegConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64, reduce_dim=32, extract_layers=self.extract_layers, ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = CLIPSegModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def create_and_check_model_for_image_segmentation(self, config, input_ids, attention_maks, pixel_values): model = CLIPSegForImageSegmentation(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values) self.parent.assertEqual( result.logits.shape, ( self.vision_model_tester.batch_size, self.vision_model_tester.image_size, self.vision_model_tester.image_size, ), ) self.parent.assertEqual( result.conditional_embeddings.shape, (self.text_model_tester.batch_size, config.projection_dim) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, } return config, inputs_dict @require_torch class CLIPSegModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (CLIPSegModel, CLIPSegForImageSegmentation) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": CLIPSegModel} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): # CLIPSegForImageSegmentation requires special treatment if 
return_labels: if model_class.__name__ == "CLIPSegForImageSegmentation": batch_size, _, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = torch.zeros( [batch_size, height, width], device=torch_device, dtype=torch.float ) return inputs_dict def setUp(self): self.model_tester = CLIPSegModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_for_image_segmentation(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="CLIPSegModel does not have input/output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass # override as the some parameters require custom initialization def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initilized as per the original implementation if "logit_scale" in name: self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) elif "film" in name or "transposed_conv" in name or "reduce" in name: # those parameters use PyTorch' default nn.Linear initialization scheme pass else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # CLIPSeg needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: 
torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save CLIPSegConfig and check if we can load CLIPSegVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = CLIPSegVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save CLIPSegConfig and check if we can load CLIPSegTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = CLIPSegTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) # overwrite from common since FlaxCLIPSegModel returns nested output # which is not supported in the common test @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): self.skipTest(reason="No Flax model exists for this class") fx_model_class = getattr(transformers, fx_model_class_name) # load Flax class fx_model = fx_model_class(config, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() # convert inputs to Flax fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_outputs = fx_model(**fx_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**fx_inputs).to_tuple() self.assertEqual( len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch" ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2) # overwrite from common since FlaxCLIPSegModel returns nested output # which is not supported in the common test @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # load corresponding PyTorch class pt_model = model_class(config).eval() # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): self.skipTest(reason="No Flax model exists for this class") fx_model_class = getattr(transformers, fx_model_class_name) # load Flax class fx_model = fx_model_class(config, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) # make sure weights are tied in PyTorch pt_model.tie_weights() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_outputs = fx_model(**fx_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = model_class.from_pretrained(tmpdirname, from_flax=True) with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual( len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch" ) for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs_loaded[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="Training test is skipped as the model was not trained") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if model_class.__name__ in MODEL_MAPPING_NAMES.values(): continue print("Model class:", model_class) model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) for k, v in inputs.items(): print(k, v.shape) loss = model(**inputs).loss loss.backward() @slow def test_model_from_pretrained(self): model_name = "CIDAS/clipseg-rd64-refined" model = CLIPSegModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) return image @require_vision @require_torch class CLIPSegModelIntegrationTest(unittest.TestCase): @slow def test_inference_image_segmentation(self): model_name = "CIDAS/clipseg-rd64-refined" processor = CLIPSegProcessor.from_pretrained(model_name) model = CLIPSegForImageSegmentation.from_pretrained(model_name).to(torch_device) image = prepare_img() texts = ["a cat", "a remote", "a blanket"] inputs = processor(text=texts, images=[image] * len(texts), padding=True, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the predicted masks self.assertEqual( outputs.logits.shape, torch.Size((3, 352, 352)), ) expected_masks_slice = torch.tensor( [[-7.4613, -7.4785, -7.3628], [-7.3268, -7.0899, -7.1333], [-6.9838, -6.7900, -6.8913]] ).to(torch_device) 
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_masks_slice, atol=1e-3))

        # verify conditional and pooled output
        expected_conditional = torch.tensor([0.5601, -0.0314, 0.1980]).to(torch_device)
        expected_pooled_output = torch.tensor([0.5036, -0.2681, -0.2644]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.conditional_embeddings[0, :3], expected_conditional, atol=1e-3))
        self.assertTrue(torch.allclose(outputs.pooled_output[0, :3], expected_pooled_output, atol=1e-3))
transformers/tests/models/clipseg/test_modeling_clipseg.py/0
{ "file_path": "transformers/tests/models/clipseg/test_modeling_clipseg.py", "repo_id": "transformers", "token_count": 14677 }
446
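For context, the segmentation API exercised by CLIPSegModelIntegrationTest above can be driven the same way outside the test harness. A minimal sketch follows; the checkpoint id, prompts, and 352x352 logit shape come from the test itself, while the final sigmoid step that turns logits into per-prompt masks is an added assumption the test does not assert.

import requests
import torch
from PIL import Image

from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompts = ["a cat", "a remote", "a blanket"]
inputs = processor(text=prompts, images=[image] * len(prompts), padding=True, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# one low-resolution (352 x 352) logit map per text prompt
masks = torch.sigmoid(outputs.logits)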
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import AnnotationFormatTestMixin, ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class ConditionalDetrImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to ConditionalDetrImageProcessor, assuming do_resize is set to True with a scalar size. 
""" if not batched: image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size elif isinstance(image, np.ndarray): h, w = image.shape[0], image.shape[1] else: h, w = image.shape[1], image.shape[2] if w < h: expected_height = int(self.size["shortest_edge"] * h / w) expected_width = self.size["shortest_edge"] elif w > h: expected_height = self.size["shortest_edge"] expected_width = int(self.size["shortest_edge"] * w / h) else: expected_height = self.size["shortest_edge"] expected_width = self.size["shortest_edge"] else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return self.num_channels, height, width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class ConditionalDetrImageProcessingTest(AnnotationFormatTestMixin, ImageProcessingTestMixin, unittest.TestCase): image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = ConditionalDetrImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) self.assertEqual(image_processor.do_pad, True) image_processor = self.image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False ) self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) self.assertEqual(image_processor.do_pad, False) @slow def test_call_pytorch_with_coco_detection_annotations(self): # prepare image and target image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f: target = json.loads(f.read()) target = {"image_id": 39769, "annotations": target} # encode them image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") encoding = image_processing(images=image, annotations=target, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], 
expected_slice, atol=1e-4)) # verify area expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) # verify boxes expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) # verify image_id expected_image_id = torch.tensor([39769]) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) # verify is_crowd expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) # verify class_labels expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) # verify orig_size expected_orig_size = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) # verify size expected_size = torch.tensor([800, 1066]) self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) @slow def test_call_pytorch_with_coco_panoptic_annotations(self): # prepare image, target and masks_path image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f: target = json.loads(f.read()) target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") # encode them image_processing = ConditionalDetrImageProcessor(format="coco_panoptic") encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1066]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) # verify area expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) # verify boxes expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) # verify image_id expected_image_id = torch.tensor([39769]) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) # verify is_crowd expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) # verify class_labels expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) # verify masks expected_masks_sum = 822873 self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum) # verify orig_size expected_orig_size = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) # verify size expected_size = torch.tensor([800, 
1066]) self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) @slow # Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_batched_coco_detection_annotations with Detr->ConditionalDetr, facebook/detr-resnet-50 ->microsoft/conditional-detr-resnet-50 def test_batched_coco_detection_annotations(self): image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800)) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f: target = json.loads(f.read()) annotations_0 = {"image_id": 39769, "annotations": target} annotations_1 = {"image_id": 39769, "annotations": target} # Adjust the bounding boxes for the resized image w_0, h_0 = image_0.size w_1, h_1 = image_1.size for i in range(len(annotations_1["annotations"])): coords = annotations_1["annotations"][i]["bbox"] new_bbox = [ coords[0] * w_1 / w_0, coords[1] * h_1 / h_0, coords[2] * w_1 / w_0, coords[3] * h_1 / h_0, ] annotations_1["annotations"][i]["bbox"] = new_bbox images = [image_0, image_1] annotations = [annotations_0, annotations_1] image_processing = ConditionalDetrImageProcessor() encoding = image_processing( images=images, annotations=annotations, return_segmentation_masks=True, return_tensors="pt", # do_convert_annotations=True ) # Check the pixel values have been padded postprocessed_height, postprocessed_width = 800, 1066 expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) # Check the bounding boxes have been adjusted for padded images self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4])) expected_boxes_0 = torch.tensor( [ [0.6879, 0.4609, 0.0755, 0.3691], [0.2118, 0.3359, 0.2601, 0.1566], [0.5011, 0.5000, 0.9979, 1.0000], [0.5010, 0.5020, 0.9979, 0.9959], [0.3284, 0.5944, 0.5884, 0.8112], [0.8394, 0.5445, 0.3213, 0.9110], ] ) expected_boxes_1 = torch.tensor( [ [0.4130, 0.2765, 0.0453, 0.2215], [0.1272, 0.2016, 0.1561, 0.0940], [0.3757, 0.4933, 0.7488, 0.9865], [0.3759, 0.5002, 0.7492, 0.9955], [0.1971, 0.5456, 0.3532, 0.8646], [0.5790, 0.4115, 0.3430, 0.7161], ] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3)) self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3)) # Check the masks have also been padded self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066])) self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1066])) # Check if do_convert_annotations=False, then the annotations are not converted to centre_x, centre_y, width, height # format and not in the range [0, 1] encoding = image_processing( images=images, annotations=annotations, return_segmentation_masks=True, do_convert_annotations=False, return_tensors="pt", ) self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4])) # Convert to absolute coordinates unnormalized_boxes_0 = torch.vstack( [ expected_boxes_0[:, 0] * postprocessed_width, expected_boxes_0[:, 1] * postprocessed_height, expected_boxes_0[:, 2] * postprocessed_width, expected_boxes_0[:, 3] * postprocessed_height, ] ).T unnormalized_boxes_1 = torch.vstack( [ expected_boxes_1[:, 0] * postprocessed_width, expected_boxes_1[:, 1] 
* postprocessed_height, expected_boxes_1[:, 2] * postprocessed_width, expected_boxes_1[:, 3] * postprocessed_height, ] ).T # Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max expected_boxes_0 = torch.vstack( [ unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2, unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2, unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2, unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2, ] ).T expected_boxes_1 = torch.vstack( [ unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2, unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2, unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2, unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2, ] ).T self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1)) self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1)) # Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_batched_coco_panoptic_annotations with Detr->ConditionalDetr def test_batched_coco_panoptic_annotations(self): # prepare image, target and masks_path image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800)) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f: target = json.loads(f.read()) annotation_0 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} annotation_1 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} w_0, h_0 = image_0.size w_1, h_1 = image_1.size for i in range(len(annotation_1["segments_info"])): coords = annotation_1["segments_info"][i]["bbox"] new_bbox = [ coords[0] * w_1 / w_0, coords[1] * h_1 / h_0, coords[2] * w_1 / w_0, coords[3] * h_1 / h_0, ] annotation_1["segments_info"][i]["bbox"] = new_bbox masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") images = [image_0, image_1] annotations = [annotation_0, annotation_1] # encode them image_processing = ConditionalDetrImageProcessor(format="coco_panoptic") encoding = image_processing( images=images, annotations=annotations, masks_path=masks_path, return_tensors="pt", return_segmentation_masks=True, ) # Check the pixel values have been padded postprocessed_height, postprocessed_width = 800, 1066 expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) # Check the bounding boxes have been adjusted for padded images self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4])) expected_boxes_0 = torch.tensor( [ [0.2625, 0.5437, 0.4688, 0.8625], [0.7719, 0.4104, 0.4531, 0.7125], [0.5000, 0.4927, 0.9969, 0.9854], [0.1688, 0.2000, 0.2063, 0.0917], [0.5492, 0.2760, 0.0578, 0.2187], [0.4992, 0.4990, 0.9984, 0.9979], ] ) expected_boxes_1 = torch.tensor( [ [0.1576, 0.3262, 0.2814, 0.5175], [0.4634, 0.2463, 0.2720, 0.4275], [0.3002, 0.2956, 0.5985, 0.5913], [0.1013, 0.1200, 0.1238, 0.0550], [0.3297, 0.1656, 0.0347, 0.1312], [0.2997, 0.2994, 0.5994, 0.5987], ] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1e-3)) self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1e-3)) # Check the masks have also been padded 
self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1066])) self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1066])) # Check if do_convert_annotations=False, then the annotations are not converted to centre_x, centre_y, width, height # format and not in the range [0, 1] encoding = image_processing( images=images, annotations=annotations, masks_path=masks_path, return_segmentation_masks=True, do_convert_annotations=False, return_tensors="pt", ) self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4])) # Convert to absolute coordinates unnormalized_boxes_0 = torch.vstack( [ expected_boxes_0[:, 0] * postprocessed_width, expected_boxes_0[:, 1] * postprocessed_height, expected_boxes_0[:, 2] * postprocessed_width, expected_boxes_0[:, 3] * postprocessed_height, ] ).T unnormalized_boxes_1 = torch.vstack( [ expected_boxes_1[:, 0] * postprocessed_width, expected_boxes_1[:, 1] * postprocessed_height, expected_boxes_1[:, 2] * postprocessed_width, expected_boxes_1[:, 3] * postprocessed_height, ] ).T # Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max expected_boxes_0 = torch.vstack( [ unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2, unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2, unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2, unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2, ] ).T expected_boxes_1 = torch.vstack( [ unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2, unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2, unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2, unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2, ] ).T self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1)) self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1)) # Copied from tests.models.detr.test_image_processing_detr.DetrImageProcessingTest.test_max_width_max_height_resizing_and_pad_strategy with Detr->ConditionalDetr def test_max_width_max_height_resizing_and_pad_strategy(self): image_1 = torch.ones([200, 100, 3], dtype=torch.uint8) # do_pad=False, max_height=100, max_width=100, image=200x100 -> 100x50 image_processor = ConditionalDetrImageProcessor( size={"max_height": 100, "max_width": 100}, do_pad=False, ) inputs = image_processor(images=[image_1], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 100, 50])) # do_pad=False, max_height=300, max_width=100, image=200x100 -> 200x100 image_processor = ConditionalDetrImageProcessor( size={"max_height": 300, "max_width": 100}, do_pad=False, ) inputs = image_processor(images=[image_1], return_tensors="pt") # do_pad=True, max_height=100, max_width=100, image=200x100 -> 100x100 image_processor = ConditionalDetrImageProcessor( size={"max_height": 100, "max_width": 100}, do_pad=True, pad_size={"height": 100, "width": 100} ) inputs = image_processor(images=[image_1], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 100, 100])) # do_pad=True, max_height=300, max_width=100, image=200x100 -> 300x100 image_processor = ConditionalDetrImageProcessor( size={"max_height": 300, "max_width": 100}, do_pad=True, pad_size={"height": 301, "width": 101}, ) inputs = image_processor(images=[image_1], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 301, 101])) ### Check for 
batch image_2 = torch.ones([100, 150, 3], dtype=torch.uint8) # do_pad=True, max_height=150, max_width=100, images=[200x100, 100x150] -> 150x100 image_processor = ConditionalDetrImageProcessor( size={"max_height": 150, "max_width": 100}, do_pad=True, pad_size={"height": 150, "width": 100}, ) inputs = image_processor(images=[image_1, image_2], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([2, 3, 150, 100])) def test_longest_edge_shortest_edge_resizing_strategy(self): image_1 = torch.ones([958, 653, 3], dtype=torch.uint8) # max size is set; width < height; # do_pad=False, longest_edge=640, shortest_edge=640, image=958x653 -> 640x436 image_processor = ConditionalDetrImageProcessor( size={"longest_edge": 640, "shortest_edge": 640}, do_pad=False, ) inputs = image_processor(images=[image_1], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 640, 436])) image_2 = torch.ones([653, 958, 3], dtype=torch.uint8) # max size is set; height < width; # do_pad=False, longest_edge=640, shortest_edge=640, image=653x958 -> 436x640 image_processor = ConditionalDetrImageProcessor( size={"longest_edge": 640, "shortest_edge": 640}, do_pad=False, ) inputs = image_processor(images=[image_2], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 436, 640])) image_3 = torch.ones([100, 120, 3], dtype=torch.uint8) # max size is set; width == size; height > max_size; # do_pad=False, longest_edge=118, shortest_edge=100, image=120x100 -> 118x98 image_processor = ConditionalDetrImageProcessor( size={"longest_edge": 118, "shortest_edge": 100}, do_pad=False, ) inputs = image_processor(images=[image_3], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 98, 118])) image_4 = torch.ones([128, 50, 3], dtype=torch.uint8) # max size is set; height == size; width < max_size; # do_pad=False, longest_edge=256, shortest_edge=50, image=50x128 -> 50x128 image_processor = ConditionalDetrImageProcessor( size={"longest_edge": 256, "shortest_edge": 50}, do_pad=False, ) inputs = image_processor(images=[image_4], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 128, 50])) image_5 = torch.ones([50, 50, 3], dtype=torch.uint8) # max size is set; height == width; width < max_size; # do_pad=False, longest_edge=117, shortest_edge=50, image=50x50 -> 50x50 image_processor = ConditionalDetrImageProcessor( size={"longest_edge": 117, "shortest_edge": 50}, do_pad=False, ) inputs = image_processor(images=[image_5], return_tensors="pt") self.assertEqual(inputs["pixel_values"].shape, torch.Size([1, 3, 50, 50]))
transformers/tests/models/conditional_detr/test_image_processing_conditional_detr.py/0
{ "file_path": "transformers/tests/models/conditional_detr/test_image_processing_conditional_detr.py", "repo_id": "transformers", "token_count": 12999 }
447
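The annotation handling verified above follows one pattern: a COCO-style dict goes in, padded pixel values and per-image label dicts come out, with boxes normalized to (center_x, center_y, width, height) unless do_convert_annotations=False. A condensed sketch, assuming the same checkpoint and fixture image as the tests; the single toy annotation is illustrative only.

from PIL import Image

from transformers import ConditionalDetrImageProcessor

processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
annotations = {
    "image_id": 39769,
    "annotations": [{"bbox": [10.0, 20.0, 30.0, 40.0], "category_id": 17, "area": 1200.0, "iscrowd": 0}],
}

encoding = processor(images=image, annotations=annotations, return_tensors="pt")
pixel_values = encoding["pixel_values"]  # resized and padded, e.g. (1, 3, 800, 1066)
boxes = encoding["labels"][0]["boxes"]  # normalized (center_x, center_y, width, height)

# passing do_convert_annotations=False instead keeps absolute corner coordinates,
# which is what the batched tests above convert back to before comparing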
# coding=utf-8
# Copyright 2022 The OpenBMB Team and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = "openbmb/cpm-ant-10b"
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
transformers/tests/models/cpmant/test_tokenization_cpmant.py/0
{ "file_path": "transformers/tests/models/cpmant/test_tokenization_cpmant.py", "repo_id": "transformers", "token_count": 1090 }
448
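The setUp above only writes a one-token-per-line vocab file; the tokenizer can be built from such a file directly. A small sketch under the assumption that jieba is installed and that "我是CPMAnt" is a reasonable probe string (it is composed solely of tokens from the toy vocab); the exact tokenization output is not asserted here.

import tempfile
from pathlib import Path

from transformers.models.cpmant.tokenization_cpmant import CpmAntTokenizer

vocab_tokens = ["<d>", "</d>", "<s>", "</s>", "</_>", "<unk>", "<pad>", "</n>", "我", "是", "C", "P", "M", "A", "n", "t"]

with tempfile.TemporaryDirectory() as tmpdir:
    vocab_file = Path(tmpdir) / "vocab.txt"
    vocab_file.write_text("".join(token + "\n" for token in vocab_tokens), encoding="utf-8")
    tokenizer = CpmAntTokenizer(str(vocab_file))
    print(tokenizer.tokenize("我是CPMAnt"))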
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class TFDistilBertModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = False self.use_labels = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = DistilBertConfig( vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_distilbert_model( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFDistilBertModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_distilbert_for_masked_lm( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFDistilBertForMaskedLM(config=config) inputs = {"input_ids": 
input_ids, "attention_mask": input_mask} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_distilbert_for_question_answering( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFDistilBertForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, } result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_distilbert_for_sequence_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFDistilBertForSequenceClassification(config) inputs = {"input_ids": input_ids, "attention_mask": input_mask} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_distilbert_for_multiple_choice( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFDistilBertForMultipleChoice(config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_distilbert_for_token_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFDistilBertForTokenClassification(config) inputs = {"input_ids": input_ids, "attention_mask": input_mask} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) pipeline_model_mapping = ( { "feature-extraction": TFDistilBertModel, "fill-mask": TFDistilBertForMaskedLM, "question-answering": TFDistilBertForQuestionAnswering, "text-classification": TFDistilBertForSequenceClassification, "token-classification": TFDistilBertForTokenClassification, "zero-shot": TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFDistilBertModelTester(self) self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37) def test_config(self): self.config_tester.run_common_tests() def test_distilbert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "distilbert/distilbert-base-cased" model = TFDistilBertModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_tf class TFDistilBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TFDistilBertModel.from_pretrained("distilbert-base-uncased") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] expected_shape = [1, 6, 768] self.assertEqual(output.shape, expected_shape) expected_slice = tf.constant( [ [ [0.19261885, -0.13732955, 0.4119799], [0.22150156, -0.07422661, 0.39037204], [0.22756018, -0.0896414, 0.3701467], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
transformers/tests/models/distilbert/test_modeling_tf_distilbert.py/0
{ "file_path": "transformers/tests/models/distilbert/test_modeling_tf_distilbert.py", "repo_id": "transformers", "token_count": 4494 }
449
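All the multiple-choice helpers in these TF testers rely on the same reshaping trick: a (batch_size, seq_length) tensor is expanded and tiled into (batch_size, num_choices, seq_length) so that every choice initially shares the same tokens. A tiny standalone illustration with made-up shapes:

import tensorflow as tf

batch_size, seq_length, num_choices = 2, 5, 4
input_ids = tf.random.uniform((batch_size, seq_length), maxval=99, dtype=tf.int32)

multiple_choice_input_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, num_choices, 1))
print(multiple_choice_input_ids.shape)  # (2, 4, 5)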
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class TFFlaubertModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_lengths = True self.use_token_type_ids = True self.use_labels = True self.gelu_activation = True self.sinusoidal_embeddings = False self.causal = False self.asm = False self.n_langs = 2 self.vocab_size = 99 self.n_special = 0 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.summary_type = "last" self.use_proj = True self.scope = None self.bos_token_id = 0 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32) input_lengths = None if self.use_input_lengths: input_lengths = ( ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs) sequence_labels = None token_labels = None is_impossible_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = FlaubertConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, 
bos_token_id=self.bos_token_id, ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def create_and_check_flaubert_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFFlaubertModel(config=config) inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_flaubert_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFFlaubertWithLMHeadModel(config) inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_flaubert_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFFlaubertForQuestionAnsweringSimple(config) inputs = {"input_ids": input_ids, "lengths": input_lengths} result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_flaubert_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = TFFlaubertForSequenceClassification(config) inputs = {"input_ids": input_ids, "lengths": input_lengths} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_flaubert_for_token_classification( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_labels = self.num_labels model = TFFlaubertForTokenClassification(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_flaubert_for_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_choices = self.num_choices model = TFFlaubertForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) = 
config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "langs": token_type_ids, "lengths": input_lengths, } return config, inputs_dict @require_tf class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) all_generative_model_classes = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable pipeline_model_mapping = ( { "feature-extraction": TFFlaubertModel, "fill-mask": TFFlaubertWithLMHeadModel, "question-answering": TFFlaubertForQuestionAnsweringSimple, "text-classification": TFFlaubertForSequenceClassification, "token-classification": TFFlaubertForTokenClassification, "zero-shot": TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def setUp(self): self.model_tester = TFFlaubertModelTester(self) self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37) def test_config(self): self.config_tester.run_common_tests() def test_flaubert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*config_and_inputs) def test_flaubert_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs) def test_flaubert_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*config_and_inputs) def test_flaubert_sequence_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "hf-internal-testing/tiny-random-flaubert" model = TFFlaubertModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_tf @require_sentencepiece @require_tokenizers class TFFlaubertModelIntegrationTest(unittest.TestCase): @slow def test_output_embeds_base_model(self): model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased") input_ids = tf.convert_to_tensor( [[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32, ) # "J'aime flaubert !" 
        output = model(input_ids)[0]

        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
transformers/tests/models/flaubert/test_modeling_tf_flaubert.py/0
{ "file_path": "transformers/tests/models/flaubert/test_modeling_tf_flaubert.py", "repo_id": "transformers", "token_count": 6426 }
450
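Unlike the BERT-style testers, the Flaubert inputs above carry explicit "lengths" and "langs" tensors instead of an attention mask. A rough sketch of that input layout, reusing the checkpoint and token ids from the integration test; feeding zeros as language ids is an assumption for the monolingual case.

import tensorflow as tf

from transformers import TFFlaubertModel

model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

input_ids = tf.convert_to_tensor([[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32)  # "J'aime flaubert !"
inputs = {
    "input_ids": input_ids,
    "lengths": tf.convert_to_tensor([8], dtype=tf.int32),  # true (unpadded) length of each sequence
    "langs": tf.zeros_like(input_ids),  # one language id per token
}

last_hidden_state = model(inputs)[0]  # (1, 8, 512) for this small checkpoint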
# coding=utf-8 # Copyright 2020 HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class TFFunnelModelTester: """You can also import this e.g, from .test_modeling_funnel import FunnelModelTester""" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1, d_model=32, n_head=4, d_head=8, d_inner=37, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, max_position_embeddings=512, type_vocab_size=3, initializer_std=0.02, # Set to a smaller value, so we can keep the small error threshold (1e-5) in the test num_labels=3, num_choices=4, scope=None, base=False, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.block_sizes = block_sizes self.num_decoder_layers = num_decoder_layers self.d_model = d_model self.n_head = n_head self.d_head = d_head self.d_inner = d_inner self.hidden_act = hidden_act self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = 2 self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.initializer_std = initializer_std # Used in the tests to check the size of the first attention layer self.num_attention_heads = n_head # Used in the tests to check the size of the first hidden state self.hidden_size = self.d_model # Used in the tests to check the number of output hidden states/attentions self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: self.expected_num_hidden_layers = self.num_hidden_layers + 2 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = FunnelConfig( vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std, ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = TFFunnelModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) config.truncate_seq = False model = TFFunnelModel(config=config) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) config.separate_cls = False model = TFFunnelModel(config=config) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model)) def create_and_check_base_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = TFFunnelBaseModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model)) config.truncate_seq = False model = TFFunnelBaseModel(config=config) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model)) config.separate_cls = False model = TFFunnelBaseModel(config=config) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = TFFunnelForPreTraining(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, 
token_labels, choice_labels, ): model = TFFunnelForMaskedLM(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.num_labels = self.num_labels model = TFFunnelForSequenceClassification(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.num_choices = self.num_choices model = TFFunnelForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): config.num_labels = self.num_labels model = TFFunnelForTokenClassification(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ): model = TFFunnelForQuestionAnswering(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": (TFFunnelBaseModel, TFFunnelModel), "fill-mask": TFFunnelForMaskedLM, "question-answering": TFFunnelForQuestionAnswering, "text-classification": TFFunnelForSequenceClassification, "token-classification": TFFunnelForTokenClassification, "zero-shot": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False def setUp(self): self.model_tester = 
TFFunnelModelTester(self) self.config_tester = ConfigTester(self, config_class=FunnelConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) @require_tf class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFFunnelModelTester(self, base=True) self.config_tester = ConfigTester(self, config_class=FunnelConfig) def test_config(self): self.config_tester.run_common_tests() def test_base_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
transformers/tests/models/funnel/test_modeling_tf_funnel.py/0
{ "file_path": "transformers/tests/models/funnel/test_modeling_tf_funnel.py", "repo_id": "transformers", "token_count": 6844 }
451
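# Editor's note: a minimal, standalone sketch (not part of the test file above) of the
# tf.expand_dims + tf.tile pattern that TFFunnelModelTester.create_and_check_for_multiple_choice
# uses to expand (batch, seq_len) inputs into (batch, num_choices, seq_len) for
# TFFunnelForMultipleChoice. The tensor values below are made up for illustration.
import tensorflow as tf

input_ids = tf.constant([[1, 2, 3], [4, 5, 6]])  # (batch=2, seq_len=3)
num_choices = 4

# Repeat the same ids once per answer choice along a new axis 1
multiple_choice_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, num_choices, 1))
print(multiple_choice_ids.shape)  # (2, 4, 3)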
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class GLPNImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size_divisor=32, do_rescale=True, ): super().__init__() self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size_divisor = size_divisor self.do_rescale = do_rescale def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } def expected_output_image_shape(self, images): if isinstance(images[0], Image.Image): width, height = images[0].size elif isinstance(images[0], np.ndarray): height, width = images[0].shape[0], images[0].shape[1] else: height, width = images[0].shape[1], images[0].shape[2] height = height // self.size_divisor * self.size_divisor width = width // self.size_divisor * self.size_divisor return self.num_channels, height, width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, size_divisor=self.size_divisor, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class GLPNImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = GLPNImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = GLPNImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size_divisor")) self.assertTrue(hasattr(image_processing, "resample")) self.assertTrue(hasattr(image_processing, "do_rescale")) def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input (GLPNImageProcessor doesn't support batching) 
encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape)) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input (GLPNImageProcessor doesn't support batching) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape)) def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input (GLPNImageProcessor doesn't support batching) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape)) def test_call_numpy_4_channels(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors self.image_processing_class.num_channels = 4 image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input (GLPNImageProcessor doesn't support batching) encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertTrue(tuple(encoded_images.shape) == (1, *expected_output_image_shape)) self.image_processing_class.num_channels = 3
transformers/tests/models/glpn/test_image_processing_glpn.py/0
{ "file_path": "transformers/tests/models/glpn/test_image_processing_glpn.py", "repo_id": "transformers", "token_count": 2691 }
452
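# Editor's note: a small standalone sketch (not from the repository) of the rounding that
# GLPNImageProcessingTester.expected_output_image_shape above encodes: GLPN-style resizing
# floors height and width to the nearest multiple of `size_divisor`, so that is what the
# tests compare the processor output against. The example resolution is made up.
def floor_to_multiple(value: int, divisor: int) -> int:
    # Round down to the nearest multiple of `divisor`
    return value // divisor * divisor

size_divisor = 32
height, width = 100, 75
print(floor_to_multiple(height, size_divisor), floor_to_multiple(width, size_divisor))  # 96 64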
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch GPTNeoXJapanese model.""" import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class GPTNeoXJapaneseModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_multiple_size=4, hidden_act="gelu", hidden_dropout=0.0, attention_dropout=0.1, weight_tying=True, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_multiple_size = intermediate_multiple_size self.hidden_act = hidden_act self.hidden_dropout = hidden_dropout self.attention_dropout = attention_dropout self.weight_tying = weight_tying self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_labels = None if self.use_labels: token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) config = self.get_config() return config, input_ids, input_mask, token_labels def get_config(self): return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_multiple_size=self.intermediate_multiple_size, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, weight_tying=self.weight_tying, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, 
initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs() config.is_decoder = True return config, input_ids, input_mask, token_labels def create_and_check_model(self, config, input_ids, input_mask): model = GPTNeoXJapaneseModel(config=config) model.to(torch_device) model.eval() _ = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder(self, config, input_ids, input_mask): config.add_cross_attention = True model = GPTNeoXJapaneseModel(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels): model = GPTNeoXJapaneseForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask): config.is_decoder = True model = GPTNeoXJapaneseForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, attention_mask=input_mask, use_cache=True) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True) output_from_no_past = output_from_no_past["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask, token_labels = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) test_pruning = False test_missing_keys = False test_model_parallel = False 
test_head_masking = False def setUp(self): self.model_tester = GPTNeoXJapaneseModelTester(self) self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(config, input_ids, input_mask) def test_model_as_decoder(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask) def test_decoder_model_past_large_inputs(self): config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask) def test_model_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) @slow def test_generation(self): model_id = "abeja/gpt-neox-japanese-2.7b" prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"] # fmt: skip EXPECTED_OUTPUTS = [ "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。", "100年後に必要とされる会社は、「人」が中心の会社です。", "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。", "国境の長いトンネルを抜けると、そこは雪国だった。", "美味しい日本食といえば、やっぱりお寿司ですよね。", ] tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id) model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id) predicted_outputs = [] for prompt in prompts: input_ids = tokenizer(prompt, return_tensors="pt").input_ids generated_ids = model.generate(input_ids, max_length=50) generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) predicted_outputs += generated_string self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
transformers/tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py/0
{ "file_path": "transformers/tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py", "repo_id": "transformers", "token_count": 4859 }
453
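# Editor's note: an illustrative, self-contained sketch (not from the repository) of the tensor
# bookkeeping in GPTNeoXJapaneseModelTester.create_and_check_decoder_model_past_large_inputs above:
# the last positions of a full forward pass are sliced along a random hidden dimension and compared
# with torch.allclose against the incremental pass that reused past_key_values. Here the "cached"
# tensor is derived directly from the full one, so the check passes by construction; in the real
# test both tensors come from separate model calls.
import torch

torch.manual_seed(0)
full_hidden = torch.randn(2, 8, 4)            # stands in for output_from_no_past
cached_hidden = full_hidden[:, -3:].clone()   # stands in for output_from_past (3 new tokens)

random_slice_idx = torch.randint(0, full_hidden.shape[-1], (1,)).item()
no_past_slice = full_hidden[:, -3:, random_slice_idx]
past_slice = cached_hidden[:, :, random_slice_idx]

assert past_slice.shape[1] == 3
assert torch.allclose(past_slice, no_past_slice, atol=1e-3)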
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the TensorFlow GroupViT model.""" from __future__ import annotations import inspect import os import random import tempfile import unittest from importlib import import_module import numpy as np import requests from transformers import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig from transformers.testing_utils import ( is_pt_tf_cross_test, require_tensorflow_probability, require_tf, require_vision, slow, ) from transformers.utils import is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFGroupViTModel, TFGroupViTTextModel, TFGroupViTVisionModel, TFSharedEmbeddings from transformers.modeling_tf_utils import keras if is_vision_available(): from PIL import Image from transformers import CLIPProcessor class TFGroupViTVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, depths=[6, 3, 3], num_group_tokens=[64, 8, 0], num_output_groups=[64, 8, 8], num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.depths = depths self.num_hidden_layers = sum(depths) self.expected_num_hidden_layers = len(depths) + 1 self.num_group_tokens = num_group_tokens self.num_output_groups = num_output_groups self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope num_patches = (image_size // patch_size) ** 2 # no [CLS] token for GroupViT self.seq_length = num_patches def prepare_config_and_inputs(self): rng = random.Random(0) pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size], rng=rng) config = self.get_config() return config, pixel_values def get_config(self): return GroupViTVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, depths=self.depths, num_group_tokens=self.num_group_tokens, num_output_groups=self.num_output_groups, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = TFGroupViTVisionModel(config=config) result = model(pixel_values, training=False) 
self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.num_output_groups[-1], self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFGroupViTVisionModelTest(TFModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as GroupViT does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (TFGroupViTVisionModel,) if is_tf_available() else () test_pruning = False test_resize_embeddings = False test_head_masking = False test_onnx = False def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-4, name="outputs", attributes=None): # We override with a slightly higher tol value, as this model tends to diverge a bit more super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) def setUp(self): self.model_tester = TFGroupViTVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=GroupViTVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="GroupViT does not use inputs_embeds") def test_inputs_embeds(self): pass """ During saving, TensorFlow will also run with `training=True` which trigger `gumbel_softmax` that requires `tensorflow-probability`. """ @require_tensorflow_probability @slow def test_saved_model_creation(self): super().test_saved_model_creation() @unittest.skip(reason="GroupViT does not use inputs_embeds") def test_graph_mode_with_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (keras.layers.Layer)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, keras.layers.Layer)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) expected_num_attention_outputs = sum(g > 0 for g in self.model_tester.num_group_tokens) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.attentions # GroupViT returns attention grouping of each stage self.assertEqual(len(attentions), sum(g > 0 for g in self.model_tester.num_group_tokens)) # check that output_attentions also work using config del 
inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) attentions = outputs.attentions # GroupViT returns attention grouping of each stage self.assertEqual(len(attentions), expected_num_attention_outputs) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions # GroupViT returns attention grouping of each stage self.assertEqual(len(self_attentions), expected_num_attention_outputs) for i, self_attn in enumerate(self_attentions): if self_attn is None: continue self.assertListEqual( list(self_attentions[i].shape[-2:]), [ self.model_tester.num_output_groups[i], self.model_tester.num_output_groups[i - 1] if i > 0 else seq_len, ], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) seq_length = getattr(self.model_tester, "seq_length", None) self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self): # `GroupViT` computes some indices using argmax, uses them as # one-hot encoding for further computation. The problem is # while PT/TF have very small difference in `y_soft` (~ 1e-9), # the argmax could be totally different, if there are at least # 2 indices with almost identical values. This leads to very # large difference in the outputs. We need specific seeds to # avoid almost identical values happening in `y_soft`. import torch seed = 338 random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) tf.random.set_seed(seed) return super().test_pt_tf_model_equivalence() @slow def test_model_from_pretrained(self): model_name = "nvidia/groupvit-gcc-yfcc" model = TFGroupViTVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip( "TFGroupViTVisionModel does not convert `hidden_states` and `attentions` to tensors as they are all of" " different dimensions, and we get `Got a non-Tensor value` error when saving the model." 
) @slow def test_saved_model_creation_extended(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True if hasattr(config, "use_cache"): config.use_cache = True seq_len = getattr(self.model_tester, "seq_length", None) for model_class in self.all_model_classes: class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) num_out = len(model(class_inputs_dict)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=True) saved_model_dir = os.path.join(tmpdirname, "saved_model", "1") model = keras.models.load_model(saved_model_dir) outputs = model(class_inputs_dict) output_hidden_states = outputs["hidden_states"] output_attentions = outputs["attentions"] # Check num outputs self.assertEqual(len(outputs), num_out) # Check num layers expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(output_hidden_states), expected_num_layers) self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers) # Check attention outputs image_size = (self.model_tester.image_size, self.model_tester.image_size) patch_size = (self.model_tester.patch_size, self.model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 self.assertListEqual( list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) # Check hidden states self.assertListEqual( list(output_hidden_states[0].shape[-2:]), [seq_len, self.model_tester.hidden_size], ) class TFGroupViTTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): rng = random.Random(0) input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size, rng=rng) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) # make sure the first token has attention mask `1` to ensure that, after combining the causal mask, there # is still at least one token being attended to for each batch. # TODO: Change `random_attention_mask` in PT/TF/Flax common test file, after a discussion with the team. 
input_mask = tf.concat( [tf.ones_like(input_mask[:, :1], dtype=input_mask.dtype), input_mask[:, 1:]], axis=-1 ) config = self.get_config() return config, input_ids, input_mask def get_config(self): return GroupViTTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = TFGroupViTTextModel(config=config) result = model(input_ids, attention_mask=input_mask, training=False) result = model(input_ids, training=False) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFGroupViTTextModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFGroupViTTextModel,) if is_tf_available() else () test_pruning = False test_head_masking = False test_onnx = False def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-4, name="outputs", attributes=None): # We override with a slightly higher tol value, as this model tends to diverge a bit more super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) def setUp(self): self.model_tester = TFGroupViTTextModelTester(self) self.config_tester = ConfigTester(self, config_class=GroupViTTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="GroupViTTextModel does not use inputs_embeds") def test_inputs_embeds(self): pass @slow def test_model_from_pretrained(self): model_name = "nvidia/groupvit-gcc-yfcc" model = TFGroupViTTextModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow def test_saved_model_creation_extended(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True if hasattr(config, "use_cache"): config.use_cache = True for model_class in self.all_model_classes: class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) num_out = len(model(class_inputs_dict)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=True) saved_model_dir = os.path.join(tmpdirname, "saved_model", "1") model = keras.models.load_model(saved_model_dir) outputs = model(class_inputs_dict) output_hidden_states = outputs["hidden_states"] output_attentions = outputs["attentions"] # Check number of outputs self.assertEqual(len(outputs), num_out) # Check number of layers expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) # Check hidden states self.assertEqual(len(output_hidden_states), expected_num_layers) self.assertListEqual( list(output_hidden_states[0].shape[-2:]), 
[self.model_tester.seq_length, self.model_tester.hidden_size], ) # Check attention outputs self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers) seq_length = self.model_tester.seq_length key_length = getattr(self.model_tester, "key_length", seq_length) self.assertListEqual( list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, key_length], ) class TFGroupViTModelTester: def __init__(self, parent, is_training=True): self.parent = parent self.text_model_tester = TFGroupViTTextModelTester(parent) self.vision_model_tester = TFGroupViTVisionModelTester(parent) self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return GroupViTConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = TFGroupViTModel(config) result = model(input_ids, pixel_values, attention_mask, training=False) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": True, } return config, inputs_dict @require_tf class TFGroupViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFGroupViTModel,) if is_tf_available() else () pipeline_model_mapping = {"feature-extraction": TFGroupViTModel} if is_tf_available() else {} test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False test_onnx = False def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-4, name="outputs", attributes=None): # We override with a slightly higher tol value, as this model tends to diverge a bit more super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) def setUp(self): self.model_tester = TFGroupViTModelTester(self) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="hidden_states are tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="input_embeds are tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="CLIPModel does not have input/output embeddings") def test_model_common_attributes(self): pass @require_tensorflow_probability @slow def test_keras_fit(self): super().test_keras_fit() @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self): # `GroupViT` computes some indices using argmax, uses them as # one-hot encoding for further computation. 
The problem is # while PT/TF have very small difference in `y_soft` (~ 1e-9), # the argmax could be totally different, if there are at least # 2 indices with almost identical values. This leads to very # large difference in the outputs. We need specific seeds to # avoid almost identical values happening in `y_soft`. import torch seed = 158 random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) tf.random.set_seed(seed) return super().test_pt_tf_model_equivalence() # overwrite from common since `TFGroupViTModelTester` set `return_loss` to `True` and causes the preparation of # `symbolic_inputs` failed. def test_keras_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # remove `return_loss` to make code work if self.__class__.__name__ == "TFGroupViTModelTest": inputs_dict.pop("return_loss", None) tf_main_layer_classes = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__),) for module_member_name in dir(module) if module_member_name.endswith("MainLayer") # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")] for module_member in (getattr(module, module_member_name),) if isinstance(module_member, type) and keras.layers.Layer in module_member.__bases__ and getattr(module_member, "_keras_serializable", False) } for main_layer_class in tf_main_layer_classes: # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter if "T5" in main_layer_class.__name__: # Take the same values than in TFT5ModelTester for this shared layer shared = TFSharedEmbeddings(99, 32, name="shared") config.use_cache = inputs_dict.pop("use_cache", None) main_layer = main_layer_class(config, embed_tokens=shared) else: main_layer = main_layer_class(config) symbolic_inputs = { name: keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items() } model = keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs)) outputs = model(inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: filepath = os.path.join(tmpdirname, "keras_model.h5") model.save(filepath) if "T5" in main_layer_class.__name__: model = keras.models.load_model( filepath, custom_objects={ main_layer_class.__name__: main_layer_class, "TFSharedEmbeddings": TFSharedEmbeddings, }, ) else: model = keras.models.load_model( filepath, custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(model, keras.Model) after_outputs = model(inputs_dict) self.assert_outputs_same(after_outputs, outputs) @slow def test_model_from_pretrained(self): model_name = "nvidia/groupvit-gcc-yfcc" model = TFGroupViTModel.from_pretrained(model_name) self.assertIsNotNone(model) @unittest.skip(reason="Currently `saved_model` doesn't work with nested outputs.") @slow def test_saved_model_creation(self): pass @unittest.skip(reason="`saved_model` doesn't work with nested outputs so no preparation happens.") @slow def test_prepare_serving_output(self): pass # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_tf class TFGroupViTModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "nvidia/groupvit-gcc-yfcc" model = 
TFGroupViTModel.from_pretrained(model_name) processor = CLIPProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="tf" ) outputs = model(**inputs, training=False) # verify the logits self.assertEqual( outputs.logits_per_image.shape, tf.TensorShape((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, tf.TensorShape((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = tf.constant([[13.3523, 6.3629]]) tf.debugging.assert_near(outputs.logits_per_image, expected_logits, atol=1e-3)
transformers/tests/models/groupvit/test_modeling_tf_groupvit.py/0
{ "file_path": "transformers/tests/models/groupvit/test_modeling_tf_groupvit.py", "repo_id": "transformers", "token_count": 13294 }
454
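# Editor's note: a toy numeric sketch (not from the repository) of the reasoning given in
# test_pt_tf_model_equivalence above: GroupViT turns soft assignment scores into a hard one-hot
# via argmax, so when two scores differ only at the ~1e-9 level the argmax can flip between the
# PyTorch and TensorFlow runs even though the soft values agree, which is why the test pins all
# random seeds. The score values are hypothetical.
import numpy as np

y_soft_pt = np.array([0.5 + 1e-9, 0.5 - 1e-9])  # hypothetical PyTorch soft scores
y_soft_tf = np.array([0.5 - 1e-9, 0.5 + 1e-9])  # hypothetical TensorFlow soft scores

print(np.abs(y_soft_pt - y_soft_tf).max())         # ~2e-9: numerically almost identical
print(np.argmax(y_soft_pt), np.argmax(y_soft_tf))  # 0 1: the hard assignment still flips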
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin if is_vision_available(): from PIL import Image from transformers import Idefics2ImageProcessor if is_torch_available(): import torch class Idefics2ImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, num_images=1, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_convert_rgb=True, do_pad=True, do_image_splitting=True, ): size = size if size is not None else {"shortest_edge": 378, "longest_edge": 980} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.num_images = num_images self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_convert_rgb = do_convert_rgb self.do_pad = do_pad self.do_image_splitting = do_image_splitting def prepare_image_processor_dict(self): return { "do_convert_rgb": self.do_convert_rgb, "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, "do_image_splitting": self.do_image_splitting, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to BridgeTowerImageProcessor, assuming do_resize is set to True with a scalar size and size_divisor. 
""" if not batched: shortest_edge = self.size["shortest_edge"] longest_edge = self.size["longest_edge"] image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size elif isinstance(image, np.ndarray): h, w = image.shape[0], image.shape[1] else: h, w = image.shape[1], image.shape[2] aspect_ratio = w / h if w > h and w >= longest_edge: w = longest_edge h = int(w / aspect_ratio) elif h > w and h >= longest_edge: h = longest_edge w = int(h * aspect_ratio) w = max(w, shortest_edge) h = max(h, shortest_edge) expected_height = h expected_width = w else: expected_values = [] for images in image_inputs: for image in images: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) effective_nb_images = self.num_images * 5 if self.do_image_splitting else 1 return effective_nb_images, self.num_channels, height, width def prepare_image_inputs( self, batch_size=None, min_resolution=None, max_resolution=None, num_channels=None, num_images=None, size_divisor=None, equal_resolution=False, numpify=False, torchify=False, ): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. One can specify whether the images are of the same resolution or not. """ assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" batch_size = batch_size if batch_size is not None else self.batch_size min_resolution = min_resolution if min_resolution is not None else self.min_resolution max_resolution = max_resolution if max_resolution is not None else self.max_resolution num_channels = num_channels if num_channels is not None else self.num_channels num_images = num_images if num_images is not None else self.num_images images_list = [] for i in range(batch_size): images = [] for j in range(num_images): if equal_resolution: width = height = max_resolution else: # To avoid getting image width/height 0 if size_divisor is not None: # If `size_divisor` is defined, the image needs to have width/size >= `size_divisor` min_resolution = max(size_divisor, min_resolution) width, height = np.random.choice(np.arange(min_resolution, max_resolution), 2) images.append(np.random.randint(255, size=(num_channels, width, height), dtype=np.uint8)) images_list.append(images) if not numpify and not torchify: # PIL expects the channel dimension as last dimension images_list = [[Image.fromarray(np.moveaxis(image, 0, -1)) for image in images] for images in images_list] if torchify: images_list = [[torch.from_numpy(image) for image in images] for images in images_list] if numpify: # Numpy images are typically in channels last format images_list = [[image.transpose(1, 2, 0) for image in images] for images in images_list] return images_list @require_torch @require_vision class Idefics2ImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = Idefics2ImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = Idefics2ImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def 
test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_convert_rgb")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "rescale_factor")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_pad")) self.assertTrue(hasattr(image_processing, "do_image_splitting")) def test_call_numpy(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for sample_images in image_inputs: for image in sample_images: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) ) def test_call_numpy_4_channels(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processor_dict = self.image_processor_dict image_processor_dict["image_mean"] = [0.5, 0.5, 0.5, 0.5] image_processor_dict["image_std"] = [0.5, 0.5, 0.5, 0.5] image_processing = self.image_processing_class(**image_processor_dict) # create random numpy tensors self.image_processor_tester.num_channels = 4 image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True) for sample_images in image_inputs: for image in sample_images: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing( image_inputs[0], input_data_format="channels_last", return_tensors="pt" ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched encoded_images = image_processing( image_inputs, input_data_format="channels_last", return_tensors="pt" ).pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) ) def test_call_pil(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False) for images in image_inputs: for image in images: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], 
return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape) ) def test_call_pytorch(self): for image_processing_class in self.image_processor_list: # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) for images in image_inputs: for image in images: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values expected_output_image_shape = self.image_processor_tester.expected_output_image_shape([image_inputs[0]]) self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape)) # Test batched expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs) encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape), )
transformers/tests/models/idefics2/test_image_processing_idefics2.py/0
{ "file_path": "transformers/tests/models/idefics2/test_image_processing_idefics2.py", "repo_id": "transformers", "token_count": 6006 }
455
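# Editor's note: a standalone sketch (not from the repository) of the resize rule that
# Idefics2ImageProcessingTester.get_expected_values above encodes: the longer side is capped at
# size["longest_edge"] while keeping the aspect ratio, then both sides are raised to at least
# size["shortest_edge"]. The example resolutions are made up.
def expected_hw(width, height, shortest_edge=378, longest_edge=980):
    aspect_ratio = width / height
    if width > height and width >= longest_edge:
        width = longest_edge
        height = int(width / aspect_ratio)
    elif height > width and height >= longest_edge:
        height = longest_edge
        width = int(height * aspect_ratio)
    width = max(width, shortest_edge)
    height = max(height, shortest_edge)
    return height, width

print(expected_hw(2000, 1000))  # (490, 980): wide image capped at the longest edge
print(expected_hw(300, 200))    # (378, 378): small image raised to the shortest edge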
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch LayoutLMv3 model.""" import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMv3Config, LayoutLMv3ForQuestionAnswering, LayoutLMv3ForSequenceClassification, LayoutLMv3ForTokenClassification, LayoutLMv3Model, ) if is_vision_available(): from PIL import Image from transformers import LayoutLMv3ImageProcessor class LayoutLMv3ModelTester: def __init__( self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.patch_size = patch_size self.text_seq_length = text_seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.coordinate_size = coordinate_size self.shape_size = shape_size self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.range_bbox = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) self.text_seq_length = text_seq_length self.image_seq_length = (image_size // patch_size) ** 2 + 1 self.seq_length = self.text_seq_length + self.image_seq_length def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, 
self.text_seq_length], self.vocab_size) bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: t = bbox[i, j, 3] bbox[i, j, 3] = bbox[i, j, 1] bbox[i, j, 1] = t if bbox[i, j, 2] < bbox[i, j, 0]: t = bbox[i, j, 2] bbox[i, j, 2] = bbox[i, j, 0] bbox[i, j, 0] = t pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.text_seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size) sequence_labels = None token_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels) config = LayoutLMv3Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def create_and_check_model( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels ): model = LayoutLMv3Model(config=config) model.to(torch_device) model.eval() # text + image result = model(input_ids, pixel_values=pixel_values) result = model( input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids ) result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids) result = model(input_ids, bbox=bbox, pixel_values=pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) # text only result = model(input_ids) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only result = model(pixel_values=pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) ) def create_and_check_for_sequence_classification( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels ): config.num_labels = self.num_labels model = LayoutLMv3ForSequenceClassification(config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels ): config.num_labels = self.num_labels model = LayoutLMv3ForTokenClassification(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, 
pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels ): model = LayoutLMv3ForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "bbox": bbox, "pixel_values": pixel_values, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False test_torchscript = False test_mismatched_shapes = False all_model_classes = ( ( LayoutLMv3Model, LayoutLMv3ForSequenceClassification, LayoutLMv3ForTokenClassification, LayoutLMv3ForQuestionAnswering, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model} if is_torch_available() else {} ) # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has # the sequence dimension of the text embedding only. 
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`) return True def setUp(self): self.model_tester = LayoutLMv3ModelTester(self) self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict = { k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous() if isinstance(v, torch.Tensor) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device) elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class in [ *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=torch.long, device=torch_device, ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "microsoft/layoutlmv3-base" model = LayoutLMv3Model.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch class LayoutLMv3ModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None @slow def test_inference_no_head(self): model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device) image_processor = self.default_image_processor image = prepare_img() pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device) input_ids = torch.tensor([[1, 2]]) bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0) # forward pass outputs = model( 
input_ids=input_ids.to(torch_device), bbox=bbox.to(torch_device), pixel_values=pixel_values.to(torch_device), ) # verify the logits expected_shape = torch.Size((1, 199, 768)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
transformers/tests/models/layoutlmv3/test_modeling_layoutlmv3.py/0
{ "file_path": "transformers/tests/models/layoutlmv3/test_modeling_layoutlmv3.py", "repo_id": "transformers", "token_count": 7533 }
456
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( LongformerConfig, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerSelfAttention, ) from transformers.tf_utils import shape_list class TFLongformerModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = True self.use_labels = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None self.attention_window = 4 # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after self.key_length = self.attention_window + 2 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = LongformerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, 
type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, attention_window=self.attention_window, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_attention_mask_determinism( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFLongformerModel(config=config) attention_mask = tf.ones(input_ids.shape, dtype=tf.int64) output_with_mask = model(input_ids, attention_mask=attention_mask)[0] output_without_mask = model(input_ids)[0] tf.debugging.assert_near(output_with_mask[0, 0, :5], output_without_mask[0, 0, :5], rtol=1e-4) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.return_dict = True model = TFLongformerModel(config=config) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertListEqual( shape_list(result.last_hidden_state), [self.batch_size, self.seq_length, self.hidden_size] ) self.parent.assertListEqual(shape_list(result.pooler_output), [self.batch_size, self.hidden_size]) def create_and_check_model_with_global_attention_mask( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.return_dict = True model = TFLongformerModel(config=config) half_input_mask_length = shape_list(input_mask)[-1] // 2 global_attention_mask = tf.concat( [ tf.zeros_like(input_mask)[:, :half_input_mask_length], tf.ones_like(input_mask)[:, half_input_mask_length:], ], axis=-1, ) result = model( input_ids, attention_mask=input_mask, global_attention_mask=global_attention_mask, token_type_ids=token_type_ids, ) result = model(input_ids, token_type_ids=token_type_ids, global_attention_mask=global_attention_mask) result = model(input_ids, global_attention_mask=global_attention_mask) self.parent.assertListEqual( shape_list(result.last_hidden_state), [self.batch_size, self.seq_length, self.hidden_size] ) self.parent.assertListEqual(shape_list(result.pooler_output), [self.batch_size, self.hidden_size]) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.return_dict = True model = TFLongformerForMaskedLM(config=config) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertListEqual(shape_list(result.logits), [self.batch_size, self.seq_length, self.vocab_size]) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.return_dict = True model = TFLongformerForQuestionAnswering(config=config) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertListEqual(shape_list(result.start_logits), [self.batch_size, self.seq_length]) self.parent.assertListEqual(shape_list(result.end_logits), [self.batch_size, self.seq_length]) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFLongformerForSequenceClassification(config=config) output = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, 
labels=sequence_labels ).logits self.parent.assertListEqual(shape_list(output), [self.batch_size, self.num_labels]) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFLongformerForTokenClassification(config=config) output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels).logits self.parent.assertListEqual(shape_list(output), [self.batch_size, self.seq_length, self.num_labels]) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFLongformerForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) output = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, global_attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ).logits self.parent.assertListEqual(list(output.shape), [self.batch_size, self.num_choices]) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs # global attention mask has to be partly defined # to trace all weights global_attention_mask = tf.concat( [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1, ) inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, "global_attention_mask": global_attention_mask, } return config, inputs_dict def prepare_config_and_inputs_for_question_answering(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs # Replace sep_token_id by some random id input_ids = tf.where(input_ids == config.sep_token_id, 0, input_ids) # Make sure there are exactly three sep_token_id input_ids = tf.concat([input_ids[:, :-3], tf.ones_like(input_ids)[:, -3:] * config.sep_token_id], axis=-1) input_mask = tf.ones_like(input_ids) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels @require_tf class TFLongformerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFLongformerModel, TFLongformerForMaskedLM, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForMultipleChoice, TFLongformerForTokenClassification, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFLongformerModel, "fill-mask": TFLongformerForMaskedLM, "question-answering": TFLongformerForQuestionAnswering, "text-classification": TFLongformerForSequenceClassification, "token-classification": TFLongformerForTokenClassification, "zero-shot": TFLongformerForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if ( 
pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def setUp(self): self.model_tester = TFLongformerModelTester(self) self.config_tester = ConfigTester(self, config_class=LongformerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model_attention_mask_determinism(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_attention_mask_determinism(*config_and_inputs) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_global_attention_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_global_attention_mask(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_question_answering() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) @unittest.skip("Longformer keeps using potentially symbolic tensors in conditionals and breaks tracing.") def test_saved_model_creation(self): pass @unittest.skip("Longformer keeps using potentially symbolic tensors in conditionals and breaks tracing.") def test_compile_tf_model(self): pass @require_tf @require_sentencepiece @require_tokenizers class TFLongformerModelIntegrationTest(unittest.TestCase): def _get_hidden_states(self): return tf.convert_to_tensor( [ [ [ 4.98332758e-01, 2.69175139e00, -7.08081422e-03, 1.04915401e00, -1.83476661e00, 7.67220476e-01, 2.98580543e-01, 2.84803992e-02, ], [ -7.58357372e-01, 4.20635998e-01, -4.04739919e-02, 1.59924145e-01, 2.05135748e00, -1.15997978e00, 5.37166397e-01, 2.62873606e-01, ], [ -1.69438001e00, 4.17574660e-01, -1.49196962e00, -1.76483717e00, -1.94566312e-01, -1.71183858e00, 7.72903565e-01, -1.11557056e00, ], [ 5.44028163e-01, 2.05466114e-01, -3.63045868e-01, 2.41865062e-01, 3.20348382e-01, -9.05611176e-01, -1.92690727e-01, -1.19917547e00, ], ] ], dtype=tf.float32, ) def test_diagonalize(self): hidden_states = self._get_hidden_states() hidden_states = tf.reshape(hidden_states, (1, 8, 4)) # set seq length = 8, hidden dim = 4 chunked_hidden_states = TFLongformerSelfAttention._chunk(hidden_states, window_overlap=2) window_overlap_size = shape_list(chunked_hidden_states)[2] self.assertTrue(window_overlap_size == 4) padded_hidden_states = 
TFLongformerSelfAttention._pad_and_diagonalize(chunked_hidden_states) self.assertTrue( shape_list(padded_hidden_states)[-1] == shape_list(chunked_hidden_states)[-1] + window_overlap_size - 1 ) # first row => [0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000] tf.debugging.assert_near(padded_hidden_states[0, 0, 0, :4], chunked_hidden_states[0, 0, 0], rtol=1e-3) tf.debugging.assert_near(padded_hidden_states[0, 0, 0, 4:], tf.zeros((3,), dtype=tf.float32), rtol=1e-3) # last row => [0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629] tf.debugging.assert_near(padded_hidden_states[0, 0, -1, 3:], chunked_hidden_states[0, 0, -1], rtol=1e-3) tf.debugging.assert_near(padded_hidden_states[0, 0, -1, :3], tf.zeros((3,), dtype=tf.float32), rtol=1e-3) def test_pad_and_transpose_last_two_dims(self): hidden_states = self._get_hidden_states() self.assertEqual(shape_list(hidden_states), [1, 4, 8]) # pad along seq length dim paddings = tf.constant([[0, 0], [0, 0], [0, 1], [0, 0]], dtype=tf.int64) hidden_states = TFLongformerSelfAttention._chunk(hidden_states, window_overlap=2) padded_hidden_states = TFLongformerSelfAttention._pad_and_transpose_last_two_dims(hidden_states, paddings) self.assertTrue(shape_list(padded_hidden_states) == [1, 1, 8, 5]) expected_added_dim = tf.zeros((5,), dtype=tf.float32) tf.debugging.assert_near(expected_added_dim, padded_hidden_states[0, 0, -1, :], rtol=1e-6) tf.debugging.assert_near( hidden_states[0, 0, -1, :], tf.reshape(padded_hidden_states, (1, -1))[0, 24:32], rtol=1e-6 ) def test_mask_invalid_locations(self): hidden_states = self._get_hidden_states() batch_size = 1 seq_length = 8 hidden_size = 4 hidden_states = tf.reshape(hidden_states, (batch_size, seq_length, hidden_size)) hidden_states = TFLongformerSelfAttention._chunk(hidden_states, window_overlap=2) hid_states_1 = TFLongformerSelfAttention._mask_invalid_locations(hidden_states, 1) hid_states_2 = TFLongformerSelfAttention._mask_invalid_locations(hidden_states, 2) hid_states_3 = TFLongformerSelfAttention._mask_invalid_locations(hidden_states[:, :, :, :3], 2) hid_states_4 = TFLongformerSelfAttention._mask_invalid_locations(hidden_states[:, :, 2:, :], 2) self.assertTrue(tf.math.reduce_sum(tf.cast(tf.math.is_inf(hid_states_1), tf.int64)) == 8) self.assertTrue(tf.math.reduce_sum(tf.cast(tf.math.is_inf(hid_states_2), tf.int64)) == 24) self.assertTrue(tf.math.reduce_sum(tf.cast(tf.math.is_inf(hid_states_3), tf.int64)) == 24) self.assertTrue(tf.math.reduce_sum(tf.cast(tf.math.is_inf(hid_states_4), tf.int64)) == 12) def test_chunk(self): hidden_states = self._get_hidden_states() batch_size = 1 seq_length = 8 hidden_size = 4 hidden_states = tf.reshape(hidden_states, (batch_size, seq_length, hidden_size)) chunked_hidden_states = TFLongformerSelfAttention._chunk(hidden_states, window_overlap=2) # expected slices across chunk and seq length dim expected_slice_along_seq_length = tf.convert_to_tensor([0.4983, -0.7584, -1.6944], dtype=tf.float32) expected_slice_along_chunk = tf.convert_to_tensor([0.4983, -1.8348, -0.7584, 2.0514], dtype=tf.float32) self.assertTrue(shape_list(chunked_hidden_states) == [1, 3, 4, 4]) tf.debugging.assert_near( chunked_hidden_states[0, :, 0, 0], expected_slice_along_seq_length, rtol=1e-3, atol=1e-4 ) tf.debugging.assert_near(chunked_hidden_states[0, 0, :, 0], expected_slice_along_chunk, rtol=1e-3, atol=1e-4) def test_layer_local_attn(self): model = TFLongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") layer = model.longformer.encoder.layer[0].attention.self_attention 
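# Build a tiny (1, 4, 8) input and mask out the last two positions so that only local (sliding-window) attention is exercised in this layer-level check.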
hidden_states = self._get_hidden_states() batch_size, seq_length, hidden_size = hidden_states.shape attention_mask = tf.zeros((batch_size, seq_length), dtype=tf.float32) is_index_global_attn = tf.math.greater(attention_mask, 1) is_global_attn = tf.math.reduce_any(is_index_global_attn) attention_mask = tf.where(tf.range(4)[None, :, None, None] > 1, -10000.0, attention_mask[:, :, None, None]) is_index_masked = tf.math.less(attention_mask[:, :, 0, 0], 0) layer_head_mask = None output_hidden_states = layer( [hidden_states, attention_mask, layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn] )[0] expected_slice = tf.convert_to_tensor( [0.00188, 0.012196, -0.017051, -0.025571, -0.02996, 0.017297, -0.011521, 0.004848], dtype=tf.float32 ) self.assertEqual(output_hidden_states.shape, (1, 4, 8)) tf.debugging.assert_near(output_hidden_states[0, 1], expected_slice, rtol=1e-3, atol=1e-4) def test_layer_global_attn(self): model = TFLongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") layer = model.longformer.encoder.layer[0].attention.self_attention hidden_states = self._get_hidden_states() hidden_states = tf.concat([self._get_hidden_states(), self._get_hidden_states() - 0.5], axis=0) batch_size, seq_length, hidden_size = hidden_states.shape # create attn mask attention_mask_1 = tf.zeros((1, 1, 1, seq_length), dtype=tf.float32) attention_mask_2 = tf.zeros((1, 1, 1, seq_length), dtype=tf.float32) attention_mask_1 = tf.where(tf.range(4)[None, :, None, None] > 1, 10000.0, attention_mask_1) attention_mask_1 = tf.where(tf.range(4)[None, :, None, None] > 2, -10000.0, attention_mask_1) attention_mask_2 = tf.where(tf.range(4)[None, :, None, None] > 0, 10000.0, attention_mask_2) attention_mask = tf.concat([attention_mask_1, attention_mask_2], axis=0) is_index_masked = tf.math.less(attention_mask[:, :, 0, 0], 0) is_index_global_attn = tf.math.greater(attention_mask[:, :, 0, 0], 0) is_global_attn = tf.math.reduce_any(is_index_global_attn) layer_head_mask = None output_hidden_states = layer( [ hidden_states, -tf.math.abs(attention_mask), layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn, ] )[0] self.assertEqual(output_hidden_states.shape, (2, 4, 8)) expected_slice_0 = tf.convert_to_tensor( [-0.06508, -0.039306, 0.030934, -0.03417, -0.00656, -0.01553, -0.02088, -0.04938], dtype=tf.float32 ) expected_slice_1 = tf.convert_to_tensor( [-0.04055, -0.038399, 0.0396, -0.03735, -0.03415, 0.01357, 0.00145, -0.05709], dtype=tf.float32 ) tf.debugging.assert_near(output_hidden_states[0, 2], expected_slice_0, rtol=1e-3, atol=1e-4) tf.debugging.assert_near(output_hidden_states[1, -2], expected_slice_1, rtol=1e-3, atol=1e-4) def test_layer_attn_probs(self): model = TFLongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") layer = model.longformer.encoder.layer[0].attention.self_attention hidden_states = tf.concat([self._get_hidden_states(), self._get_hidden_states() - 0.5], axis=0) batch_size, seq_length, hidden_size = hidden_states.shape # create attn mask attention_mask_1 = tf.zeros((1, 1, 1, seq_length), dtype=tf.float32) attention_mask_2 = tf.zeros((1, 1, 1, seq_length), dtype=tf.float32) attention_mask_1 = tf.where(tf.range(4)[None, :, None, None] > 1, 10000.0, attention_mask_1) attention_mask_1 = tf.where(tf.range(4)[None, :, None, None] > 2, -10000.0, attention_mask_1) attention_mask_2 = tf.where(tf.range(4)[None, :, None, None] > 0, 10000.0, attention_mask_2) attention_mask = tf.concat([attention_mask_1, attention_mask_2], axis=0) 
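# Mask convention used below: large positive values mark global-attention tokens, large negative values mark masked positions, and zeros are ordinary local-attention tokens.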
is_index_masked = tf.math.less(attention_mask[:, :, 0, 0], 0) is_index_global_attn = tf.math.greater(attention_mask[:, :, 0, 0], 0) is_global_attn = tf.math.reduce_any(is_index_global_attn) layer_head_mask = None output_hidden_states, local_attentions, global_attentions = layer( [ hidden_states, -tf.math.abs(attention_mask), layer_head_mask, is_index_masked, is_index_global_attn, is_global_attn, ] ) self.assertEqual(local_attentions.shape, (2, 4, 2, 8)) self.assertEqual(global_attentions.shape, (2, 2, 3, 4)) self.assertTrue((local_attentions[0, 2:4, :, :] == 0).numpy().tolist()) self.assertTrue((local_attentions[1, 1:4, :, :] == 0).numpy().tolist()) # # The weight of all tokens with local attention must sum to 1. self.assertTrue( (tf.math.abs(tf.math.reduce_sum(global_attentions[0, :, :2, :], axis=-1) - 1) < 1e-6).numpy().tolist() ) self.assertTrue( (tf.math.abs(tf.math.reduce_sum(global_attentions[1, :, :1, :], axis=-1) - 1) < 1e-6).numpy().tolist() ) tf.debugging.assert_near( local_attentions[0, 0, 0, :], tf.convert_to_tensor([0.3328, 0.0000, 0.0000, 0.0000, 0.0000, 0.3355, 0.3318, 0.0000], dtype=tf.float32), rtol=1e-3, atol=1e-4, ) tf.debugging.assert_near( local_attentions[1, 0, 0, :], tf.convert_to_tensor([0.2492, 0.2502, 0.2502, 0.0000, 0.0000, 0.2505, 0.0000, 0.0000], dtype=tf.float32), rtol=1e-3, atol=1e-4, ) # All the global attention weights must sum to 1. self.assertTrue((tf.math.abs(tf.math.reduce_sum(global_attentions, axis=-1) - 1) < 1e-6).numpy().tolist()) tf.debugging.assert_near( global_attentions[0, 0, 1, :], tf.convert_to_tensor([0.2500, 0.2500, 0.2500, 0.2500], dtype=tf.float32), rtol=1e-3, atol=1e-4, ) tf.debugging.assert_near( global_attentions[1, 0, 0, :], tf.convert_to_tensor([0.2497, 0.2500, 0.2499, 0.2504], dtype=tf.float32), rtol=1e-3, atol=1e-4, ) @slow def test_inference_no_head(self): model = TFLongformerModel.from_pretrained("allenai/longformer-base-4096") # 'Hello world!' input_ids = tf.convert_to_tensor([[0, 20920, 232, 328, 1437, 2]], dtype=tf.int64) attention_mask = tf.ones(shape_list(input_ids), dtype=tf.int64) output = model(input_ids, attention_mask=attention_mask)[0] output_without_mask = model(input_ids)[0] expected_output_slice = tf.convert_to_tensor([0.0549, 0.1087, -0.1119, -0.0368, 0.0250], dtype=tf.float32) tf.debugging.assert_near(output[0, 0, -5:], expected_output_slice, rtol=1e-3, atol=1e-4) tf.debugging.assert_near(output_without_mask[0, 0, -5:], expected_output_slice, rtol=1e-3, atol=1e-4) @slow def test_inference_no_head_long(self): model = TFLongformerModel.from_pretrained("allenai/longformer-base-4096") # 'Hello world! 
' repeated 1000 times input_ids = tf.convert_to_tensor([[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=tf.int64) attention_mask = tf.ones(shape_list(input_ids), dtype=tf.int64) global_attention_mask = tf.zeros(shape_list(input_ids), dtype=tf.int64) # Set global attention on a few random positions global_attention_mask = tf.tensor_scatter_nd_update( global_attention_mask, tf.constant([[0, 1], [0, 4], [0, 21]], dtype=tf.int64), tf.constant([1, 1, 1], dtype=tf.int64), ) output = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)[0] expected_output_sum = tf.constant(74585.875) expected_output_mean = tf.constant(0.024267) # assert close tf.debugging.assert_near(tf.reduce_sum(output), expected_output_sum, rtol=1e-4, atol=1e-4) tf.debugging.assert_near(tf.reduce_mean(output), expected_output_mean, rtol=1e-4, atol=1e-4) @slow def test_inference_masked_lm_long(self): model = TFLongformerForMaskedLM.from_pretrained("allenai/longformer-base-4096") # 'Hello world! ' repeated 1000 times input_ids = tf.convert_to_tensor([[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=tf.int64) output = model(input_ids, labels=input_ids) loss = output.loss prediction_scores = output.logits expected_loss = tf.constant(0.0073798) expected_prediction_scores_sum = tf.constant(-610476600.0) expected_prediction_scores_mean = tf.constant(-3.03477) # assert close tf.debugging.assert_near(tf.reduce_mean(loss), expected_loss, rtol=1e-4, atol=1e-4) tf.debugging.assert_near( tf.reduce_sum(prediction_scores), expected_prediction_scores_sum, rtol=1e-4, atol=1e-4 ) tf.debugging.assert_near( tf.reduce_mean(prediction_scores), expected_prediction_scores_mean, rtol=1e-4, atol=1e-4 ) @slow def test_inference_masked_lm(self): model = TFLongformerForMaskedLM.from_pretrained("lysandre/tiny-longformer-random") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] expected_shape = [1, 6, 10] self.assertEqual(output.shape, expected_shape) print(output[:, :3, :3]) expected_slice = tf.constant( [ [ [-0.04926379, 0.0367098, 0.02099686], [0.03940692, 0.01547744, -0.01448723], [0.03495252, -0.05900355, -0.01675752], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
transformers/tests/models/longformer/test_modeling_tf_longformer.py/0
{ "file_path": "transformers/tests/models/longformer/test_modeling_tf_longformer.py", "repo_id": "transformers", "token_count": 15156 }
457
# coding=utf-8 # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import unittest from typing import Dict, List, Tuple from unittest.util import safe_repr from parameterized import parameterized from transformers import AutoTokenizer, MambaConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MambaForCausalLM, MambaModel, ) from transformers.models.mamba.modeling_mamba import MambaCache from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_0 else: is_torch_greater_or_equal_than_2_0 = False class MambaModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, intermediate_size=32, hidden_act="silu", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, num_labels=3, num_choices=4, scope=None, tie_word_embeddings=True, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 self.tie_word_embeddings = tie_word_embeddings def get_large_model_config(self): return MambaConfig.from_pretrained("hf-internal-testing/mamba-2.8b") def prepare_config_and_inputs( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = ids_tensor([self.batch_size, self.seq_length], 1) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config( gradient_checkpointing=gradient_checkpointing, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, ) return ( config, input_ids, attention_mask, sequence_labels, token_labels, choice_labels, ) def 
get_config( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): return MambaConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, intermediate_size=self.intermediate_size, activation_function=self.hidden_act, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, gradient_checkpointing=gradient_checkpointing, tie_word_embeddings=self.tie_word_embeddings, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, attention_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() return ( config, input_ids, attention_mask, sequence_labels, token_labels, choice_labels, ) def create_and_check_mamba_model(self, config, input_ids, *args): config.output_hidden_states = True model = MambaModel(config=config) model.to(torch_device) model.eval() result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.hidden_states), config.num_hidden_layers + 1) def create_and_check_causal_lm(self, config, input_ids, *args): model = MambaForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_state_equivalency(self, config, input_ids, *args): model = MambaModel(config=config) model.to(torch_device) model.eval() outputs = model(input_ids) output_whole = outputs.last_hidden_state outputs = model( input_ids[:, :-1], use_cache=True, cache_position=torch.arange(0, config.conv_kernel, device=input_ids.device), ) output_one = outputs.last_hidden_state # Using the state computed on the first inputs, we will get the same output outputs = model( input_ids[:, -1:], use_cache=True, cache_params=outputs.cache_params, cache_position=torch.arange(config.conv_kernel, config.conv_kernel + 1, device=input_ids.device), ) output_two = outputs.last_hidden_state self.parent.assertTrue(torch.allclose(torch.cat([output_one, output_two], dim=1), output_whole, atol=1e-5)) # TODO the orignal mamba does not support decoding more than 1 token neither do we def create_and_check_mamba_cached_slow_forward_and_backwards( self, config, input_ids, *args, gradient_checkpointing=False ): model = MambaModel(config) model.to(torch_device) if gradient_checkpointing: model.gradient_checkpointing_enable() # create cache cache = model(input_ids, use_cache=True).cache_params cache.reset() # use cache token_emb = model.embeddings(input_ids) outputs = model.layers[0].mixer.slow_forward( token_emb, cache, cache_position=torch.arange(0, config.conv_kernel, device=input_ids.device) ) loss = torch.log(1 + torch.abs(outputs.sum())) self.parent.assertEqual(loss.shape, ()) self.parent.assertEqual(outputs.shape, (self.batch_size, self.seq_length, self.hidden_size)) loss.backward() def create_and_check_mamba_lm_head_forward_and_backwards( self, config, input_ids, *args, gradient_checkpointing=False ): model = MambaForCausalLM(config) model.to(torch_device) if gradient_checkpointing: model.gradient_checkpointing_enable() result = model(input_ids, labels=input_ids) 
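# With labels supplied, the causal LM head returns a scalar loss together with per-token logits over the vocabulary.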
self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) result.loss.backward() def prepare_config_and_inputs_for_common(self): ( config, input_ids, attention_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @unittest.skipIf( not is_torch_greater_or_equal_than_2_0, reason="See https://github.com/huggingface/transformers/pull/24204" ) @require_torch class MambaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MambaModel, MambaForCausalLM) if is_torch_available() else () all_generative_model_classes = (MambaForCausalLM,) if is_torch_available() else () has_attentions = False # Mamba does not support attentions fx_compatible = False # FIXME let's try to support this @ArthurZucker test_torchscript = False # FIXME let's try to support this @ArthurZucker test_missing_keys = False test_model_parallel = False test_pruning = False test_head_masking = False # Mamba does not have attention heads pipeline_model_mapping = ( {"feature-extraction": MambaModel, "text-generation": MambaForCausalLM} if is_torch_available() else {} ) def setUp(self): self.model_tester = MambaModelTester(self) self.config_tester = ConfigTester( self, config_class=MambaConfig, n_embd=37, common_properties=["hidden_size", "num_hidden_layers"] ) def assertInterval(self, member, container, msg=None): r""" Simple utility function to check if a member is inside an interval. """ if isinstance(member, torch.Tensor): max_value, min_value = member.max().item(), member.min().item() elif isinstance(member, list) or isinstance(member, tuple): max_value, min_value = max(member), min(member) if not isinstance(container, list): raise TypeError("container should be a list or tuple") elif len(container) != 2: raise ValueError("container should have 2 elements") expected_min, expected_max = container is_inside_interval = (min_value >= expected_min) and (max_value <= expected_max) if not is_inside_interval: standardMsg = "%s not found in %s" % (safe_repr(member), safe_repr(container)) self.fail(self._formatMessage(msg, standardMsg)) def test_config(self): self.config_tester.run_common_tests() @require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # some params shouldn't be scattered by nn.DataParallel # so just remove them if they are present. 
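# ("cache_params" holds a MambaCache object rather than a plain tensor, so nn.DataParallel cannot scatter it across replicas.)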
blacklist_non_batched_params = ["cache_params"] for k in blacklist_non_batched_params: inputs_dict.pop(k, None) # move input tensors to cuda:O for k, v in inputs_dict.items(): if torch.is_tensor(v): inputs_dict[k] = v.to(0) for model_class in self.all_model_classes: model = model_class(config=config) model.to(0) model.eval() # Wrap model in nn.DataParallel model = torch.nn.DataParallel(model) with torch.no_grad(): _ = model(**self._prepare_for_class(inputs_dict, model_class)) def test_mamba_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mamba_model(*config_and_inputs) def test_mamba_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm(*config_and_inputs) def test_state_equivalency(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_state_equivalency(*config_and_inputs) def test_mamba_cached_slow_forward_and_backwards(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mamba_cached_slow_forward_and_backwards(*config_and_inputs) def test_mamba_lm_head_forward_and_backwards(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mamba_lm_head_forward_and_backwards(*config_and_inputs) def test_initialization(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config=config) for name, param in model.named_parameters(): if "dt_proj.bias" in name: dt = torch.exp( torch.tensor([0, 1]) * (math.log(config.time_step_max) - math.log(config.time_step_min)) + math.log(config.time_step_min) ).clamp(min=config.time_step_floor) inv_dt = dt + torch.log(-torch.expm1(-dt)) if param.requires_grad: self.assertTrue(param.data.max().item() <= inv_dt[1]) self.assertTrue(param.data.min().item() >= inv_dt[0]) elif "A_log" in name: A = torch.arange(1, config.state_size + 1, dtype=torch.float32)[None, :] self.assertTrue(torch.allclose(param.data, torch.log(A), atol=1e-5, rtol=1e-5)) elif "D" in name: if param.requires_grad: # check if it's a ones like self.assertTrue(torch.allclose(param.data, torch.ones_like(param.data), atol=1e-5, rtol=1e-5)) @slow def test_model_from_pretrained(self): model = MambaModel.from_pretrained("hf-internal-testing/mamba-130m") self.assertIsNotNone(model) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, MambaCache): # MODIFIED PART START recursive_check(tuple_object.conv_states, dict_object.conv_states) recursive_check(tuple_object.ssm_states, dict_object.ssm_states) elif isinstance(tuple_object, (List, Tuple)): # MODIFIED PART END for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return 
else: self.assertTrue( torch.allclose(tuple_object, dict_object, atol=1e-5), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) @unittest.skip("The `input_embeds` when fed don't produce the same results.") def test_beam_sample_generate(self): pass @require_torch class MambaIntegrationTests(unittest.TestCase): def setUp(self): self.model_id = "state-spaces/mamba-2.8b-hf" self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) @parameterized.expand([(torch_device,), ("cpu",)]) def test_simple_generate(self, device): tokenizer = AutoTokenizer.from_pretrained("state-spaces/mamba-130m-hf") tokenizer.pad_token = tokenizer.eos_token model = MambaForCausalLM.from_pretrained("state-spaces/mamba-130m-hf", torch_dtype=torch.float16) model.to(device) input_ids = tokenizer("Hey how are you doing?", return_tensors="pt")["input_ids"].to(device) out = model.generate(input_ids, do_sample=False, use_cache=True, max_new_tokens=10) output_sentence = tokenizer.decode(out[0, :]) self.assertEqual(output_sentence, "Hey how are you doing?\n\nI'm so glad you're here.") with torch.no_grad(): logits = model(input_ids=input_ids).logits EXPECTED_LOGITS_NO_GRAD = torch.tensor( [ -55.6875, -69.8750, -49.9062, -51.7500, -57.6875, -57.9375, -56.9688, -57.9375, -54.6875, -55.9375, -55.3125, -58.0938, -60.5625, -47.0000, -52.0312, -49.7812, -55.9375, -57.9062, -56.7812, -57.1250, -57.3438, -58.3125, -57.8125, -58.7812, -59.6250, -59.0938, -58.7188, -52.9375, -53.4688, -57.3750, -56.9375, -55.7500, -53.3125, -55.8438, -57.0000, -56.9062, -56.2188, -54.7188, -56.4375, -57.5000 ] ,dtype=torch.float32) # fmt: skip torch.testing.assert_close(logits[0, 0, :40].cpu(), EXPECTED_LOGITS_NO_GRAD, rtol=1e-3, atol=1e-3) @parameterized.expand([(torch_device,), ("cpu",)]) def test_simple_generate_cuda_kernels_tiny(self, device): expected_output = "Hello my name is John and I am a newbie to the world" input_ids = self.tokenizer("Hello my name is", return_tensors="pt").input_ids.to(device) model = MambaForCausalLM.from_pretrained("state-spaces/mamba-130m-hf", torch_dtype=torch.float16).to(device) output = model.generate(input_ids, max_new_tokens=10) output_sentence = self.tokenizer.decode(output[0].tolist()) self.assertEqual(output_sentence, expected_output) 
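# The following slow integration tests run the same prompt through progressively larger Mamba checkpoints (790m, 1.4b and 2.8b) and compare the greedy generations verbatim.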
@parameterized.expand([(torch_device,), ("cpu",)]) @slow def test_simple_generate_cuda_kernels_small(self, device): expected_output = "Hello my name is\n\nI am a\n\nI am a" input_ids = self.tokenizer("Hello my name is", return_tensors="pt").input_ids.to(device) model = MambaForCausalLM.from_pretrained("state-spaces/mamba-790m-hf", torch_dtype=torch.float16).to(device) output = model.generate(input_ids, max_new_tokens=10) output_sentence = self.tokenizer.decode(output[0].tolist()) self.assertEqual(output_sentence, expected_output) @parameterized.expand([(torch_device,), ("cpu",)]) @slow def test_simple_generate_cuda_kernels_mid(self, device): expected_output = "Hello my name is John and I am a\n\nI am a single father of a beautiful daughter. I am a" input_ids = self.tokenizer("Hello my name is", return_tensors="pt").input_ids.to(device) model = MambaForCausalLM.from_pretrained("state-spaces/mamba-1.4b-hf", torch_dtype=torch.float16).to(device) output = model.generate(input_ids, max_new_tokens=20) output_sentence = self.tokenizer.decode(output[0].tolist()) self.assertEqual(output_sentence, expected_output) @parameterized.expand([(torch_device,), ("cpu",)]) @slow def test_simple_generate_cuda_kernels_big(self, device): expected_output = "Hello my name is John and I am a new member of this forum. I am a retired Marine and I am a member of the Marine Corps League. I am a" input_ids = self.tokenizer("Hello my name is", return_tensors="pt").input_ids.to(device) model = MambaForCausalLM.from_pretrained("state-spaces/mamba-2.8b-hf", torch_dtype=torch.float16).to(device) output = model.generate(input_ids, max_new_tokens=30) output_sentence = self.tokenizer.decode(output[0].tolist()) self.assertEqual(output_sentence, expected_output) @slow def test_compile_mamba_cache(self): expected_output = "Hello my name is John and I am a\n\nI am a single father of a beautiful daughter. I am a" input_ids = self.tokenizer("Hello my name is", return_tensors="pt").input_ids.to(torch_device) model = MambaForCausalLM.from_pretrained("state-spaces/mamba-1.4b-hf", torch_dtype=torch.float16).to( torch_device ) output = model.generate(input_ids, max_new_tokens=20, cache_implementation="mamba") output_sentence = self.tokenizer.decode(output[0].tolist()) self.assertEqual(output_sentence, expected_output) model.forward = torch.compile(model.forward, fullgraph=True, mode="reduce-overhead") output = model.generate(input_ids, max_new_tokens=20, cache_implementation="mamba") output_sentence = self.tokenizer.decode(output[0].tolist()) self.assertEqual(output_sentence, expected_output)
transformers/tests/models/mamba/test_modeling_mamba.py/0
{ "file_path": "transformers/tests/models/mamba/test_modeling_mamba.py", "repo_id": "transformers", "token_count": 10796 }
458
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch MGP-STR model.""" import unittest import requests from transformers import MgpstrConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MgpstrForSceneTextRecognition, MgpstrModel if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor class MgpstrModelTester: def __init__( self, parent, is_training=False, batch_size=13, image_size=(32, 128), patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=99, num_wordpiece_labels=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, mlp_ratio=4.0, patch_embeds_hidden_size=257, output_hidden_states=None, ): self.parent = parent self.is_training = is_training self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.max_token_length = max_token_length self.num_character_labels = num_character_labels self.num_bpe_labels = num_bpe_labels self.num_wordpiece_labels = num_wordpiece_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.mlp_ratio = mlp_ratio self.patch_embeds_hidden_size = patch_embeds_hidden_size self.output_hidden_states = output_hidden_states def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]]) config = self.get_config() return config, pixel_values def get_config(self): return MgpstrConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, max_token_length=self.max_token_length, num_character_labels=self.num_character_labels, num_bpe_labels=self.num_bpe_labels, num_wordpiece_labels=self.num_wordpiece_labels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, mlp_ratio=self.mlp_ratio, output_hidden_states=self.output_hidden_states, ) def create_and_check_model(self, config, pixel_values): model = MgpstrForSceneTextRecognition(config) model.to(torch_device) model.eval() with torch.no_grad(): generated_ids = model(pixel_values) self.parent.assertEqual( generated_ids[0][0].shape, (self.batch_size, self.max_token_length, self.num_character_labels) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class 
MgpstrModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (MgpstrForSceneTextRecognition,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": MgpstrForSceneTextRecognition, "image-feature-extraction": MgpstrModel} if is_torch_available() else {} ) fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_attention_outputs = False def setUp(self): self.model_tester = MgpstrModelTester(self) self.config_tester = ConfigTester(self, config_class=MgpstrConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="MgpstrModel does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) @unittest.skip(reason="MgpstrModel does not support feedforward chunking") def test_feed_forward_chunking(self): pass def test_gradient_checkpointing_backward_compatibility(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue config.gradient_checkpointing = True model = model_class(config) self.assertTrue(model.is_gradient_checkpointing) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.patch_embeds_hidden_size, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # override as the `logit_scale` parameter initilization is different for MgpstrModel def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if isinstance(param, (nn.Linear, nn.Conv2d, nn.LayerNorm)): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass # We will verify our results on an image from the IIIT-5k 
dataset def prepare_img(): url = "https://i.postimg.cc/ZKwLg2Gw/367-14.png" im = Image.open(requests.get(url, stream=True).raw).convert("RGB") return im @require_vision @require_torch class MgpstrModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "alibaba-damo/mgp-str-base" model = MgpstrForSceneTextRecognition.from_pretrained(model_name).to(torch_device) processor = MgpstrProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor(images=image, return_tensors="pt").pixel_values.to(torch_device) # forward pass with torch.no_grad(): outputs = model(inputs) # verify the logits self.assertEqual(outputs.logits[0].shape, torch.Size((1, 27, 38))) out_strs = processor.batch_decode(outputs.logits) expected_text = "ticket" self.assertEqual(out_strs["generated_text"][0], expected_text) expected_slice = torch.tensor( [[[-39.5397, -44.4024, -36.1844], [-61.4709, -63.8639, -58.3454], [-74.0225, -68.5494, -71.2164]]], device=torch_device, ) self.assertTrue(torch.allclose(outputs.logits[0][:, 1:4, 1:4], expected_slice, atol=1e-4))
transformers/tests/models/mgp_str/test_modeling_mgp_str.py/0
{ "file_path": "transformers/tests/models/mgp_str/test_modeling_mgp_str.py", "repo_id": "transformers", "token_count": 4255 }
459
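A minimal inference sketch distilled from the MGP-STR integration test above; it is not part of the original test file, and it assumes network access to the "alibaba-damo/mgp-str-base" checkpoint and the sample image URL used in the test.

# Sketch only: mirrors MgpstrModelIntegrationTest.test_inference above.
import requests
import torch
from PIL import Image
from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

model_name = "alibaba-damo/mgp-str-base"
processor = MgpstrProcessor.from_pretrained(model_name)
model = MgpstrForSceneTextRecognition.from_pretrained(model_name).eval()

url = "https://i.postimg.cc/ZKwLg2Gw/367-14.png"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
pixel_values = processor(images=image, return_tensors="pt").pixel_values

with torch.no_grad():
    outputs = model(pixel_values)

# batch_decode fuses the character/BPE/WordPiece heads into final strings
print(processor.batch_decode(outputs.logits)["generated_text"])  # e.g. ["ticket"]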
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import math import unittest from transformers import MptConfig, is_torch_available from transformers.testing_utils import require_bitsandbytes, require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( AutoTokenizer, MptForCausalLM, MptForQuestionAnswering, MptForSequenceClassification, MptForTokenClassification, MptModel, ) @require_torch class MptModelTester: def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=False, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=48, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_input_mask = use_input_mask self.use_labels = use_labels self.use_mc_token_ids = use_mc_token_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_dropout_prob = attention_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = None self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size - 1 def get_large_model_config(self): return MptConfig.from_pretrained("mosaicml/mpt-7b") def prepare_config_and_inputs(self, gradient_checkpointing=False): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config(gradient_checkpointing=gradient_checkpointing) return (config, input_ids, input_mask, sequence_labels) def get_config(self, gradient_checkpointing=False): return MptConfig( vocab_size=self.vocab_size, seq_length=self.seq_length, hidden_size=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, 
hidden_dropout=self.hidden_dropout_prob, attention_dropout=self.attention_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, num_labels=self.num_labels, gradient_checkpointing=gradient_checkpointing, dtype="float32", ) def create_and_check_mpt_model(self, config, input_ids, input_mask, *args): model = MptModel(config=config) model.to(torch_device) model.eval() result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(len(result.past_key_values), config.n_layers) def create_and_check_mpt_model_past(self, config, input_ids, input_mask, *args): model = MptModel(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model(input_ids, attention_mask=torch.ones_like(input_ids), use_cache=True) outputs_use_cache_conf = model(input_ids, attention_mask=torch.ones_like(input_ids)) outputs_no_past = model(input_ids, use_cache=False, attention_mask=torch.ones_like(input_ids)) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and token_type_ids next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_mpt_model_attention_mask_past(self, config, input_ids, input_mask, *args): model = MptModel(config=config) model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = self.seq_length // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past = model(input_ids, attention_mask=attn_mask).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = 
output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_mpt_model_past_large_inputs(self, config, input_ids, input_mask, *args): model = MptModel(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True, ) hidden_states_from_no_past = output_from_no_past["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, ) hidden_states_from_past = output_from_past["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), hidden_states_from_past.shape[-1]).item() output_from_no_past_slice = hidden_states_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = hidden_states_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_lm_head_model(self, config, input_ids, input_mask, *args): model = MptForCausalLM(config) model.to(torch_device) model.eval() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_sequence_classification_model(self, config, input_ids, input_mask, *args): config.num_labels = self.num_labels model = MptForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_token_classification_model(self, config, input_ids, input_mask, *args): model = MptForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_question_answering_model(self, config, input_ids, input_mask, *args): model = MptForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, *args, gradient_checkpointing=False ): model = MptForCausalLM(config) model.to(torch_device) if gradient_checkpointing: model.gradient_checkpointing_enable() result = model(input_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) 
result.loss.backward() def create_and_check_mpt_weight_initialization(self, config, *args): model = MptModel(config) model_std = model.config.initializer_range / math.sqrt(2 * model.config.n_layers) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask, sequence_labels = config_and_inputs inputs_dict = {"input_ids": input_ids} return config, inputs_dict class MptConfigTester(ConfigTester): def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs): super().__init__(parent, config_class, has_text_modality, common_properties, **kwargs) def test_attn_config_as_dict(self): config = self.config_class(**self.inputs_dict, attn_config={"attn_impl": "flash", "softmax_scale": None}) self.parent.assertTrue(config.attn_config.attn_impl == "flash") self.parent.assertTrue(config.attn_config.softmax_scale is None) def run_common_tests(self): self.test_attn_config_as_dict() return super().run_common_tests() @require_torch class MptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( MptModel, MptForCausalLM, MptForSequenceClassification, MptForTokenClassification, MptForQuestionAnswering, ) if is_torch_available() else () ) all_generative_model_classes = (MptForCausalLM,) if is_torch_available() else () fx_compatible = False test_missing_keys = False test_pruning = False test_torchscript = False test_head_masking = False pipeline_model_mapping = ( { "feature-extraction": MptModel, "question-answering": MptForQuestionAnswering, "text-classification": MptForSequenceClassification, "text-generation": MptForCausalLM, "token-classification": MptForTokenClassification, "zero-shot": MptForSequenceClassification, } if is_torch_available() else {} ) def setUp(self): self.model_tester = MptModelTester(self) self.config_tester = MptConfigTester(self, config_class=MptConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_mpt_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpt_model(*config_and_inputs) def test_mpt_model_alibi_tensor(self): # test creation of alibi tensor when num heads is not a power of two config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs[0].n_heads = 6 self.model_tester.create_and_check_mpt_model(*config_and_inputs) def test_mpt_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpt_model_past(*config_and_inputs) def test_mpt_model_att_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpt_model_attention_mask_past(*config_and_inputs) def test_mpt_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpt_model_past_large_inputs(*config_and_inputs) def test_mpt_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_mpt_sequence_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_sequence_classification_model(*config_and_inputs) def test_mpt_token_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_token_classification_model(*config_and_inputs) def test_mpt_gradient_checkpointing(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True) def test_mpt_weight_initialization(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpt_weight_initialization(*config_and_inputs) @unittest.skip(reason="For backward compatibility the lm_head is not in the model's state dict on the Hub.") def test_model_weights_reload_no_missing_tied_weights(self): pass @slow def test_model_from_pretrained(self): model_name = "mosaicml/mpt-7b" model = MptModel.from_pretrained(model_name) self.assertIsNotNone(model) @slow @require_torch_gpu @require_bitsandbytes class MptIntegrationTests(unittest.TestCase): def test_generation_8k(self): model_id = "mosaicml/mpt-7b-8k" tokenizer = AutoTokenizer.from_pretrained(model_id) # Load in 4bit to fit the daily CI runner GPU RAM model = MptForCausalLM.from_pretrained( model_id, torch_dtype=torch.bfloat16, device_map={"": 0}, load_in_4bit=True ) input_text = "Hello" expected_output = "Hello, I'm a new user of the forum. I have a question about the \"Solaris" inputs = tokenizer(input_text, return_tensors="pt") outputs = model.generate(**inputs, max_new_tokens=20) decoded_output = tokenizer.decode(outputs[0], skip_special_tokens=True) self.assertEqual(decoded_output, expected_output) def test_generation(self): model_id = "mosaicml/mpt-7b" tokenizer = AutoTokenizer.from_pretrained(model_id) # Load in 4bit to fit the daily CI runner GPU RAM model = MptForCausalLM.from_pretrained( model_id, torch_dtype=torch.bfloat16, device_map={"": 0}, load_in_4bit=True ) input_text = "Hello" expected_output = "Hello and welcome to the first episode of the new podcast, The Frugal Feminist.\n" inputs = tokenizer(input_text, return_tensors="pt") outputs = model.generate(**inputs, max_new_tokens=20) decoded_output = tokenizer.decode(outputs[0], skip_special_tokens=True) self.assertEqual(decoded_output, expected_output) def test_generation_batched(self): model_id = "mosaicml/mpt-7b" tokenizer = AutoTokenizer.from_pretrained(model_id) # Load in 4bit to fit the daily CI runner GPU RAM model = MptForCausalLM.from_pretrained( model_id, torch_dtype=torch.bfloat16, device_map={"": 0}, load_in_4bit=True ) input_texts = ["Hello my name is", "Today I am going at the gym and"] tokenizer.pad_token_id = tokenizer.eos_token_id tokenizer.padding_side = "left" inputs = tokenizer(input_texts, return_tensors="pt", padding=True).to(torch_device) expected_output = [ "Hello my name is Tiffany and I am a mother of two beautiful children. I have been a nanny for the", "Today I am going at the gym and then I am going to go to the grocery store. 
I am going to buy some food and some", ] outputs = model.generate(**inputs, max_new_tokens=20) decoded_outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) for i, predicted_output in enumerate(decoded_outputs): self.assertEqual(predicted_output, expected_output[i]) def test_model_logits(self): model_id = "mosaicml/mpt-7b" # Load in 4bit to fit the daily CI runner GPU RAM model = MptForCausalLM.from_pretrained( model_id, torch_dtype=torch.bfloat16, device_map={"": 0}, load_in_4bit=True ) dummy_input = torch.LongTensor([[1, 2, 3, 4, 5]]).to(torch_device) outputs = model(dummy_input, output_hidden_states=True) expected_slice = torch.Tensor([-0.2520, -0.2178, -0.1953]).to(torch_device, torch.bfloat16) predicted_slice = outputs.hidden_states[-1][0, 0, :3] self.assertTrue(torch.allclose(expected_slice, predicted_slice, atol=1e-3, rtol=1e-3))
transformers/tests/models/mpt/test_modeling_mpt.py/0
{ "file_path": "transformers/tests/models/mpt/test_modeling_mpt.py", "repo_id": "transformers", "token_count": 9195 }
460
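A minimal text-generation sketch mirroring the MPT integration tests above; it is not part of the original test file. The tests load the checkpoint in 4-bit with bitsandbytes to fit the CI GPU, whereas this sketch assumes enough memory (and the accelerate package, for device_map="auto") to load the full 7B model.

# Sketch only: plain bf16 loading instead of the 4-bit path used in the tests.
import torch
from transformers import AutoTokenizer, MptForCausalLM

model_id = "mosaicml/mpt-7b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = MptForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))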
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "RUCAIBox/mvp" tokenizer_class = MvpTokenizer rust_tokenizer_class = MvpTokenizerFast test_rust_tokenizer = True from_pretrained_filter = filter_roberta_detectors # from_pretrained_kwargs = {'add_prefix_space': True} def setUp(self): super().setUp() vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): return "lower newer", "lower newer" @cached_property def default_tokenizer(self): return MvpTokenizer.from_pretrained("RUCAIBox/mvp") @cached_property def default_tokenizer_fast(self): return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp") @require_torch def test_prepare_batch(self): src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt") self.assertIsInstance(batch, BatchEncoding) self.assertEqual((2, 9), batch.input_ids.shape) self.assertEqual((2, 9), batch.attention_mask.shape) result = batch.input_ids.tolist()[0] self.assertListEqual(expected_src_tokens, result) # Test that special tokens are reset @require_torch def test_prepare_batch_empty_target_text(self): src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: batch = tokenizer(src_text, 
padding=True, return_tensors="pt") # check if input_ids are returned and no labels self.assertIn("input_ids", batch) self.assertIn("attention_mask", batch) self.assertNotIn("labels", batch) self.assertNotIn("decoder_attention_mask", batch) @require_torch def test_tokenizer_as_target_length(self): tgt_text = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt") self.assertEqual(32, targets["input_ids"].shape[1]) @require_torch def test_prepare_batch_not_longer_than_maxlen(self): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: batch = tokenizer( ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt" ) self.assertIsInstance(batch, BatchEncoding) self.assertEqual(batch.input_ids.shape, (2, 1024)) @require_torch def test_special_tokens(self): src_text = ["A long paragraph for summarization."] tgt_text = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt") input_ids = inputs["input_ids"] labels = inputs["labels"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item()) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item()) @unittest.skip def test_pretokenized_inputs(self): pass def test_embeded_special_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) sentence = "A, <mask> AllenNLP sentence." tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"])) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), ) tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"]) tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"]) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual( tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
transformers/tests/models/mvp/test_tokenization_mvp.py/0
{ "file_path": "transformers/tests/models/mvp/test_tokenization_mvp.py", "repo_id": "transformers", "token_count": 3713 }
461
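A minimal seq2seq batch-preparation sketch based on the MVP tokenizer tests above; it is not part of the original test file, and it assumes access to the "RUCAIBox/mvp" checkpoint on the Hub.

# Sketch only: combines test_prepare_batch and test_special_tokens above.
from transformers import MvpTokenizer

tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp")

src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
tgt_text = ["Summary of the text.", "Another summary."]

# Passing text_target adds a "labels" field next to input_ids / attention_mask;
# every encoded sequence starts with <s> (bos) and ends with </s> (eos).
batch = tokenizer(src_text, text_target=tgt_text, padding=True, return_tensors="pt")
print(batch["input_ids"].shape, batch["labels"].shape)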
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch OneFormer model.""" import copy import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import OneFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import ( require_timm, require_torch, require_torch_accelerator, require_torch_fp16, require_torch_multi_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OneFormerForUniversalSegmentation, OneFormerModel if is_vision_available(): from transformers import OneFormerProcessor if is_vision_available(): from PIL import Image def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(configs_no_init, key, 1e-10) return configs_no_init class OneFormerModelTester: def __init__( self, parent, batch_size=2, is_training=True, vocab_size=99, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 8, max_size=32 * 8, num_labels=4, hidden_dim=64, sequence_length=77, n_ctx=4, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.vocab_size = vocab_size self.use_auxiliary_loss = use_auxiliary_loss self.num_queries = num_queries self.num_channels = num_channels self.min_size = min_size self.max_size = max_size self.num_labels = num_labels self.hidden_dim = hidden_dim self.sequence_length = sequence_length self.n_ctx = n_ctx def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( torch_device ) task_inputs = ( torch.randint(high=self.vocab_size, size=(self.batch_size, self.sequence_length)).to(torch_device).long() ) pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) text_inputs = ( torch.randint( high=self.vocab_size, size=(self.batch_size, self.num_queries - self.n_ctx, self.sequence_length) ) .to(torch_device) .long() ) mask_labels = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5 ).float() class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long() config = self.get_config() return config, pixel_values, task_inputs, text_inputs, pixel_mask, mask_labels, class_labels def get_config(self): config = OneFormerConfig( text_encoder_vocab_size=self.vocab_size, hidden_size=self.hidden_dim, num_queries=self.num_queries, num_labels=self.num_labels, encoder_feedforward_dim=32, dim_feedforward=64, encoder_layers=2, decoder_layers=2, ) 
config.backbone_config.embed_dim = 16 config.backbone_config.depths = [1, 1, 1, 1] config.backbone_config.hidden_size = 16 config.backbone_config.num_channels = self.num_channels config.backbone_config.num_heads = [1, 1, 2, 2] config.backbone = None config.hidden_dim = self.hidden_dim config.mask_dim = self.hidden_dim config.conv_dim = self.hidden_dim config.text_encoder_width = self.hidden_dim config.task_seq_len = self.sequence_length config.max_seq_len = self.sequence_length config.text_encoder_context_length = self.sequence_length config.text_encoder_n_ctx = self.n_ctx return config def prepare_config_and_inputs_for_common(self): config, pixel_values, task_inputs, pixel_mask, _, _, _ = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask, "task_inputs": task_inputs} return config, inputs_dict def check_output_hidden_state(self, output, config): encoder_hidden_states = output.encoder_hidden_states pixel_decoder_hidden_states = output.pixel_decoder_hidden_states transformer_decoder_hidden_states = output.transformer_decoder_hidden_states self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths)) self.parent.assertTrue(len(pixel_decoder_hidden_states), config.encoder_layers) self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers - 1) def create_and_check_oneformer_model( self, config, pixel_values, task_inputs, pixel_mask, output_hidden_states=False ): with torch.no_grad(): model = OneFormerModel(config=config) model.to(torch_device) model.eval() output = model(pixel_values=pixel_values, task_inputs=task_inputs, pixel_mask=pixel_mask) output = model(pixel_values, task_inputs=task_inputs, output_hidden_states=True) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_object_queries.shape, (self.batch_size, self.num_queries, self.hidden_dim), ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_hidden_states is not None) self.parent.assertTrue(output.encoder_hidden_states is not None) if output_hidden_states: self.check_output_hidden_state(output, config) def create_and_check_oneformer_universal_segmentation_head_model( self, config, pixel_values, task_inputs, text_inputs, pixel_mask, mask_labels, class_labels ): model = OneFormerForUniversalSegmentation(config=config) model.to(torch_device) model.eval() def comm_check_on_output(result): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_hidden_states is not None) self.parent.assertTrue(result.pixel_decoder_hidden_states is not None) self.parent.assertTrue(result.encoder_hidden_states is not None) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): result = model(pixel_values=pixel_values, task_inputs=task_inputs, pixel_mask=pixel_mask) result = model(pixel_values, task_inputs) comm_check_on_output(result) config.is_training = True model = OneFormerForUniversalSegmentation(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model( 
pixel_values=pixel_values, task_inputs=task_inputs, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels, text_inputs=text_inputs, ) comm_check_on_output(result) self.parent.assertTrue(result.loss is not None) self.parent.assertEqual(result.loss.shape, torch.Size([1])) @require_torch class OneFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (OneFormerModel, OneFormerForUniversalSegmentation) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": OneFormerModel} if is_torch_available() else {} is_encoder_decoder = False test_pruning = False test_head_masking = False test_missing_keys = False # TODO: Fix the failed tests when this model gets more usage def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "FeatureExtractionPipelineTests": return True return False def setUp(self): self.model_tester = OneFormerModelTester(self) self.config_tester = ConfigTester(self, config_class=OneFormerConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_oneformer_model(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_oneformer_model(config, **inputs, output_hidden_states=False) def test_oneformer_universal_segmentation_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_oneformer_universal_segmentation_head_model(*config_and_inputs) def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "forward")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1:3] self.assertEqual(model_class.main_input_name, observed_main_input_name) @unittest.skip(reason="OneFormer uses two main inputs") def test_torchscript_simple(self): pass @unittest.skip(reason="OneFormer uses two main inputs") def test_torchscript_output_attentions(self): pass @unittest.skip(reason="OneFormer uses two main inputs") def test_torchscript_output_hidden_state(self): pass @unittest.skip(reason="OneFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="OneFormer does not have a get_input_embeddings method") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="OneFormer is not a generative model") def test_generate_without_input_ids(self): pass @unittest.skip(reason="OneFormer does not use token embeddings") def test_resize_tokens_embeddings(self): pass @require_torch_multi_gpu @unittest.skip( reason="OneFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def test_multi_gpu_data_parallel_forward(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values", "task_inputs"] self.assertListEqual(arg_names[:2], expected_arg_names) @slow def test_model_from_pretrained(self): for model_name in ["shi-labs/oneformer_ade20k_swin_tiny"]: model = OneFormerModel.from_pretrained(model_name) self.assertIsNotNone(model) 
def test_model_with_labels(self): size = (self.model_tester.min_size,) * 2 inputs = { "pixel_values": torch.randn((2, 3, *size), device=torch_device), "task_inputs": torch.randint(high=self.model_tester.vocab_size, size=(2, 77), device=torch_device).long(), "text_inputs": torch.randint( high=self.model_tester.vocab_size, size=(2, 6, 77), device=torch_device ).long(), "mask_labels": torch.randn((2, 150, *size), device=torch_device), "class_labels": torch.zeros(2, 150, device=torch_device).long(), } config = self.model_tester.get_config() config.is_training = True model = OneFormerForUniversalSegmentation(config).to(torch_device) outputs = model(**inputs) self.assertTrue(outputs.loss is not None) def test_hidden_states_output(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_oneformer_model(config, **inputs, output_hidden_states=True) def test_attention_outputs(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) outputs = model(**inputs, output_attentions=True) self.assertTrue(outputs.attentions is not None) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.contrastive_temperature = 1 configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") # only OneFormerForUniversalSegmentation has the loss model_class = self.all_model_classes[1] ( config, pixel_values, task_inputs, text_inputs, pixel_mask, mask_labels, class_labels, ) = self.model_tester.prepare_config_and_inputs() config.is_training = True model = model_class(config) model.to(torch_device) model.train() loss = model( pixel_values, task_inputs, text_inputs=text_inputs, mask_labels=mask_labels, class_labels=class_labels ).loss loss.backward() def test_retain_grad_hidden_states_attentions(self): # only OneFormerForUniversalSegmentation has the loss model_class = self.all_model_classes[1] ( config, pixel_values, task_inputs, text_inputs, pixel_mask, mask_labels, class_labels, ) = self.model_tester.prepare_config_and_inputs() config.output_hidden_states = True config.output_attentions = True config.is_training = True model = model_class(config) model.to(torch_device) model.train() outputs = model( pixel_values, task_inputs, text_inputs=text_inputs, mask_labels=mask_labels, class_labels=class_labels ) encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() transformer_decoder_class_predictions = outputs.transformer_decoder_class_predictions transformer_decoder_class_predictions.retain_grad() transformer_decoder_mask_predictions = outputs.transformer_decoder_mask_predictions transformer_decoder_mask_predictions.retain_grad() attentions = outputs.attentions[0][0] attentions.retain_grad() outputs.loss.backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) 
self.assertIsNotNone(transformer_decoder_class_predictions.grad) self.assertIsNotNone(transformer_decoder_mask_predictions.grad) self.assertIsNotNone(attentions.grad) @require_timm def test_backbone_selection(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() config.backbone_config = None config.backbone_kwargs = {"out_indices": [1, 2, 3]} config.use_pretrained_backbone = True # Load a timm backbone # We can't load transformer checkpoint with timm backbone, as we can't specify features_only and out_indices config.backbone = "resnet18" config.use_timm_backbone = True for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() if model.__class__.__name__ == "OneFormerModel": self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3]) elif model.__class__.__name__ == "OneFormerForUniversalSegmentation": self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3]) # Load a HF backbone config.backbone = "microsoft/resnet-18" config.use_timm_backbone = False for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() if model.__class__.__name__ == "OneFormerModel": self.assertEqual(model.pixel_level_module.encoder.out_indices, [1, 2, 3]) elif model.__class__.__name__ == "OneFormerForUniversalSegmentation": self.assertEqual(model.model.pixel_level_module.encoder.out_indices, [1, 2, 3]) TOLERANCE = 1e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @slow class OneFormerModelIntegrationTest(unittest.TestCase): @cached_property def model_checkpoints(self): return "shi-labs/oneformer_ade20k_swin_tiny" @cached_property def default_processor(self): return OneFormerProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None def test_inference_no_head(self): model = OneFormerModel.from_pretrained(self.model_checkpoints).to(torch_device) processor = self.default_processor image = prepare_img() inputs = processor(image, ["semantic"], return_tensors="pt").to(torch_device) inputs_shape = inputs["pixel_values"].shape # check size self.assertEqual(inputs_shape, (1, 3, 512, 682)) task_inputs_shape = inputs["task_inputs"].shape # check size self.assertEqual(task_inputs_shape, (1, 77)) with torch.no_grad(): outputs = model(**inputs) expected_slice_hidden_state = torch.tensor( [[0.2723, 0.8280, 0.6026], [1.2699, 1.1257, 1.1444], [1.1344, 0.6153, 0.4177]] ).to(torch_device) self.assertTrue( torch.allclose( outputs.encoder_hidden_states[-1][0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE ) ) expected_slice_hidden_state = torch.tensor( [[1.0581, 1.2276, 1.2003], [1.1903, 1.2925, 1.2862], [1.158, 1.2559, 1.3216]] ).to(torch_device) self.assertTrue( torch.allclose( outputs.pixel_decoder_hidden_states[0][0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE ) ) expected_slice_hidden_state = torch.tensor( [[3.0668, -1.1833, -5.1103], [3.344, -3.362, -5.1101], [2.6017, -4.3613, -4.1444]] ).to(torch_device) self.assertTrue( torch.allclose( outputs.transformer_decoder_class_predictions[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE ) ) def test_inference_universal_segmentation_head(self): model = OneFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval() processor = self.default_processor image = prepare_img() inputs = processor(image, ["semantic"], 
return_tensors="pt").to(torch_device) inputs_shape = inputs["pixel_values"].shape # check size self.assertEqual(inputs_shape, (1, 3, 512, 682)) with torch.no_grad(): outputs = model(**inputs) # masks_queries_logits masks_queries_logits = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, (inputs_shape[-1] + 2) // 4), ) expected_slice = [[[3.1848, 4.2141, 4.1993], [2.9000, 3.5721, 3.6603], [2.5358, 3.0883, 3.6168]]] expected_slice = torch.tensor(expected_slice).to(torch_device) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE)) # class_queries_logits class_queries_logits = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1), ) expected_slice = torch.tensor( [[3.0668, -1.1833, -5.1103], [3.3440, -3.3620, -5.1101], [2.6017, -4.3613, -4.1444]] ).to(torch_device) self.assertTrue(torch.allclose(class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE)) @require_torch_accelerator @require_torch_fp16 def test_inference_fp16(self): model = ( OneFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints) .to(torch_device, dtype=torch.float16) .eval() ) processor = self.default_processor image = prepare_img() inputs = processor(image, ["semantic"], return_tensors="pt").to(torch_device, dtype=torch.float16) with torch.no_grad(): _ = model(**inputs) def test_with_segmentation_maps_and_loss(self): dummy_model = OneFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints) processor = self.default_processor processor.image_processor.num_text = dummy_model.config.num_queries - dummy_model.config.text_encoder_n_ctx dummy_model.config.is_training = True model = OneFormerForUniversalSegmentation(dummy_model.config).to(torch_device).eval() del dummy_model inputs = processor( [np.zeros((3, 512, 640)), np.zeros((3, 512, 640))], ["semantic", "semantic"], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors="pt", ) inputs["pixel_values"] = inputs["pixel_values"].to(torch_device) inputs["task_inputs"] = inputs["task_inputs"].to(torch_device) inputs["text_inputs"] = inputs["text_inputs"].to(torch_device) inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]] inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]] with torch.no_grad(): outputs = model(**inputs) self.assertTrue(outputs.loss is not None)
transformers/tests/models/oneformer/test_modeling_oneformer.py/0
{ "file_path": "transformers/tests/models/oneformer/test_modeling_oneformer.py", "repo_id": "transformers", "token_count": 10738 }
462
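A minimal semantic-segmentation sketch based on the OneFormer integration tests above; it is not part of the original test file. The post_process_semantic_segmentation helper is taken from the processor's documented API rather than from these tests, so treat that call as an assumption.

# Sketch only: same checkpoint and fixture image as OneFormerModelIntegrationTest.
import torch
from PIL import Image
from transformers import OneFormerForUniversalSegmentation, OneFormerProcessor

checkpoint = "shi-labs/oneformer_ade20k_swin_tiny"
processor = OneFormerProcessor.from_pretrained(checkpoint)
model = OneFormerForUniversalSegmentation.from_pretrained(checkpoint).eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(image, ["semantic"], return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# One class id per pixel, resized back to the input image resolution (assumed helper)
semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(semantic_map.shape)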
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class OwlViTProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: skip vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) image_processor_map = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48145466, 0.4578275, 0.40821073], "image_std": [0.26862954, 0.26130258, 0.27577711], } self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME) with open(self.image_processor_file, "w", encoding="utf-8") as fp: json.dump(image_processor_map, fp) def get_tokenizer(self, **kwargs): return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs) def get_rust_tokenizer(self, **kwargs): return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs) def get_image_processor(self, **kwargs): return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def prepare_image_inputs(self): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. 
""" image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)] image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] return image_inputs def test_save_load_pretrained_default(self): tokenizer_slow = self.get_tokenizer() tokenizer_fast = self.get_rust_tokenizer() image_processor = self.get_image_processor() processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor) processor_slow.save_pretrained(self.tmpdirname) processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False) processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor) processor_fast.save_pretrained(self.tmpdirname) processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer) self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast) self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string()) self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor) self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor) def test_save_load_pretrained_additional_features(self): processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False) processor = OwlViTProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", pad_token="!", do_normalize=False ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, OwlViTImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_image_proc = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" encoded_processor = processor(text=input_str, return_tensors="np") encoded_tok = tokenizer(input_str, return_tensors="np") for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist()) def test_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = 
processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"]) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_processor_with_text_list(self): model_name = "google/owlvit-base-patch32" processor = OwlViTProcessor.from_pretrained(model_name) input_text = ["cat", "nasa badge"] inputs = processor(text=input_text) seq_length = 16 self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"]) self.assertEqual(inputs["input_ids"].shape, (2, seq_length)) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_processor_with_nested_text_list(self): model_name = "google/owlvit-base-patch32" processor = OwlViTProcessor.from_pretrained(model_name) input_texts = [["cat", "nasa badge"], ["person"]] inputs = processor(text=input_texts) seq_length = 16 batch_size = len(input_texts) num_max_text_queries = max([len(texts) for texts in input_texts]) self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"]) self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length)) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_processor_case(self): model_name = "google/owlvit-base-patch32" processor = OwlViTProcessor.from_pretrained(model_name) input_texts = ["cat", "nasa badge"] inputs = processor(text=input_texts) seq_length = 16 input_ids = inputs["input_ids"] predicted_ids = [ [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"]) self.assertEqual(inputs["input_ids"].shape, (2, seq_length)) self.assertListEqual(list(input_ids[0]), predicted_ids[0]) self.assertListEqual(list(input_ids[1]), predicted_ids[1]) def test_processor_case2(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() query_input = self.prepare_image_inputs() inputs = processor(images=image_input, query_images=query_input) self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"]) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor)
transformers/tests/models/owlvit/test_processor_owlvit.py/0
{ "file_path": "transformers/tests/models/owlvit/test_processor_owlvit.py", "repo_id": "transformers", "token_count": 4354 }
463
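A hypothetical end-to-end zero-shot detection sketch: the processor tests above only exercise tokenization and image preprocessing, so OneFormer-style inference is out of scope here and OwlViTForObjectDetection plus post_process_object_detection are taken from the model's documented API rather than from this file.

# Sketch only: query texts and checkpoint name follow the processor tests above.
import torch
from PIL import Image
from transformers import OwlViTForObjectDetection, OwlViTProcessor

model_name = "google/owlvit-base-patch32"
processor = OwlViTProcessor.from_pretrained(model_name)
model = OwlViTForObjectDetection.from_pretrained(model_name).eval()  # assumed detection head

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
texts = [["cat", "nasa badge"]]  # one list of text queries per image
inputs = processor(text=texts, images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
results = processor.post_process_object_detection(outputs=outputs, threshold=0.1, target_sizes=target_sizes)
print(results[0]["scores"].shape, results[0]["boxes"].shape)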
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): FRAMEWORK = "pt" elif is_tf_available(): FRAMEWORK = "tf" else: FRAMEWORK = "jax" class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "deepmind/language-perceiver" tokenizer_class = PerceiverTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() tokenizer = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname) @cached_property def perceiver_tokenizer(self): return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver") def get_tokenizer(self, **kwargs) -> PerceiverTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. toks = [] for i in range(len(tokenizer)): try: tok = tokenizer.decode([i], clean_up_tokenization_spaces=False) except UnicodeDecodeError: pass toks.append((i, tok)) toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks)) toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks)) if max_length is not None and len(toks) > max_length: toks = toks[:max_length] if min_length is not None and len(toks) < min_length and len(toks) > 0: while len(toks) < min_length: toks = toks + toks # toks_str = [t[1] for t in toks] toks_ids = [t[0] for t in toks] # Ensure consistency output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False) if " " not in output_txt and len(toks_ids) > 1: output_txt = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False) + " " + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False) ) if with_prefix_space: output_txt = " " + output_txt output_ids = tokenizer.encode(output_txt, add_special_tokens=False) return output_txt, output_ids def test_multibytes_char(self): tokenizer = self.perceiver_tokenizer src_text = "Unicode €." 
encoded = tokenizer(src_text) encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded["input_ids"], encoded_ids) # decoding decoded = tokenizer.decode(encoded_ids) self.assertEqual(decoded, "[CLS]Unicode €.[SEP]") encoded = tokenizer("e è é ê ë") encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded["input_ids"], encoded_ids) # decoding decoded = tokenizer.decode(encoded_ids) self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]") # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]") def test_prepare_batch_integration(self): tokenizer = self.perceiver_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: skip batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) self.assertIsInstance(batch, BatchEncoding) if FRAMEWORK != "jax": result = list(batch.input_ids.numpy()[0]) else: result = list(batch.input_ids.tolist()[0]) self.assertListEqual(expected_src_tokens, result) self.assertEqual((2, 38), batch.input_ids.shape) self.assertEqual((2, 38), batch.attention_mask.shape) def test_empty_target_text(self): tokenizer = self.perceiver_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids", batch) self.assertIn("attention_mask", batch) self.assertNotIn("decoder_input_ids", batch) self.assertNotIn("decoder_attention_mask", batch) def test_max_length_integration(self): tokenizer = self.perceiver_tokenizer tgt_text = [ "Summary of the text.", "Another summary.", ] targets = tokenizer( text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK ) self.assertEqual(32, targets["input_ids"].shape[1]) # cannot use default save_and_load_tokenizer test method because tokenizer has no vocab def test_save_and_load_tokenizer(self): # safety check on max_len default value so we are sure the test works tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): self.assertNotEqual(tokenizer.model_max_length, 42) # Now let's start the test tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00e9d,running" before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) self.assertListEqual(before_tokens, after_tokens) shutil.rmtree(tmpdirname) tokenizers = self.get_tokenizers(model_max_length=42) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00e9d,running" tokenizer.add_tokens(["bim", "bambam"]) 
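# Besides the plain added tokens above, register an extra additional special token and check that it survives a save/reload round-trip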
additional_special_tokens = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token") tokenizer.add_special_tokens( {"additional_special_tokens": additional_special_tokens}, replace_additional_special_tokens=False ) before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) self.assertListEqual(before_tokens, after_tokens) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens) self.assertEqual(after_tokenizer.model_max_length, 42) tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43) self.assertEqual(tokenizer.model_max_length, 43) shutil.rmtree(tmpdirname) # There is a conflict between the default value of extra_ids and adding a new special token through additional_special_tokens # We need to add the extra_ids in the list of the arg additional_special_tokens def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self): tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file: special_tokens_map = json.load(json_file) with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file: tokenizer_config = json.load(json_file) added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)] special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile: json.dump(special_tokens_map, outfile) with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile: json.dump(tokenizer_config, outfile) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files tokenizer_without_change_in_init = tokenizer_class.from_pretrained( tmp_dir, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"]) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)] tokenizer = tokenizer_class.from_pretrained( tmp_dir, additional_special_tokens=new_added_tokens, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"]) ), ) def test_decode_invalid_byte_id(self): tokenizer = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178]), "�") @unittest.skip(reason="tokenizer does not have vocabulary") def test_get_vocab(self): pass @unittest.skip(reason="inputs cannot be pretokenized") def test_pretokenized_inputs(self): # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters pass @unittest.skip(reason="vocab does not exist") def test_conversion_reversible(self): pass def test_convert_tokens_to_string_format(self): # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character # strings and special added tokens as tokens tokenizers = self.get_tokenizers(fast=True, do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"] string = tokenizer.convert_tokens_to_string(tokens) self.assertIsInstance(string, str)
transformers/tests/models/perceiver/test_tokenization_perceiver.py/0
{ "file_path": "transformers/tests/models/perceiver/test_tokenization_perceiver.py", "repo_id": "transformers", "token_count": 6120 }
464
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the Tensorflow ResNet model.""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class TFResNetModelTester: def __init__( self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.embeddings_size = embeddings_size self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.num_labels = num_labels self.scope = scope self.num_stages = len(hidden_sizes) def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return ResNetConfig( num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, ) def create_and_check_model(self, config, pixel_values, labels): model = TFResNetModel(config=config) result = model(pixel_values) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = TFResNetForImageClassification(config) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as 
ResNet does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False test_onnx = False has_attentions = False def setUp(self): self.model_tester = TFResNetModelTester(self) self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return @unittest.skip(reason="ResNet does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="ResNet does not support input and output embeddings") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages self.assertEqual(len(hidden_states), expected_num_stages + 1) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() layers_type = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: config.layer_type = layer_type inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "microsoft/resnet-50" model = TFResNetModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf 
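# The integration test below runs the pretrained "microsoft/resnet-50" checkpoint on the sample image and checks the classification logits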
@require_vision class TFResNetModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("microsoft/resnet-50") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs) # verify the logits expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-11.1069, -9.7877, -8.3777]) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
transformers/tests/models/resnet/test_modeling_tf_resnet.py/0
{ "file_path": "transformers/tests/models/resnet/test_modeling_tf_resnet.py", "repo_id": "transformers", "token_count": 3760 }
465
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class TFRoFormerModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_token_type_ids = True self.use_labels = True self.vocab_size = 99 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.intermediate_size = 37 self.hidden_act = "gelu" self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 16 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = RoFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, 
type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFRoFormerModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_lm_head( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = TFRoFormerForCausalLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } prediction_scores = model(inputs)["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFRoFormerForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFRoFormerForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFRoFormerForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFRoFormerForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFRoFormerForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) 
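# The QA head predicts one start logit and one end logit per input token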
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFRoFormerModel, "fill-mask": TFRoFormerForMaskedLM, "question-answering": TFRoFormerForQuestionAnswering, "text-classification": TFRoFormerForSequenceClassification, "text-generation": TFRoFormerForCausalLM, "token-classification": TFRoFormerForTokenClassification, "zero-shot": TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False # TODO: add `prepare_inputs_for_generation` for `TFRoFormerForCausalLM` def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def setUp(self): self.model_tester = TFRoFormerModelTester(self) self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base") self.assertIsNotNone(model) @require_tf class TFRoFormerModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] # TODO Replace vocab size vocab_size = 50000 expected_shape = [1, 6, vocab_size] self.assertEqual(output.shape, 
expected_shape) print(output[:, :3, :3]) # TODO Replace values below with what was printed above. expected_slice = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4) @require_tf class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase): tolerance = 1e-4 def test_basic(self): input_ids = tf.constant([[4, 10]]) emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6) emb = emb1(input_ids.shape) desired_weights = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance) def test_positional_emb_weights_against_roformer(self): desired_weights = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512) emb1([2, 16, 512]) weights = emb1.weight[:3, :5] tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance) @require_tf class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase): tolerance = 1e-4 def test_apply_rotary_position_embeddings(self): # 2,12,16,64 query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100 key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100 embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64) sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :] query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings( sinusoidal_pos, query_layer, key_layer ) desired_query_layer = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) desired_key_layer = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance) tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
transformers/tests/models/roformer/test_modeling_tf_roformer.py/0
{ "file_path": "transformers/tests/models/roformer/test_modeling_tf_roformer.py", "repo_id": "transformers", "token_count": 7807 }
466
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, PreTrainedTokenizerFast, SeamlessM4TTokenizer, SeamlessM4TTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right EN_CODE = 256047 RO_CODE = 256145 SMALL_TRAINING_CORPUS = [ ["This is the first sentence.", "This is the second one."], ["This sentence (contains #) over symbols and numbers 12 3.", "But not this one."], ] @require_sentencepiece @require_tokenizers class SeamlessM4TTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "facebook/hf-seamless-m4t-medium" tokenizer_class = SeamlessM4TTokenizer rust_tokenizer_class = SeamlessM4TTokenizerFast test_rust_tokenizer = True test_sentencepiece = True from_pretrained_kwargs = {} def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = SeamlessM4TTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_full_tokenizer(self): tokenizer = SeamlessM4TTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) @unittest.skip(reason="This fails currently and is a blocker. 
No idea why TODO @ylacombe") def test_maximum_encoding_length_single_input(self): tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): seq_0, ids = self.get_clean_sequence(tokenizer, max_length=20) sequence = tokenizer.encode(seq_0, add_special_tokens=False) total_length = len(sequence) self.assertGreater( total_length, 4, "Issue with the testing sequence, please update it, it's too short" ) # Test with max model input length model_max_length = tokenizer.model_max_length self.assertEqual(model_max_length, 100) seq_1 = seq_0 * model_max_length sequence1 = tokenizer(seq_1, add_special_tokens=False) total_length1 = len(sequence1["input_ids"]) self.assertGreater( total_length1, model_max_length, "Issue with the testing sequence, please update it, it's too short", ) # Simple padding_strategies = ( [False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False] ) for padding_state in padding_strategies: with self.subTest(f"Padding: {padding_state}"): for truncation_state in [True, "longest_first", "only_first"]: with self.subTest(f"Truncation: {truncation_state}"): output = tokenizer(seq_1, padding=padding_state, truncation=truncation_state) self.assertEqual(len(output["input_ids"]), model_max_length) output = tokenizer([seq_1], padding=padding_state, truncation=truncation_state) self.assertEqual(len(output["input_ids"][0]), model_max_length) # Simple with no truncation # Reset warnings tokenizer.deprecation_warnings = {} with self.assertLogs("transformers", level="WARNING") as cm: output = tokenizer(seq_1, padding=padding_state, truncation=False) self.assertNotEqual(len(output["input_ids"]), model_max_length) self.assertEqual(len(cm.records), 1) self.assertTrue( cm.records[0].message.startswith( "Token indices sequence length is longer than the specified maximum sequence length" " for this model" ) ) tokenizer.deprecation_warnings = {} with self.assertLogs("transformers", level="WARNING") as cm: output = tokenizer([seq_1], padding=padding_state, truncation=False) self.assertNotEqual(len(output["input_ids"][0]), model_max_length) self.assertEqual(len(cm.records), 1) self.assertTrue( cm.records[0].message.startswith( "Token indices sequence length is longer than the specified maximum sequence length" " for this model" ) ) # Overflowing tokens stride = 2 # modify padding because it's activated by default in seamlessM4T information = tokenizer( seq_0, max_length=total_length - 2, add_special_tokens=False, stride=stride, truncation="longest_first", return_overflowing_tokens=True, padding=False, # add_prefix_space=False, ) # Overflowing tokens are handled quite differently in slow and fast tokenizers if isinstance(tokenizer, PreTrainedTokenizerFast): truncated_sequence = information["input_ids"][0] overflowing_tokens = information["input_ids"][1] self.assertEqual(len(information["input_ids"]), 2) self.assertEqual(len(truncated_sequence), total_length - 2) self.assertEqual(truncated_sequence, sequence[:-2]) self.assertEqual(len(overflowing_tokens), 2 + stride) self.assertEqual(overflowing_tokens, sequence[-(2 + stride) :]) else: truncated_sequence = information["input_ids"] overflowing_tokens = information["overflowing_tokens"] self.assertEqual(len(truncated_sequence), total_length - 2) self.assertEqual(truncated_sequence, sequence[:-2]) self.assertEqual(len(overflowing_tokens), 2 + stride) self.assertEqual(overflowing_tokens, sequence[-(2 + stride) :]) 
@unittest.skip(reason="By defaults, uses pad_to_multiple_of which breaks the test") def test_maximum_encoding_length_pair_input(self): pass def test_padding_to_multiple_of(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if tokenizer.pad_token is None: self.skipTest(reason="No padding token.") else: empty_tokens = tokenizer("", padding=True, pad_to_multiple_of=8) normal_tokens = tokenizer("This is a sample input", padding=True, pad_to_multiple_of=8) for key, value in empty_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # default to padding=True so need to precise which padding is called normal_tokens = tokenizer("This", pad_to_multiple_of=8, padding=False) for key, value in normal_tokens.items(): self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # Should also work with truncation normal_tokens = tokenizer("This", padding=True, truncation=True, pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # truncation to something which is not a multiple of pad_to_multiple_of raises an error self.assertRaises( ValueError, tokenizer.__call__, "This", padding=True, truncation=True, max_length=12, pad_to_multiple_of=8, ) @require_torch def test_prepare_seq2seq_batch(self): if not self.test_seq2seq: self.skipTest(reason="test_seq2seq is set to False") tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Longer text that will definitely require truncation. 
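# English source sentences paired with Romanian targets; the batch below is built with src_lang="eng" and tgt_lang="ron"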
src_text = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for" " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons" " will only worsen the violence and misery for millions of people.", ] tgt_text = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al" ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi' " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] try: batch = tokenizer.prepare_seq2seq_batch( src_texts=src_text, tgt_texts=tgt_text, max_length=3, max_target_length=10, return_tensors="pt", src_lang="eng", tgt_lang="ron", pad_to_multiple_of=None, ) except NotImplementedError: self.skipTest(reason="Encountered NotImplementedError when calling prepare_seq2seq_batch") self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.labels.shape[1], 10) # TODO: not working for tgt_text # max_target_length will default to max_length if not specified batch = tokenizer.prepare_seq2seq_batch( src_texts=src_text, tgt_texts=tgt_text, max_length=4, return_tensors="pt", pad_to_multiple_of=None, ) self.assertEqual(batch.input_ids.shape[1], 4) self.assertEqual(batch.labels.shape[1], 4) batch_encoder_only = tokenizer.prepare_seq2seq_batch( src_texts=src_text, max_length=4, max_target_length=10, return_tensors="pt", pad_to_multiple_of=None, ) self.assertEqual(batch_encoder_only.input_ids.shape[1], 4) self.assertEqual(batch_encoder_only.attention_mask.shape[1], 4) self.assertNotIn("decoder_input_ids", batch_encoder_only) @unittest.skip(reason="Unfortunately way too slow to build a BPE with SentencePiece.") def test_save_slow_from_fast_and_reload_fast(self): pass # Copied from tests.models.nllb.test_tokenization_nllb.NllbTokenizationTest.test_special_tokens_initialization def test_special_tokens_initialization(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [AddedToken("<special>", lstrip=True)] tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) r_output = tokenizer_r.encode("Hey this is a <special> token") special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0] self.assertTrue(special_token_id in r_output) if self.test_slow_tokenizer: tokenizer_cr = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs, # , from_slow=True <- unfortunately too slow to convert ) tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) p_output = tokenizer_p.encode("Hey this is a <special> token") cr_output = tokenizer_cr.encode("Hey this is a <special> token") self.assertEqual(p_output, r_output) self.assertEqual(cr_output, r_output) self.assertTrue(special_token_id in p_output) self.assertTrue(special_token_id in cr_output) @unittest.skip( "encode_plus and batch_encode_plus are deprecated and __call__ do some processing, so we expect different results." 
) def test_call(self): pass def test_training_new_tokenizer(self): # This feature only exists for fast tokenizers if not self.test_rust_tokenizer: self.skipTest(reason="test_rust_tokenizer is set to False") tokenizer = self.get_rust_tokenizer() new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100) # Test we can use the new tokenizer with something not seen during training inputs = new_tokenizer(["This is the first sentence", "This sentence is different 🤗."]) self.assertEqual(len(inputs["input_ids"]), 2) decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True) expected_result = "This is the first sentence" if tokenizer.backend_tokenizer.normalizer is not None: expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result) self.assertEqual(expected_result, decoded_input) # We check that the parameters of the tokenizer remained the same # Check we have the same number of added_tokens for both pair and non-pair inputs. # make sure it has the same prefix tokens first new_tokenizer.tgt_lang = tokenizer.tgt_lang tokenizer.tgt_lang = tokenizer.tgt_lang self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False)) self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True)) # Check we have the correct max_length for both pair and non-pair inputs. self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence) self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair) # Assert the set of special tokens match as we didn't ask to change them self.assertSequenceEqual( tokenizer.all_special_tokens_extended, new_tokenizer.all_special_tokens_extended, ) self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map) @unittest.skip(reason="Fails because of the hack of adding <unk> in _tokenize") def test_pickle_subword_regularization_tokenizer(self): pass @unittest.skip(reason="Fails because of the hack of adding <unk> in _tokenize") def test_subword_regularization_tokenizer(self): pass @require_torch @require_sentencepiece @require_tokenizers class SeamlessM4TDistilledIntegrationTest(unittest.TestCase): checkpoint_name = "facebook/hf-seamless-m4t-medium" src_text = [ " UN Chief Says There Is No Military Solution in Syria", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""", ] tgt_text = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] expected_src_tokens = [256047, 16297, 134408, 8165, 248066, 14734, 950, 1135, 105721, 3573, 83, 27352, 108, 49486, 3] # fmt: skip @classmethod def setUpClass(cls): cls.tokenizer: SeamlessM4TTokenizer = SeamlessM4TTokenizer.from_pretrained( cls.checkpoint_name, src_lang="eng", tgt_lang="ron" ) # cls.pad_token_id = 1 return cls def test_language_codes(self): self.assertEqual(self.tokenizer.convert_tokens_to_ids("__ace_Latn__"), 256002) self.assertEqual(self.tokenizer.convert_tokens_to_ids("__shn__"), 256152) 
self.assertEqual(self.tokenizer.convert_tokens_to_ids("__eng__"), 256047) self.assertEqual(self.tokenizer.convert_tokens_to_ids("__fra__"), 256057) self.assertEqual(self.tokenizer.convert_tokens_to_ids("__quy__"), 256144) def test_tokenizer_tgt_lang(self): ids = self.tokenizer(self.src_text, src_lang="fra").input_ids[0] self.assertListEqual(self.expected_src_tokens[1:], ids[1 : len(self.expected_src_tokens)]) self.assertEqual(256057, ids[0]) rest_ids = ids[len(self.expected_src_tokens) :] self.assertListEqual([0] * len(rest_ids), rest_ids) ids = self.tokenizer(self.src_text, src_lang="__shn__").input_ids[0] self.assertListEqual(self.expected_src_tokens[1:], ids[1 : len(self.expected_src_tokens)]) self.assertEqual(256152, ids[0]) # Copied from tests.models.nllb.test_tokenization_nllb.NllbDistilledIntegrationTest.test_enro_tokenizer_decode_ignores_language_codes def test_enro_tokenizer_decode_ignores_language_codes(self): self.assertIn(RO_CODE, self.tokenizer.all_special_ids) generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047] # fmt: skip result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) self.assertEqual(result, expected_romanian) self.assertNotIn(self.tokenizer.eos_token, result) def test_enro_tokenizer_truncation(self): src_text = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0], str) desired_max_length = 10 ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0] self.assertEqual(ids[-1], 3) self.assertEqual(ids[0], EN_CODE) self.assertEqual(len(ids), desired_max_length) @require_torch def test_enro_tokenizer_prepare_batch(self): batch = self.tokenizer( self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), pad_to_multiple_of=None, return_tensors="pt", ) batch["decoder_input_ids"] = shift_tokens_right( batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.convert_tokens_to_ids("__ron__") ) self.assertIsInstance(batch, BatchEncoding) self.assertEqual((2, 15), batch.input_ids.shape) self.assertEqual((2, 15), batch.attention_mask.shape) result = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens, result) self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE]) self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id]) def test_seq2seq_max_length(self): batch = self.tokenizer( self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt", pad_to_multiple_of=None ) targets = self.tokenizer( text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt" ) labels = targets["input_ids"] batch["decoder_input_ids"] = shift_tokens_right( labels, self.tokenizer.pad_token_id, decoder_start_token_id=self.tokenizer.convert_tokens_to_ids(self.tokenizer.tgt_lang), ) self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.decoder_input_ids.shape[1], 10) @require_torch def test_tokenizer_translation(self): inputs = self.tokenizer._build_translation_inputs( "A test", return_tensors="pt", src_lang="eng", tgt_lang="fra" ) self.assertEqual( nested_simplify(inputs), { # A, test, EOS, en_XX "input_ids": [[256047, 70, 7356, 3]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 256057, }, ) 
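# The tests below exercise raw SentencePiece behaviour on the fixture vocab: dummy prefix insertion, extra-whitespace handling and special-token stripping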
@require_sentencepiece @require_tokenizers class CommonSpmIntegrationTests(unittest.TestCase): """ A class that regroups important test to make sure that we properly handle the special tokens. """ @classmethod def setUpClass(cls): tokenizer = SeamlessM4TTokenizer(SAMPLE_VOCAB, extra_ids=0, add_bos_token=False, legacy=False) tokenizer.add_special_tokens({"additional_special_tokens": [AddedToken("<s>", rstrip=False, lstrip=False)]}) cls.tokenizer = tokenizer return cls def test_add_dummy_prefix(self): # make sure `'▁'` is prepended, and outputs match sp_model's # `sentencepiece.NormalizerSpec.add_dummy_prefix` attribute input_ids = self.tokenizer.encode(". Hello") self.assertEqual(input_ids, [3, 1, 8, 5, 157, 87, 21, 3]) sp_encode = self.tokenizer.sp_model.encode(". Hello") # [bos, lang_id, _] + offset_sp_encode self.assertEqual(input_ids[:-1], [3, 1, 8] + [i + self.tokenizer.fairseq_offset for i in sp_encode]) tokens = self.tokenizer.tokenize(". Hello") self.assertEqual(tokens, ["▁", ".", "▁He", "ll", "o"]) tokens = self.tokenizer.tokenize("") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode("", out_type=str)) tokens = self.tokenizer.tokenize(" ") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode(" ", out_type=str)) tokens = self.tokenizer.tokenize("▁") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode("▁", out_type=str)) def test_remove_extra_whitespaces(self): # make sure the extra spaces are eaten. Since the sample vocab does not have # `______`. sentencepiece.NormalizerSpec.remove_extra_whitespaces attribute is set to False input_ids = self.tokenizer.encode(" . Hello") self.assertEqual(input_ids, [3, 1, 8, 5, 157, 87, 21, 3]) sp_encode = self.tokenizer.sp_model.encode(" . Hello") self.assertEqual([i - self.tokenizer.fairseq_offset for i in input_ids[2:-1]], [7] + sp_encode) tokens = self.tokenizer.tokenize(" . 
Hello") self.assertEqual(tokens, ["▁", ".", "▁He", "ll", "o"]) # `'▁'` is also a whitespace input_ids = self.tokenizer.encode("▁He is not") self.assertEqual(input_ids, [3, 1, 157, 47, 45, 3]) tokens = self.tokenizer.tokenize("▁He is not") sp_encode = [ self.tokenizer.sp_model.piece_to_id("▁He"), self.tokenizer.sp_model.piece_to_id("▁is"), self.tokenizer.sp_model.piece_to_id("▁not"), ] self.assertEqual([i - self.tokenizer.fairseq_offset for i in input_ids[2:-1]], sp_encode) self.assertEqual(tokens, ["▁He", "▁is", "▁not"]) # no extra space added input_ids = self.tokenizer.encode("▁He is not<s> ▁He") self.assertEqual(input_ids, [3, 1, 157, 47, 45, 2, 157, 3]) tokens = self.tokenizer.tokenize("▁He is not<s> ▁He") self.assertEqual(tokens, ["▁He", "▁is", "▁not", "<s>", "▁He"]) # spaces are eaten by spm + our strip # make sure that the output after the extra id is the same as if # extra_id was not there input_ids = self.tokenizer.encode("▁He is not ▁He") self.assertEqual(input_ids, [3, 1, 157, 47, 45, 157, 3]) tokens = self.tokenizer.tokenize("▁He is not ▁He") self.assertEqual(tokens, ["▁He", "▁is", "▁not", "▁He"]) # spaces are eaten by spm even if not start def test_character_after_special_token(self): # Make sure that `tokenizer.tokenize` is similar to # adding the equivalent special token to the vocab input_ids = self.tokenizer.encode("Hey <s>I") self.assertEqual(input_ids, [3, 1, 157, 31, 2, 101, 3]) sp_encode = self.tokenizer.sp_model.encode("Hey .I") # the last token besides eos should be 100 offset self.assertEqual(input_ids[-2] - self.tokenizer.fairseq_offset, sp_encode[-1]) tokens = self.tokenizer.tokenize("<s>I") self.assertEqual(tokens, ["<s>", "I"]) input_ids = self.tokenizer.encode("Hello, <s>,") self.assertEqual(input_ids, [3, 1, 157, 87, 21, 4, 2, 4, 3]) tokens = self.tokenizer.tokenize("Hello, <s>,") self.assertEqual(tokens, ["▁He", "ll", "o", ",", "<s>", ","]) def test_special_tokens_strip(self): input_ids = self.tokenizer.encode(" <s> ,") self.assertEqual(input_ids, [3, 1, 2, 8, 4, 3]) tokens = self.tokenizer.tokenize(" <s> ,") # spaces are eaten by rstrip / lstrip + spm sp_model.encode(" ") = [] self.assertEqual(tokens, ["<s>", "▁", ","]) input_ids = self.tokenizer.encode("No <s> ▁He") self.assertEqual(input_ids, [3, 1, 285, 2, 157, 3]) tokens = self.tokenizer.tokenize("No <s> ▁He") self.assertEqual(tokens, ["▁No", "<s>", "▁He"]) # spaces are eaten by rstrip / lstrip
transformers/tests/models/seamless_m4t/test_tokenization_seamless_m4t.py/0
{ "file_path": "transformers/tests/models/seamless_m4t/test_tokenization_seamless_m4t.py", "repo_id": "transformers", "token_count": 15005 }
467
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch SigLIP model.""" import inspect import os import tempfile import unittest from typing import Tuple import numpy as np import requests from parameterized import parameterized from pytest import mark from transformers import SiglipConfig, SiglipTextConfig, SiglipVisionConfig from transformers.testing_utils import ( require_flash_attn, require_torch, require_torch_gpu, require_torch_sdpa, require_vision, slow, torch_device, ) from transformers.utils import ( is_torch_available, is_torch_bf16_available_on_device, is_torch_fp16_available_on_device, is_torch_sdpa_available, is_vision_available, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, is_flaky, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SiglipForImageClassification, SiglipModel, SiglipTextModel, SiglipVisionModel if is_torch_sdpa_available(): from torch.nn.attention import SDPBackend, sdpa_kernel if is_vision_available(): from PIL import Image from transformers import SiglipProcessor class SiglipModelTesterMixin(ModelTesterMixin): def test_eager_matches_sdpa_inference( self, torch_dtype: str, use_attention_mask_options: Tuple[bool, ...] = (True, False), logit_keys: Tuple[str, ...] = ("logits_per_image", "logits_per_text", "image_embeds", "text_embeds"), ): if not self.all_model_classes[0]._supports_sdpa: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") if torch_dtype == "float16" and not is_torch_fp16_available_on_device(torch_device): self.skipTest(f"float16 not supported on {torch_device} (on the specific device currently used)") if torch_dtype == "bfloat16" and not is_torch_bf16_available_on_device(torch_device): self.skipTest( f"bfloat16 not supported on {torch_device} (on the specific device currently used, e.g. 
Nvidia T4 GPU)" ) # Convert to torch dtype dtypes = { "float16": torch.float16, "bfloat16": torch.bfloat16, "float32": torch.float32, } torch_dtype = dtypes[torch_dtype] atols = { torch.float32: 1e-5, torch.bfloat16: 3e-2, torch.float16: 5e-3, } rtols = { torch.float32: 1e-4, torch.bfloat16: 3e-2, torch.float16: 5e-3, } atol = atols[torch_dtype] rtol = rtols[torch_dtype] def get_mean_reldiff(msg, current_case, x, ref, atol, rtol): return f"{msg} {current_case}: mean relative difference: {((x - ref).abs() / (ref.abs() + 1e-12)).mean():.3e}, torch atol = {atol}, torch rtol = {rtol}" for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # Load the model with SDPA model_sdpa = model_class.from_pretrained(tmpdirname, torch_dtype=torch_dtype) model_sdpa = model_sdpa.eval().to(torch_device) # Load model with eager attention model_eager = model_class.from_pretrained( tmpdirname, torch_dtype=torch_dtype, attn_implementation="eager", ) model_eager = model_eager.eval().to(torch_device) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") self.assertTrue(model_eager.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: raise ValueError("The eager model should not have SDPA attention layers") has_sdpa = False for name, submodule in model_sdpa.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: has_sdpa = True break if not has_sdpa and model_sdpa.config.model_type != "falcon": raise ValueError("The SDPA model should have SDPA attention layers") # We use these for loops instead of parameterized.expand just for the interest of avoiding loading/saving the model each time, # but it would be nicer to have an efficient way to use parameterized.expand cases = [ (use_mask, output_attentions, sdpa_backend, batch_size) for use_mask in use_attention_mask_options for output_attentions in [True, False] for sdpa_backend in [ SDPBackend.MATH, [SDPBackend.FLASH_ATTENTION, SDPBackend.MATH], [SDPBackend.EFFICIENT_ATTENTION, SDPBackend.MATH], [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION, SDPBackend.MATH], ] for batch_size in [1, 5] ] fail_cases = [] for use_mask, output_attentions, sdpa_backend, batch_size in cases: processed_inputs = inputs_dict.copy() # convert to torch_dtype if "pixel_values" in processed_inputs: processed_inputs["pixel_values"] = processed_inputs["pixel_values"].to(torch_dtype) # slice for different batch sizes for key in ["pixel_values", "input_ids", "attention_mask"]: if key in processed_inputs: processed_inputs[key] = processed_inputs[key][:batch_size] # set attention mask with left padding if not use_mask: processed_inputs.pop("attention_mask", None) else: dummy_attention_mask = processed_inputs["attention_mask"] dummy_attention_mask[:] = 1 dummy_attention_mask[:, :1] = 0 processed_inputs["attention_mask"] = dummy_attention_mask processed_inputs["output_attentions"] = output_attentions processed_inputs["output_hidden_states"] = True current_case = ( f"padding_side=left, use_mask={use_mask}, batch_size={batch_size}, sdpa_backend={sdpa_backend}" ) prepared_inputs = self._prepare_for_class(processed_inputs, model_class) with torch.no_grad(): try: with 
sdpa_kernel(sdpa_backend): outputs_eager = model_eager(**prepared_inputs) outputs_sdpa = model_sdpa(**prepared_inputs) except Exception as e: fail_cases.append(f"{current_case}: {e}") continue for key in logit_keys: eager_logits = outputs_eager[key] sdpa_logits = outputs_sdpa[key] if use_mask: eager_logits = eager_logits[:, 1:] sdpa_logits = sdpa_logits[:, 1:] is_close = torch.allclose(eager_logits, sdpa_logits, atol=atol, rtol=rtol) if not is_close: fail_cases.append(get_mean_reldiff(key, current_case, sdpa_logits, eager_logits, atol, rtol)) self.assertTrue(len(fail_cases) == 0, "\n".join(fail_cases)) class SiglipVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches # Copied from tests.models.clip.test_modeling_clip.CLIPVisionModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return SiglipVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = SiglipVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) # Copied from tests.models.clip.test_modeling_clip.CLIPVisionModelTester.prepare_config_and_inputs_for_common def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SiglipVisionModelTest(SiglipModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SIGLIP does not use input_ids, inputs_embeds, attention_mask and seq_length. 
""" all_model_classes = (SiglipVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False # MP works but offload doesn't work when the MultiheadAttention is offloaded # TODO: One potential solution would be to add to set preload_module_classes = ["SiglipMultiheadAttentionPoolingHead"] # in the dispatch_model function test_cpu_offload = False test_disk_offload_safetensors = False test_disk_offload_bin = False def setUp(self): self.model_tester = SiglipVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=SiglipVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="SIGLIP does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="SiglipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="SiglipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="Siglip uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass @slow def test_model_from_pretrained(self): model_name = "google/siglip-base-patch16-224" model = SiglipVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) @require_torch_sdpa @slow @is_flaky() def test_eager_matches_sdpa_inference(self, torch_dtype: str): super().test_eager_matches_sdpa_inference( torch_dtype=torch_dtype, logit_keys=("pooler_output", "last_hidden_state"), use_attention_mask_options=(False,), ) class SiglipTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, 
initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return SiglipTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = SiglipTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTester.prepare_config_and_inputs_for_common def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class SiglipTextModelTest(SiglipModelTesterMixin, unittest.TestCase): all_model_classes = (SiglipTextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False model_split_percents = [0.5, 0.8, 0.9] # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.setUp with CLIP->Siglip def setUp(self): self.model_tester = SiglipTextModelTester(self) self.config_tester = ConfigTester(self, config_class=SiglipTextConfig, hidden_size=37) # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_config def test_config(self): self.config_tester.run_common_tests() # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_model def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="SiglipTextModel does not support standalone training") def test_training(self): pass @unittest.skip(reason="SiglipTextModel does not support standalone training") def 
test_training_gradient_checkpointing(self): pass @unittest.skip(reason="SiglipTextModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="SiglipTextModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Siglip does not use inputs_embeds") # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_inputs_embeds def test_inputs_embeds(self): pass @unittest.skip(reason="SiglipTextModel has no base class and is not available in MODEL_MAPPING") # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_save_load_fast_init_from_base def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="SiglipTextModel has no base class and is not available in MODEL_MAPPING") # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_save_load_fast_init_to_base def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="Siglip uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass @slow def test_model_from_pretrained(self): model_name = "google/siglip-base-patch16-224" model = SiglipTextModel.from_pretrained(model_name) self.assertIsNotNone(model) @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) @require_torch_sdpa @slow @is_flaky() def test_eager_matches_sdpa_inference(self, torch_dtype: str): super().test_eager_matches_sdpa_inference( torch_dtype=torch_dtype, logit_keys=("pooler_output", "last_hidden_state"), use_attention_mask_options=(False, True), ) class SiglipModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = SiglipTextModelTester(parent, **text_kwargs) self.vision_model_tester = SiglipVisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training # Copied from tests.models.clip.test_modeling_clip.CLIPModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return SiglipConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = SiglipModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": False, } return config, inputs_dict @require_torch class SiglipModelTest(SiglipModelTesterMixin, 
PipelineTesterMixin, unittest.TestCase): all_model_classes = (SiglipModel,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": SiglipModel} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False # MP works but offload doesn't work when the MultiheadAttention is offloaded # TODO: One potential solution would be to add to set preload_module_classes = ["SiglipMultiheadAttentionPoolingHead"] # in the dispatch_model function test_cpu_offload = False test_disk_offload_safetensors = False test_disk_offload_bin = False # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.setUp with CLIP->Siglip def setUp(self): self.model_tester = SiglipModelTester(self) # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_model def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_hidden_states_output def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_inputs_embeds def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_retain_grad_hidden_states_attentions def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="SiglipModel does not have input/output embeddings") # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_model_get_set_embeddings def test_model_get_set_embeddings(self): pass @unittest.skip(reason="Siglip uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest._create_and_check_torchscript with CLIP->Siglip def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # Siglip needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in 
non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_load_vision_text_config with CLIP->Siglip def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save SiglipConfig and check if we can load SiglipVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = SiglipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save SiglipConfig and check if we can load SiglipTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = SiglipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): model_name = "google/siglip-base-patch16-224" model = SiglipModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.bfloat16) model.to(torch_device) dummy_pixel_values = inputs_dict["pixel_values"].to(torch.bfloat16) dummy_input_ids = inputs_dict["input_ids"] outputs = model(pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, output_hidden_states=True) outputs_fa = model_fa( pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, output_hidden_states=True ) self.assertTrue( torch.allclose(outputs.logits_per_image, outputs_fa.logits_per_image, atol=4e-2, rtol=4e-2), f"Image logits max diff: {torch.max(torch.abs(outputs.logits_per_image - outputs_fa.logits_per_image))}", ) self.assertTrue( torch.allclose(outputs.logits_per_text, outputs_fa.logits_per_text, atol=4e-2, rtol=4e-2), f"Text logits max diff: {torch.max(torch.abs(outputs.logits_per_text - outputs_fa.logits_per_text))}", ) # Test with attention mask dummy_attention_mask = inputs_dict["attention_mask"] if dummy_attention_mask is not None: dummy_attention_mask[:, 1:] = 1 dummy_attention_mask[:, :1] = 0 outputs = model( pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, attention_mask=dummy_attention_mask, output_hidden_states=True, ) outputs_fa = model_fa( pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, attention_mask=dummy_attention_mask, output_hidden_states=True, ) 
self.assertTrue( torch.allclose(outputs.logits_per_image, outputs_fa.logits_per_image, atol=4e-2, rtol=4e-2), f"Logits max diff: {torch.max(torch.abs(outputs.logits_per_image - outputs_fa.logits_per_image))}", ) self.assertTrue( torch.allclose(outputs.logits_per_text, outputs_fa.logits_per_text, atol=4e-2, rtol=4e-2), f"Logits max diff: {torch.max(torch.abs(outputs.logits_per_text - outputs_fa.logits_per_text))}", ) # check with inference + dropout model.train() _ = model_fa( pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, attention_mask=dummy_attention_mask, output_hidden_states=True, ) @require_flash_attn @require_torch_gpu @mark.flash_attn_test def test_flash_attn_2_inference_equivalence_right_padding(self): self.skipTest("SigLIP does not support right padding") @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) @require_torch_sdpa @slow @is_flaky() def test_eager_matches_sdpa_inference(self, torch_dtype: str): super().test_eager_matches_sdpa_inference( torch_dtype=torch_dtype, logit_keys=("logits_per_image", "logits_per_text", "image_embeds", "text_embeds"), use_attention_mask_options=(False, True), ) class SiglipForImageClassificationModelTester(SiglipModelTester): def __init__(self, parent): super().__init__(parent) self.batch_size = self.vision_model_tester.batch_size self.num_hidden_layers = self.vision_model_tester.num_hidden_layers self.hidden_size = self.vision_model_tester.hidden_size self.seq_length = self.vision_model_tester.seq_length def prepare_config_and_inputs(self): _, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SiglipForImageClassificationModelTest(SiglipModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (SiglipForImageClassification,) if is_torch_available() else () pipeline_model_mapping = {"image-classification": SiglipForImageClassification} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False # MP works but offload doesn't work when the MultiheadAttention is offloaded # TODO: One potential solution would be to add to set preload_module_classes = ["SiglipMultiheadAttentionPoolingHead"] # in the dispatch_model function test_cpu_offload = False test_disk_offload_safetensors = False test_disk_offload_bin = False def setUp(self): self.model_tester = SiglipForImageClassificationModelTester(self) @unittest.skip(reason="SiglipForImageClassification does not support inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="SiglipForImageClassification does not support inputs_embeds") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="SiglipForImageClassification does not support gradient checkpointing yet") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="SiglipForImageClassification does not support gradient checkpointing yet") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="SiglipForImageClassification does not support gradient checkpointing yet") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Siglip uses the same initialization scheme as the Flax 
original implementation") def test_initialization(self): pass @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) @require_torch_sdpa @slow @is_flaky() def test_eager_matches_sdpa_inference(self, torch_dtype: str): super().test_eager_matches_sdpa_inference( torch_dtype=torch_dtype, logit_keys=("logits",), use_attention_mask_options=(False,) ) # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) return image @require_vision @require_torch class SiglipModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "google/siglip-base-patch16-224" model = SiglipModel.from_pretrained(model_name).to(torch_device) processor = SiglipProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=["a photo of 2 cats", "a photo of 2 dogs"], images=image, padding="max_length", return_tensors="pt" ).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits_per_image = outputs.logits_per_image logits_per_text = outputs.logits_per_text # verify the logits self.assertEqual( logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[-0.7567, -10.3354]], device=torch_device) self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)) # verify the probs probs = torch.sigmoid(logits_per_image) # these are the probabilities expected_probs = torch.tensor([[3.1937e-01, 3.2463e-05]], device=torch_device) self.assertTrue(torch.allclose(probs, expected_probs, atol=1e-3)) @slow def test_inference_interpolate_pos_encoding(self): model_name = "google/siglip-base-patch16-224" model = SiglipModel.from_pretrained(model_name).to(torch_device) # 640 x 480 image image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") processor = SiglipProcessor.from_pretrained(model_name, do_resize=False, size={"height": 480, "width": 640}) inputs = processor(text="what's in the image", images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs, interpolate_pos_encoding=True) # verify the shape # patch size = 16 # batch size 1, (640/16) * (480/16) = 1200 patches, 768 hidden size expected_shape = torch.Size((1, 1200, 768)) self.assertEqual(outputs.vision_model_output.last_hidden_state.shape, expected_shape)
transformers/tests/models/siglip/test_modeling_siglip.py/0
{ "file_path": "transformers/tests/models/siglip/test_modeling_siglip.py", "repo_id": "transformers", "token_count": 18200 }
468
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Swin model.""" import collections import unittest from transformers import SwinConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SwinModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2], ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.patch_norm = patch_norm self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.is_training = is_training self.scope = scope self.use_labels = use_labels self.type_sequence_label_size = type_sequence_label_size self.encoder_stride = encoder_stride self.out_features = out_features self.out_indices = out_indices def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return SwinConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices, ) def create_and_check_model(self, config, pixel_values, labels): model = SwinModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim)) def create_and_check_backbone(self, config, pixel_values, labels): model = SwinBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify hidden states self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], 16, 16]) # verify channels self.parent.assertEqual(len(model.channels), len(config.out_features)) # verify backbone works with out_features=None config.out_features = None model = SwinBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, model.channels[-1], 4, 4]) # verify channels self.parent.assertEqual(len(model.channels), 1) def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels): model = SwinForMaskedImageModeling(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images config.num_channels = 1 model = SwinForMaskedImageModeling(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = SwinForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = SwinForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( SwinModel, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, ) if is_torch_available() else () ) pipeline_model_mapping = ( 
{"image-feature-extraction": SwinModel, "image-classification": SwinForImageClassification} if is_torch_available() else {} ) fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = SwinModelTester(self) self.config_tester = ConfigTester( self, config_class=SwinConfig, embed_dim=37, has_text_modality=False, common_properties=["image_size", "patch_size", "num_channels"], ) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) # TODO: check if this works again for PyTorch 2.x.y @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def test_multi_gpu_data_parallel_forward(self): pass def test_training_gradient_checkpointing(self): super().test_training_gradient_checkpointing() def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @unittest.skip(reason="Swin does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Swin Transformer does not use feedforward chunking") def test_feed_forward_chunking(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = len(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True window_size_squared = config.window_size**2 model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) # also another +1 for reshaped_hidden_states added_hidden_states = 1 if 
model_class.__name__ == "SwinBackbone" else 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) def check_hidden_states_output(self, inputs_dict, config, model_class, image_size): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # Swin has a different seq_length patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], ) if not model_class.__name__ == "SwinBackbone": reshaped_hidden_states = outputs.reshaped_hidden_states self.assertEqual(len(reshaped_hidden_states), expected_num_layers) batch_size, num_channels, height, width = reshaped_hidden_states[0].shape reshaped_hidden_states = ( reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], ) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) def test_hidden_states_output_with_padding(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.patch_size = 3 image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) @slow def test_model_from_pretrained(self): model_name = "microsoft/swin-tiny-patch4-window7-224" model = SwinModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, 
inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @require_vision @require_torch class SwinModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = SwinForImageClassification.from_pretrained("microsoft/swin-tiny-patch4-window7-224").to(torch_device) image_processor = self.default_image_processor image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.0948, -0.6454, -0.0921]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow def test_inference_interpolate_pos_encoding(self): # Swin models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. model = SwinModel.from_pretrained("microsoft/swin-tiny-patch4-window7-224").to(torch_device) image_processor = self.default_image_processor image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = image_processor(images=image, size={"height": 481, "width": 481}, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) # forward pass with torch.no_grad(): outputs = model(pixel_values, interpolate_pos_encoding=True) # verify the logits expected_shape = torch.Size((1, 256, 768)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) @require_torch class SwinBackboneTest(unittest.TestCase, BackboneTesterMixin): all_model_classes = (SwinBackbone,) if is_torch_available() else () config_class = SwinConfig def setUp(self): self.model_tester = SwinModelTester(self)
transformers/tests/models/swin/test_modeling_swin.py/0
{ "file_path": "transformers/tests/models/swin/test_modeling_swin.py", "repo_id": "transformers", "token_count": 8981 }
469
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import unittest from huggingface_hub import hf_hub_download from transformers import UdopConfig, is_torch_available, is_vision_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UdopEncoderModel, UdopForConditionalGeneration, UdopModel, UdopProcessor if is_vision_available(): from PIL import Image class UdopModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, # For common tests is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=32, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, scope=None, decoder_layers=None, range_bbox=1000, decoder_start_token_id=0, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.scope = None self.decoder_layers = decoder_layers self.range_bbox = range_bbox self.decoder_start_token_id = decoder_start_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) bbox = ids_tensor([self.batch_size, self.encoder_seq_length, 4], self.range_bbox).float() # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: t = bbox[i, j, 3] bbox[i, j, 3] = bbox[i, j, 1] bbox[i, j, 1] = t if bbox[i, j, 2] < bbox[i, j, 0]: t = bbox[i, j, 2] bbox[i, j, 2] = bbox[i, j, 0] bbox[i, j, 0] = t decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], 
self.vocab_size) config = self.get_config() return ( config, input_ids, bbox, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def get_config(self): return UdopConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def create_and_check_model( self, config, input_ids, bbox, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = UdopModel(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, bbox=bbox, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, bbox=bbox, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(decoder_past), config.num_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_with_lm_head( self, config, input_ids, bbox, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = UdopForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, bbox=bbox, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_generate_with_past_key_values( self, config, input_ids, bbox, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = UdopForConditionalGeneration(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], bbox=bbox[:1, :, :], num_beams=2, max_length=5, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate( input_ids[:1], bbox=bbox[:1, :, :], num_beams=2, max_length=5, do_sample=True ) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_model_fp16_forward( self, config, input_ids, bbox, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = UdopForConditionalGeneration(config=config).to(torch_device).half().eval() output = model(input_ids, bbox=bbox, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids).logits self.parent.assertFalse(torch.isnan(output).any().item()) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, bbox, 
decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "bbox": bbox, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "use_cache": False, } return config, inputs_dict @require_torch class UdopModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( UdopModel, UdopForConditionalGeneration, ) if is_torch_available() else () ) all_generative_model_classes = (UdopForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": UdopModel} if is_torch_available() else {} fx_compatible = False test_pruning = False test_torchscript = False test_head_masking = False test_resize_embeddings = True test_model_parallel = False is_encoder_decoder = True test_cpu_offload = False # The small UDOP model needs higher percentages for CPU/MP tests model_split_percents = [0.8, 0.9] def setUp(self): self.model_tester = UdopModelTester(self) self.config_tester = ConfigTester(self, config_class=UdopConfig, d_model=37) def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class.__name__ == "UdopForConditionalGeneration": if return_labels: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) return inputs_dict def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_with_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_lm_head(*config_and_inputs) def test_generate_with_past_key_values(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_generate_with_past_key_values(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) @unittest.skip(reason="Gradient checkpointing is not supported by this model") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = sorted([*signature.parameters.keys()]) expected_arg_names = [ "attention_mask", "bbox", "cross_attn_head_mask", "decoder_attention_mask", "decoder_head_mask", "decoder_input_ids", "decoder_inputs_embeds", "encoder_outputs", "head_mask", "input_ids", "inputs_embeds", ] if model_class in self.all_generative_model_classes: expected_arg_names.append( 
"labels", ) expected_arg_names = sorted(expected_arg_names) self.assertListEqual(sorted(arg_names[: len(expected_arg_names)]), expected_arg_names) @unittest.skip( "Not currently compatible. Fails with - NotImplementedError: Cannot copy out of meta tensor; no data!" ) def test_save_load_low_cpu_mem_usage(self): pass @slow def test_model_from_pretrained(self): model_name = "microsoft/udop-large" model = UdopForConditionalGeneration.from_pretrained(model_name) self.assertIsNotNone(model) class UdopEncoderOnlyModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, seq_length=7, # For common tests is_training=False, use_attention_mask=True, hidden_size=32, num_hidden_layers=5, decoder_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=32, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, scope=None, range_bbox=1000, ): self.parent = parent self.batch_size = batch_size # For common tests self.seq_length = seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.decoder_layers = decoder_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.scope = None self.range_bbox = range_bbox def get_config(self): return UdopConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, is_encoder_decoder=False, ) def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).float() # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: t = bbox[i, j, 3] bbox[i, j, 3] = bbox[i, j, 1] bbox[i, j, 1] = t if bbox[i, j, 2] < bbox[i, j, 0]: t = bbox[i, j, 2] bbox[i, j, 2] = bbox[i, j, 0] bbox[i, j, 0] = t attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) config = self.get_config() return ( config, input_ids, bbox, attention_mask, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, bbox, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "bbox": bbox, "attention_mask": attention_mask, } return config, inputs_dict def create_and_check_model( self, config, input_ids, bbox, attention_mask, ): model = UdopEncoderModel(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, ) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_fp16_forward( self, config, input_ids, bbox, attention_mask, ): model = 
UdopEncoderModel(config=config).to(torch_device).half().eval() output = model(input_ids, bbox=bbox, attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) class UdopEncoderOnlyModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (UdopEncoderModel,) if is_torch_available() else () test_pruning = False test_torchscript = False test_head_masking = False test_resize_embeddings = False test_model_parallel = False all_parallelizable_model_classes = (UdopEncoderModel,) if is_torch_available() else () def setUp(self): self.model_tester = UdopEncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=UdopConfig, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip( "Not currently compatible. Fails with - NotImplementedError: Cannot copy out of meta tensor; no data!" ) def test_save_load_low_cpu_mem_usage(self): pass @require_torch @require_sentencepiece @require_tokenizers @require_vision @slow class UdopModelIntegrationTests(unittest.TestCase): @cached_property def image(self): filepath = hf_hub_download( repo_id="hf-internal-testing/fixtures_docvqa", filename="document_2.png", repo_type="dataset" ) image = Image.open(filepath).convert("RGB") return image @cached_property def processor(self): return UdopProcessor.from_pretrained("microsoft/udop-large") @cached_property def model(self): return UdopForConditionalGeneration.from_pretrained("microsoft/udop-large").to(torch_device) def test_conditional_generation(self): processor = self.processor model = self.model prompt = "Question answering. In which year is the report made?" encoding = processor(images=self.image, text=prompt, return_tensors="pt").to(torch_device) predicted_ids = model.generate(**encoding) predicted_text = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0] self.assertEqual(predicted_text, "2013")
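# Illustrative usage sketch (not part of the test suite): a standalone version of the document-VQA
# flow exercised by `UdopModelIntegrationTests.test_conditional_generation` above, using the same
# checkpoint, fixture image and prompt. Kept as an uncalled helper so importing this module has no
# side effects.
def _illustrative_udop_docvqa_example():
    from huggingface_hub import hf_hub_download
    from PIL import Image

    from transformers import UdopForConditionalGeneration, UdopProcessor

    processor = UdopProcessor.from_pretrained("microsoft/udop-large")
    model = UdopForConditionalGeneration.from_pretrained("microsoft/udop-large")

    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_docvqa", filename="document_2.png", repo_type="dataset"
    )
    image = Image.open(filepath).convert("RGB")

    prompt = "Question answering. In which year is the report made?"
    encoding = processor(images=image, text=prompt, return_tensors="pt")
    predicted_ids = model.generate(**encoding)
    # The integration test above expects the decoded answer "2013" for this document.
    return processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]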
transformers/tests/models/udop/test_modeling_udop.py/0
{ "file_path": "transformers/tests/models/udop/test_modeling_udop.py", "repo_id": "transformers", "token_count": 9694 }
470
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch VideoLlava model.""" import gc import unittest import numpy as np import requests from huggingface_hub import hf_hub_download from transformers import ( VideoLlavaConfig, VideoLlavaForConditionalGeneration, VideoLlavaProcessor, is_torch_available, is_vision_available, ) from transformers.testing_utils import require_bitsandbytes, require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch if is_vision_available(): from PIL import Image class VideoLlavaVisionText2TextModelTester: def __init__( self, parent, ignore_index=-100, image_token_index=0, video_token_index=1, projector_hidden_act="gelu", seq_length=13, num_frames=8, vision_feature_select_strategy="default", vision_feature_layer=-1, text_config={ "model_type": "llama", "seq_length": 13, "is_training": True, "use_input_mask": True, "use_token_type_ids": False, "use_labels": True, "vocab_size": 99, "hidden_size": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "attention_probs_dropout_prob": 0.1, "max_position_embeddings": 2048, # we need it high because videos are 8 frames "type_vocab_size": 16, "type_sequence_label_size": 2, "initializer_range": 0.02, "num_labels": 3, "num_choices": 4, "pad_token_id": 0, }, is_training=True, vision_config={ "model_type": "clip_vision_model", "batch_size": 12, "image_size": 30, "patch_size": 2, "num_channels": 3, "is_training": True, "hidden_size": 32, "projection_dim": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 37, "dropout": 0.1, "attention_dropout": 0.1, "initializer_range": 0.02, }, ): self.parent = parent self.ignore_index = ignore_index self.image_token_index = image_token_index self.video_token_index = video_token_index self.projector_hidden_act = projector_hidden_act self.vision_feature_select_strategy = vision_feature_select_strategy self.vision_feature_layer = vision_feature_layer self.text_config = text_config self.vision_config = vision_config self.seq_length = seq_length self.num_frames = num_frames self.num_hidden_layers = text_config["num_hidden_layers"] self.vocab_size = text_config["vocab_size"] self.hidden_size = text_config["hidden_size"] self.num_attention_heads = text_config["num_attention_heads"] self.is_training = is_training self.batch_size = 5 self.num_channels = 3 self.image_size = 224 self.encoder_seq_length = 2044 def get_config(self): return VideoLlavaConfig( text_config=self.text_config, vision_config=self.vision_config, ignore_index=self.ignore_index, image_token_index=self.image_token_index, video_token_index=self.video_token_index, projector_hidden_act=self.projector_hidden_act, 
vision_feature_select_strategy=self.vision_feature_select_strategy, vision_feature_layer=self.vision_feature_layer, ) def prepare_config_and_inputs(self): pixel_values_videos = floats_tensor( [ self.batch_size, self.num_frames, self.vision_config["num_channels"], self.vision_config["image_size"], self.vision_config["image_size"], ] ) pixel_values_images = floats_tensor( [ self.batch_size, self.vision_config["num_channels"], self.vision_config["image_size"], self.vision_config["image_size"], ] ) config = self.get_config() return config, pixel_values_images, pixel_values_videos def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values_images, pixel_values_videos = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1 attention_mask = input_ids.ne(1).to(torch_device) # we are giving 3 videos and 3 images. Need to pass in image and video tokens, both # also need to make sure no other special tokens are set input_ids[(input_ids == 0) | (input_ids == 1)] = 3 input_ids[:, 0] = config.video_token_index input_ids[:, 1:2] = config.image_token_index inputs_dict = { "pixel_values_videos": pixel_values_videos, "pixel_values_images": pixel_values_images, "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict def prepare_config_and_inputs_for_batched_test(self): config_and_inputs = self.prepare_config_and_inputs() config, _, pixel_values_videos = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1 attention_mask = input_ids.ne(1).to(torch_device) # make sure no other special tokens are set input_ids[(input_ids == 0) | (input_ids == 1)] = 3 input_ids[:, 0] = config.video_token_index inputs_dict = { "pixel_values_videos": pixel_values_videos, "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class VideoLlavaForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): """ Model tester for `VideoLlavaForConditionalGeneration`. 
""" all_model_classes = (VideoLlavaForConditionalGeneration,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = True test_head_masking = False def setUp(self): self.model_tester = VideoLlavaVisionText2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=VideoLlavaConfig, has_text_modality=False) @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecure seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Pass because video-LLava requires `attention_mask is not None`") def test_sdpa_can_compile_dynamic(self): pass @unittest.skip(reason="Pass because video-LLava requires `attention_mask is not None`") def test_sdpa_can_dispatch_on_flash(self): pass def test_mixed_input(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() # test that the forward does not fail with torch.no_grad(): _ = model(**inputs) # if we remove some images from inputs leaving only one # image number mismatch error should raise inputs["pixel_values_images"] = inputs["pixel_values_images"][:1] with self.assertRaises(ValueError): _ = model(**inputs) def test_video_only_input(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() # replace video_token with dummy id which is not video token id # error that video-tokens and num-of-video-inputs mismatch will be raised inputs["input_ids"][:, 1:2] = 2 with self.assertRaises(ValueError): _ = model(**inputs) inputs["pixel_values_images"] = None _ = model(**inputs) def test_image_only_input(self): config, inputs = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device).eval() # set dummy id, which is not image token id, same as above inputs["input_ids"][:, :1] = 2 with self.assertRaises(ValueError): _ = model(**inputs) inputs["pixel_values_videos"] = None _ = model(**inputs) def test_batching_equivalence(self): def recursive_check(batched_object, single_row_object, model_name, key): if isinstance(batched_object, (list, tuple)): for batched_object_value, single_row_object_value in zip(batched_object, single_row_object): recursive_check(batched_object_value, single_row_object_value, model_name, key) # do not compare returned loss (0-dim tensor) / codebook ids (int) / caching objects elif batched_object is None or not isinstance(batched_object, torch.Tensor): return elif batched_object.dim() == 0: return else: batched_row = batched_object[:1] self.assertFalse( torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}" ) self.assertFalse( torch.isnan(single_row_object).any(), 
f"Single row output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}" ) self.assertTrue( (torch.max(torch.abs(batched_row - single_row_object))) <= 1e-03, msg=( f"Batched and Single row outputs are not equal in {model_name} for key={key}. " f"Difference={torch.max(torch.abs(batched_row - single_row_object))}." ), ) config, batched_input = self.model_tester.prepare_config_and_inputs_for_batched_test() for model_class in self.all_model_classes: config.output_hidden_states = True model_name = model_class.__name__ batched_input_prepared = self._prepare_for_class(batched_input, model_class) model = model_class(config).to(torch_device).eval() single_row_input = {} for key, value in batched_input_prepared.items(): single_row_input[key] = value[:1] with torch.no_grad(): model_batched_output = model(**batched_input_prepared) model_row_output = model(**single_row_input) for key in model_batched_output: recursive_check(model_batched_output[key], model_row_output[key], model_name, key) # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) input_ids = inputs["input_ids"] del inputs["input_ids"] del inputs["pixel_values_images"] del inputs["pixel_values_videos"] wte = model.get_input_embeddings() inputs["inputs_embeds"] = wte(input_ids) with torch.no_grad(): model(**inputs) # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs # while some other models require pixel_values to be present def test_inputs_embeds_matches_input_ids(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) input_ids = inputs["input_ids"] del inputs["input_ids"] del inputs["pixel_values_images"] del inputs["pixel_values_videos"] inputs_embeds = model.get_input_embeddings()(input_ids) with torch.no_grad(): out_ids = model(input_ids=input_ids, **inputs)[0] out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0] self.assertTrue(torch.allclose(out_embeds, out_ids)) @require_torch class VideoLlavaForConditionalGenerationIntegrationTest(unittest.TestCase): def setUp(self): self.processor = VideoLlavaProcessor.from_pretrained("LanguageBind/Video-LLaVA-7B-hf") def tearDown(self): gc.collect() torch.cuda.empty_cache() @slow @require_bitsandbytes def test_small_model_integration_test(self): # Let' s make sure we test the preprocessing to replace what is used model = VideoLlavaForConditionalGeneration.from_pretrained("LanguageBind/Video-LLaVA-7B-hf", load_in_4bit=True) prompt = "USER: <video>Why is this video funny? 
ASSISTANT:" video_file = hf_hub_download( repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset" ) video_file = np.load(video_file) inputs = self.processor(prompt, videos=video_file, return_tensors="pt") EXPECTED_INPUT_IDS = torch.tensor([[1, 3148, 1001, 29901, 29871, 32001, 3750, 338, 445, 4863, 2090, 1460, 29973, 319, 1799, 9047, 13566, 29901]]) # fmt: skip self.assertTrue(torch.equal(inputs["input_ids"], EXPECTED_INPUT_IDS)) output = model.generate(**inputs, do_sample=False, max_new_tokens=20) EXPECTED_DECODED_TEXT = "USER: Why is this video funny? ASSISTANT: The video is funny because the baby is playing with a Wii remote while sitting on a bed" # fmt: skip self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_bitsandbytes def test_small_model_integration_test_mixed_inputs(self): # Let' s make sure we test the preprocessing to replace what is used model = VideoLlavaForConditionalGeneration.from_pretrained("LanguageBind/Video-LLaVA-7B-hf", load_in_4bit=True) prompts = [ "USER: <image>What are the cats in the image doing? ASSISTANT:", "USER: <video>Why is this video funny? ASSISTANT:", ] video_file = hf_hub_download( repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset" ) video_file = np.load(video_file) url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = self.processor(prompts, images=[image], videos=[video_file], padding=True, return_tensors="pt") output = model.generate(**inputs, do_sample=False, max_new_tokens=20) EXPECTED_DECODED_TEXT = [ 'USER: What are the cats in the image doing? ASSISTANT: The cats in the image are lying down on a red couch, possibly sleeping or rest', 'USER: Why is this video funny? ASSISTANT: The video is funny because the baby is playing with a Wii remote while sitting on a bed' ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_bitsandbytes def test_small_model_integration_test_llama(self): # Let' s make sure we test the preprocessing to replace what is used model = VideoLlavaForConditionalGeneration.from_pretrained("LanguageBind/Video-LLaVA-7B-hf", load_in_4bit=True) processor = VideoLlavaProcessor.from_pretrained("LanguageBind/Video-LLaVA-7B-hf") prompt = "USER: <video>Describe the video in details. ASSISTANT:" video_file = hf_hub_download( repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset" ) video_file = np.load(video_file) inputs = self.processor(prompt, videos=video_file, return_tensors="pt").to(torch_device, torch.float16) output = model.generate(**inputs, max_new_tokens=900, do_sample=False) EXPECTED_DECODED_TEXT = "USER: Describe the video in details. ASSISTANT: The video features a young child sitting on a bed, holding a book and reading it. " \ "The child appears to be enjoying the book, as they are fully engaged in the reading process. The bed is located in a bedroom, and there is a chair nearby. " \ "The child is wearing a light blue shirt and pink pants, and they have glasses on. The room is well-lit, and there is a clock on the wall. The child seems " \ "to be in a comfortable and relaxed environment, which is conducive to reading and learning. Overall, the video captures a heartwarming moment of a child " \ "engaging in a simple yet essential activity, which is reading." 
# fmt: skip self.assertEqual( processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_bitsandbytes def test_small_model_integration_test_llama_batched(self): # Let' s make sure we test the preprocessing to replace what is used model = VideoLlavaForConditionalGeneration.from_pretrained("LanguageBind/Video-LLaVA-7B-hf", load_in_4bit=True) processor = VideoLlavaProcessor.from_pretrained("LanguageBind/Video-LLaVA-7B-hf") processor.tokenizer.padding_side = "left" prompts = [ "USER: <video>What is the baby doing? ASSISTANT:", "USER: <video>Who is sitting next to the woman? ASSISTANT:", ] video_1 = np.load( hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset") ) video_2 = np.load( hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="video_demo_2.npy", repo_type="dataset") ) inputs = processor(prompts, videos=[video_1, video_2], return_tensors="pt", padding=True) output = model.generate(**inputs, max_new_tokens=20) EXPECTED_DECODED_TEXT = [ 'USER: What is the baby doing? ASSISTANT: The baby is sitting on a bed and reading a book.Ъ', 'USER: Who is sitting next to the woman? ASSISTANT: A small dog is sitting next to the woman.Ъ' ] # fmt: skip self.assertEqual(processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT) @slow @require_bitsandbytes def test_small_model_integration_test_llama_batched_regression(self): # Let' s make sure we test the preprocessing to replace what is used # Multi-image & multi-prompt (e.g. 3 images and 2 prompts now fails with SDPA, this tests if "eager" works as before) model = VideoLlavaForConditionalGeneration.from_pretrained( "LanguageBind/Video-LLaVA-7B-hf", load_in_4bit=True, attn_implementation="eager" ) processor = VideoLlavaProcessor.from_pretrained("LanguageBind/Video-LLaVA-7B-hf", pad_token="<pad>") processor.tokenizer.padding_side = "left" prompts = [ "USER: <video>What is the baby doing? ASSISTANT:", "USER: <video>Who is sitting next to the woman? ASSISTANT: A small dog is sitting next to the woman. USER: <video>What about this video? ASSITANT:", ] video_1 = np.load( hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset") ) video_2 = np.load( hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="video_demo_2.npy", repo_type="dataset") ) inputs = processor(prompts, videos=[video_1, video_2, video_1], return_tensors="pt", padding=True) output = model.generate(**inputs, max_new_tokens=20) # fmt: off EXPECTED_DECODED_TEXT = [ 'USER: What is the baby doing? ASSISTANT: The baby is sitting on a bed and reading a book.Ъ', 'USER: Who is sitting next to the woman? ASSISTANT: A small dog is sitting next to the woman. USER: What about this video? ASSITANT: The video shows a baby sitting on a bed, reading a book. 
The baby is wearing glass' ] # fmt: on self.assertEqual(processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT) @slow @require_bitsandbytes def test_video_llava_index_error_bug(self): # This is a reproducer of https://github.com/huggingface/transformers/pull/28032 and makes sure it does not happen anymore # Please refer to that PR, or specifically https://github.com/huggingface/transformers/pull/28032#issuecomment-1860650043 for # more details model = VideoLlavaForConditionalGeneration.from_pretrained("LanguageBind/Video-LLaVA-7B-hf", load_in_4bit=True) # Simulate a super long prompt user_prompt = "Describe the video:?\n" * 200 prompt = f"USER: <video>{user_prompt}ASSISTANT:" video_file = hf_hub_download( repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset" ) video_file = np.load(video_file) # let's expand it for 16 frames, to check model can handle any number of frames video_file = video_file.repeat(2, 0) inputs = self.processor(prompt, videos=video_file, return_tensors="pt").to(torch_device, torch.float16) # Make sure that `generate` works _ = model.generate(**inputs, max_new_tokens=20) @slow @require_torch_gpu def test_video_llava_merge_inputs_error_bug(self): # This is a reproducer of https://github.com/huggingface/transformers/pull/28333 and makes sure it does not happen anymore model = VideoLlavaForConditionalGeneration.from_pretrained( "LanguageBind/Video-LLaVA-7B-hf", torch_dtype=torch.float16, low_cpu_mem_usage=True ).to(torch_device) # Simulate some user inputs pixel_values_videos = torch.randn( (2, 8, 3, 224, 224), dtype=torch.float, device=torch_device, ) # fmt: off input_ids = torch.tensor( [ [ 32001, 32001, 1, 15043, 7084, 32000, 32000, 32000, 32000, 32000, 32000, 32000, 32000, 29871, 13, 7900 ], [ 1, 15043, 7084, 29901, 29871, 32000, 32000, 32000, 32000, 32000, 32000, 32000, 32000, 29871, 13, 7900 ], ], dtype=torch.long, device=torch_device, ) # fmt: on attention_mask = torch.tensor( [[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], dtype=torch.long, device=torch_device, ) # Make sure that the loss is properly computed loss = model( pixel_values_videos=pixel_values_videos, input_ids=input_ids, attention_mask=attention_mask, labels=input_ids, ).loss loss.backward() @slow @require_bitsandbytes def test_expansion_in_processing(self): model_id = "LanguageBind/Video-LLaVA-7B-hf" model = VideoLlavaForConditionalGeneration.from_pretrained(model_id, load_in_4bit=True) processor = VideoLlavaProcessor.from_pretrained(model_id) prompt = "USER: <video>Describe the video in details. 
ASSISTANT:" video_file = hf_hub_download( repo_id="raushan-testing-hf/videos-test", filename="video_demo.npy", repo_type="dataset" ) video_file = np.load(video_file) # check processing with expansion of inputs processor.vision_feature_select_strategy = "default" processor.patch_size = 14 inputs_expanded = processor(prompt, videos=video_file, return_tensors="pt").to(torch_device, torch.float16) self.assertTrue(inputs_expanded.input_ids.shape[-1] == 2073) # check processing without expansion of inputs (legacy behavior) processor.vision_feature_select_strategy = None processor.patch_size = None inputs = processor(prompt, videos=video_file, return_tensors="pt").to(torch_device, torch.float16) self.assertTrue(inputs.input_ids.shape[-1] == 18) # generate exactly 20 tokens output = model.generate(**inputs, min_new_tokens=20, max_new_tokens=20) output_expanded = model.generate(**inputs_expanded, min_new_tokens=20, max_new_tokens=20) # check that both inputs are handled correctly and generate the same output self.assertListEqual(output_expanded[:, -20:].tolist(), output[:, -20:].tolist())
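# Back-of-the-envelope accounting for the 2073 tokens asserted in `test_expansion_in_processing`
# above (illustrative only; the per-frame extra token is an assumption, not taken from this file):
# with `patch_size = 14`, each 224 x 224 frame is split into (224 // 14) ** 2 = 256 patches, i.e.
# 257 tokens per frame if one extra token per frame is kept, so the 8-frame video expands the
# single <video> placeholder into roughly 8 * 257 = 2056 tokens; added to the remaining
# 18 - 1 = 17 text tokens this gives 2056 + 17 = 2073, matching the assertion above.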
transformers/tests/models/video_llava/test_modeling_video_llava.py/0
{ "file_path": "transformers/tests/models/video_llava/test_modeling_video_llava.py", "repo_id": "transformers", "token_count": 12050 }
471
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch VisionTextDualEncoder model.""" from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor # Inspired by # https://github.com/rwightman/pytorch-image-models/blob/b9bd960a032c75ca6b808ddeed76bee5f3ed4972/timm/models/layers/helpers.py # From PyTorch internals def to_2tuple(x): if isinstance(x, collections.abc.Iterable): return x return (x, x) @require_tf class TFVisionTextDualEncoderMixin: def get_vision_text_model(self, config, text_config): pass def prepare_config_and_inputs(self): pass def get_pretrained_model_and_inputs(self): pass def check_model_from_pretrained_configs( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) model = TFVisionTextDualEncoderModel(config) output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim)) def check_vision_text_dual_encoder_model( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim)) def check_vision_text_dual_encoder_from_pretrained( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) kwargs = {"vision_model": vision_model, "text_model": text_model} model = 
TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs) output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim)) def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) out_1 = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname) after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) out_2 = after_output[0].numpy() max_diff = np.amax(np.abs(out_2 - out_1)) self.assertLessEqual(max_diff, 1e-5) def check_vision_text_output_attention( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) output = model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True ) vision_attentions = output.vision_model_output.attentions self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) image_size = to_2tuple(vision_model.config.image_size) patch_size = to_2tuple(vision_model.config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len)) text_attentions = output.text_model_output.attentions self.assertEqual(len(text_attentions), text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def test_vision_text_dual_encoder_model(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**inputs_dict) def test_model_from_pretrained_configs(self): inputs_dict = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**inputs_dict) def test_vision_text_dual_encoder_from_pretrained(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict) def test_save_load(self): inputs_dict = self.prepare_config_and_inputs() self.check_save_load(**inputs_dict) def test_vision_text_output_attention(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**inputs_dict) @slow def test_real_model_save_load_from_pretrained(self): model_2, inputs = self.get_pretrained_model_and_inputs() outputs = model_2(**inputs) out_2 = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = 
TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname) after_outputs = model_1(**inputs) out_1 = after_outputs[0].numpy() max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_tf class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def get_vision_text_model(self, vision_config, text_config): vision_model = TFViTModel(vision_config, name="vision_model") text_model = TFBertModel(text_config, name="text_model") return vision_model, text_model def prepare_config_and_inputs(self): vit_model_tester = TFViTModelTester(self) bert_model_tester = TFBertModelTester(self) vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values, _ = vision_config_and_inputs ( text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. 
model = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def check_vision_text_output_attention( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) output = model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True ) vision_attentions = output.vision_model_output.attentions self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) image_size = to_2tuple(vision_model.config.image_size) patch_size = to_2tuple(vision_model.config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len)) text_attentions = output.text_model_output.attentions self.assertEqual(len(text_attentions), text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def get_vision_text_model(self, vision_config, text_config): vision_model = TFDeiTModel(vision_config, name="vision_model") text_model = TFRobertaModel(text_config, name="text_model") return vision_model, text_model def prepare_config_and_inputs(self): vit_model_tester = TFDeiTModelTester(self) bert_model_tester = TFRobertaModelTester(self) vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values, _ = vision_config_and_inputs ( text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def get_vision_text_model(self, vision_config, 
text_config): vision_model = TFCLIPVisionModel(vision_config, name="vision_model") text_model = TFBertModel(text_config, name="text_model") return vision_model, text_model def prepare_config_and_inputs(self): clip_model_tester = TFCLIPVisionModelTester(self) bert_model_tester = TFBertModelTester(self) vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values = vision_config_and_inputs ( text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase): @slow def test_inference(self): model = TFVisionTextDualEncoderModel.from_pretrained( "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True ) processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian") image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor( text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np" ) outputs = model(**inputs) # verify the logits self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0])) self.assertEqual( outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), ) expected_logits = np.array([[1.2284727, 0.3104122]]) self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
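# Illustrative sketch (not part of the test suite) of the construction path exercised by
# `TFViTBertModelTest.get_pretrained_model_and_inputs`, reusing the same tiny checkpoints and the
# module-level `floats_tensor` / `ids_tensor` / `random_attention_mask` helpers. Kept as an
# uncalled helper so importing this module has no side effects.
def _illustrative_dual_encoder_example():
    from transformers import TFVisionTextDualEncoderModel

    model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
        "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
    )
    batch_size = 2
    pixel_values = floats_tensor(
        [
            batch_size,
            model.vision_model.config.num_channels,
            model.vision_model.config.image_size,
            model.vision_model.config.image_size,
        ]
    )
    input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
    attention_mask = random_attention_mask([batch_size, 4])
    outputs = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
    # Both embeddings are projected to `model.config.projection_dim`, which is what the
    # `check_vision_text_dual_encoder_model` assertions above rely on.
    return outputs.text_embeds, outputs.image_embeds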
transformers/tests/models/vision_text_dual_encoder/test_modeling_tf_vision_text_dual_encoder.py/0
{ "file_path": "transformers/tests/models/vision_text_dual_encoder/test_modeling_tf_vision_text_dual_encoder.py", "repo_id": "transformers", "token_count": 7464 }
472
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch ViTDet model.""" import unittest from transformers import VitDetConfig from transformers.testing_utils import is_flaky, require_torch, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import VitDetBackbone, VitDetModel class VitDetModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.num_patches_one_direction = self.image_size // self.patch_size self.seq_length = (self.image_size // self.patch_size) ** 2 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return VitDetConfig( image_size=self.image_size, pretrain_image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = VitDetModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_size, self.num_patches_one_direction, self.num_patches_one_direction), ) def create_and_check_backbone(self, config, pixel_values, 
labels): model = VitDetBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify hidden states self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, self.hidden_size, self.num_patches_one_direction, self.num_patches_one_direction], ) # verify channels self.parent.assertEqual(len(model.channels), len(config.out_features)) self.parent.assertListEqual(model.channels, [config.hidden_size]) # verify backbone works with out_features=None config.out_features = None model = VitDetBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual( list(result.feature_maps[0].shape), [self.batch_size, self.hidden_size, self.num_patches_one_direction, self.num_patches_one_direction], ) # verify channels self.parent.assertEqual(len(model.channels), 1) self.parent.assertListEqual(model.channels, [config.hidden_size]) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class VitDetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as VitDet does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (VitDetModel, VitDetBackbone) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": VitDetModel} if is_torch_available() else {} fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = VitDetModelTester(self) self.config_tester = ConfigTester(self, config_class=VitDetConfig, has_text_modality=False, hidden_size=37) @is_flaky(max_attempts=3, description="`torch.nn.init.trunc_normal_` is flaky.") def test_initialization(self): super().test_initialization() # TODO: Fix me (once this model gets more usage) @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.") def test_cpu_offload(self): super().test_cpu_offload() # TODO: Fix me (once this model gets more usage) @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.") def test_disk_offload_bin(self): super().test_disk_offload() @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.") def test_disk_offload_safetensors(self): super().test_disk_offload() # TODO: Fix me (once this model gets more usage) @unittest.skip(reason="Does not work on the tiny model as we keep hitting edge cases.") def test_model_parallelism(self): super().test_model_parallelism() def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="VitDet does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def 
test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = self.model_tester.num_hidden_layers self.assertEqual(len(hidden_states), expected_num_stages + 1) # VitDet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]), [ self.model_tester.num_patches_one_direction, self.model_tester.num_patches_one_direction, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) # overwrite since VitDet only supports retraining gradients of hidden states def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) @unittest.skip(reason="VitDet does not support feedforward chunking") def test_feed_forward_chunking(self): pass @unittest.skip(reason="VitDet does not have standalone checkpoints since it used as backbone in other models") def test_model_from_pretrained(self): pass @require_torch class VitDetBackboneTest(unittest.TestCase, BackboneTesterMixin): all_model_classes = (VitDetBackbone,) if is_torch_available() else () config_class = VitDetConfig has_attentions = False def setUp(self): self.model_tester = VitDetModelTester(self)
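# Shape sketch for the tiny configuration used by `VitDetModelTester` above (illustrative only):
# image_size=30 with patch_size=2 gives num_patches_one_direction = 30 // 2 = 15, so `VitDetModel`
# returns a last_hidden_state of shape (batch_size, hidden_size, 15, 15) = (13, 32, 15, 15), which
# is what `create_and_check_model` and `check_hidden_states_output` check against.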
transformers/tests/models/vitdet/test_modeling_vitdet.py/0
{ "file_path": "transformers/tests/models/vitdet/test_modeling_vitdet.py", "repo_id": "transformers", "token_count": 4697 }
473
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFXLMRobertaModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
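# Illustrative invocation (not part of the file): the integration test above is marked @slow, so it
# is skipped unless slow tests are enabled. Assuming a standard transformers development checkout,
# something like the following would run it in isolation:
#
#   RUN_SLOW=1 python -m pytest tests/models/xlm_roberta/test_modeling_tf_xlm_roberta.py -k output_embeds_base_model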
transformers/tests/models/xlm_roberta/test_modeling_tf_xlm_roberta.py/0
{ "file_path": "transformers/tests/models/xlm_roberta/test_modeling_tf_xlm_roberta.py", "repo_id": "transformers", "token_count": 850 }
474
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import pytest from transformers import ( MODEL_MAPPING, TF_MODEL_MAPPING, TOKENIZER_MAPPING, ImageFeatureExtractionPipeline, is_tf_available, is_torch_available, is_vision_available, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf if is_vision_available(): from PIL import Image # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @is_pipeline_test class ImageFeatureExtractionPipelineTests(unittest.TestCase): model_mapping = MODEL_MAPPING tf_model_mapping = TF_MODEL_MAPPING @require_torch def test_small_model_pt(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt" ) img = prepare_img() outputs = feature_extractor(img) self.assertEqual( nested_simplify(outputs[0][0]), [-1.417, -0.392, -1.264, -1.196, 1.648, 0.885, 0.56, -0.606, -1.175, 0.823, 1.912, 0.081, -0.053, 1.119, -0.062, -1.757, -0.571, 0.075, 0.959, 0.118, 1.201, -0.672, -0.498, 0.364, 0.937, -1.623, 0.228, 0.19, 1.697, -1.115, 0.583, -0.981]) # fmt: skip @require_torch def test_small_model_w_pooler_pt(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit-w-pooler", framework="pt" ) img = prepare_img() outputs = feature_extractor(img, pool=True) self.assertEqual( nested_simplify(outputs[0]), [-0.056, 0.083, 0.021, 0.038, 0.242, -0.279, -0.033, -0.003, 0.200, -0.192, 0.045, -0.095, -0.077, 0.017, -0.058, -0.063, -0.029, -0.204, 0.014, 0.042, 0.305, -0.205, -0.099, 0.146, -0.287, 0.020, 0.168, -0.052, 0.046, 0.048, -0.156, 0.093]) # fmt: skip @require_tf def test_small_model_tf(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit-w-pooler", framework="tf" ) img = prepare_img() outputs = feature_extractor(img) self.assertEqual( nested_simplify(outputs[0][0]), [-1.417, -0.392, -1.264, -1.196, 1.648, 0.885, 0.56, -0.606, -1.175, 0.823, 1.912, 0.081, -0.053, 1.119, -0.062, -1.757, -0.571, 0.075, 0.959, 0.118, 1.201, -0.672, -0.498, 0.364, 0.937, -1.623, 0.228, 0.19, 1.697, -1.115, 0.583, -0.981]) # fmt: skip @require_tf def test_small_model_w_pooler_tf(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit-w-pooler", framework="tf" ) img = prepare_img() outputs = feature_extractor(img, pool=True) self.assertEqual( nested_simplify(outputs[0]), [-0.056, 0.083, 0.021, 0.038, 0.242, -0.279, -0.033, -0.003, 0.200, -0.192, 0.045, -0.095, -0.077, 0.017, -0.058, -0.063, -0.029, -0.204, 0.014, 0.042, 0.305, -0.205, -0.099, 0.146, -0.287, 0.020, 0.168, -0.052, 0.046, 0.048, -0.156, 0.093]) # fmt: skip @require_torch def 
test_image_processing_small_model_pt(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt" ) # test with image processor parameters image_processor_kwargs = {"size": {"height": 300, "width": 300}} img = prepare_img() with pytest.raises(ValueError): # Image doesn't match model input size feature_extractor(img, image_processor_kwargs=image_processor_kwargs) image_processor_kwargs = {"image_mean": [0, 0, 0], "image_std": [1, 1, 1]} img = prepare_img() outputs = feature_extractor(img, image_processor_kwargs=image_processor_kwargs) self.assertEqual(np.squeeze(outputs).shape, (226, 32)) # Test pooling option outputs = feature_extractor(img, pool=True) self.assertEqual(np.squeeze(outputs).shape, (32,)) @require_tf def test_image_processing_small_model_tf(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="tf" ) # test with image processor parameters image_processor_kwargs = {"size": {"height": 300, "width": 300}} img = prepare_img() with pytest.raises(ValueError): # Image doesn't match model input size feature_extractor(img, image_processor_kwargs=image_processor_kwargs) image_processor_kwargs = {"image_mean": [0, 0, 0], "image_std": [1, 1, 1]} img = prepare_img() outputs = feature_extractor(img, image_processor_kwargs=image_processor_kwargs) self.assertEqual(np.squeeze(outputs).shape, (226, 32)) # Test pooling option outputs = feature_extractor(img, pool=True) self.assertEqual(np.squeeze(outputs).shape, (32,)) @require_torch def test_return_tensors_pt(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt" ) img = prepare_img() outputs = feature_extractor(img, return_tensors=True) self.assertTrue(torch.is_tensor(outputs)) @require_tf def test_return_tensors_tf(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="tf" ) img = prepare_img() outputs = feature_extractor(img, return_tensors=True) self.assertTrue(tf.is_tensor(outputs)) def get_test_pipeline(self, model, tokenizer, processor, torch_dtype="float32"): if processor is None: self.skipTest(reason="No image processor") elif type(model.config) in TOKENIZER_MAPPING: self.skipTest( reason="This is a bimodal model, we need to find a more consistent way to switch on those models." ) elif model.config.is_encoder_decoder: self.skipTest( """encoder_decoder models are trickier for this pipeline. Do we want encoder + decoder inputs to get some featues? Do we want encoder only features ? For now ignore those. """ ) feature_extractor = ImageFeatureExtractionPipeline( model=model, image_processor=processor, torch_dtype=torch_dtype ) img = prepare_img() return feature_extractor, [img, img] def run_pipeline_test(self, feature_extractor, examples): imgs = examples outputs = feature_extractor(imgs[0]) self.assertEqual(len(outputs), 1) outputs = feature_extractor(imgs) self.assertEqual(len(outputs), 2)
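# Illustrative pipeline usage (not part of the test suite) mirroring the small-model checks above,
# with the same tiny checkpoint and COCO fixture image. Kept as an uncalled helper so importing
# this module has no side effects.
def _illustrative_image_feature_extraction_example():
    extractor = pipeline(
        task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt"
    )
    image = prepare_img()  # the COCO fixture image used throughout this file
    # Returns plain nested lists of floats; pass `return_tensors=True` to get a framework tensor,
    # or `pool=True` to get the pooled embedding instead of per-patch features.
    features = extractor(image, pool=True)
    return features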
transformers/tests/pipelines/test_pipelines_image_feature_extraction.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_image_feature_extraction.py", "repo_id": "transformers", "token_count": 3489 }
475
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from datasets import load_dataset from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_accelerator, require_vision, slow, torch_device, ) from .test_pipelines_common import ANY if is_torch_available(): import torch from transformers.pipelines.pt_utils import KeyDataset if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch @require_vision class VisualQuestionAnsweringPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def get_test_pipeline(self, model, tokenizer, processor, torch_dtype="float32"): vqa_pipeline = pipeline( "visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa", torch_dtype=torch_dtype ) examples = [ { "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "question": "How many cats are there?", }, { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "question": "How many cats are there?", }, ] return vqa_pipeline, examples def run_pipeline_test(self, vqa_pipeline, examples): outputs = vqa_pipeline(examples, top_k=1) self.assertEqual( outputs, [ [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], ], ) @require_torch def test_small_model_pt(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2) self.assertEqual( outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}] ) outputs = vqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}] ) @require_torch @require_torch_accelerator def test_small_model_pt_blip2(self): vqa_pipeline = pipeline( "visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration" ) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" 
outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": ANY(str)}]) outputs = vqa_pipeline({"image": image, "question": question}) self.assertEqual(outputs, [{"answer": ANY(str)}]) outputs = vqa_pipeline([{"image": image, "question": question}, {"image": image, "question": question}]) self.assertEqual(outputs, [[{"answer": ANY(str)}]] * 2) vqa_pipeline = pipeline( "visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration", model_kwargs={"torch_dtype": torch.float16}, device=torch_device, ) self.assertEqual(vqa_pipeline.model.device, torch.device("{}:0".format(torch_device))) self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16) self.assertEqual(vqa_pipeline.model.vision_model.dtype, torch.float16) outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": ANY(str)}]) @slow @require_torch def test_large_model_pt(self): vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" outputs = vqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) outputs = vqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) outputs = vqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2, ) @slow @require_torch @require_torch_accelerator def test_large_model_pt_blip2(self): vqa_pipeline = pipeline( "visual-question-answering", model="Salesforce/blip2-opt-2.7b", model_kwargs={"torch_dtype": torch.float16}, device=torch_device, ) self.assertEqual(vqa_pipeline.model.device, torch.device("{}:0".format(torch_device))) self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "Question: how many cats are there? 
Answer:" outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": "two"}]) outputs = vqa_pipeline({"image": image, "question": question}) self.assertEqual(outputs, [{"answer": "two"}]) outputs = vqa_pipeline([{"image": image, "question": question}, {"image": image, "question": question}]) self.assertEqual(outputs, [[{"answer": "two"}]] * 2) @require_torch def test_small_model_pt_image_list(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") images = [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000004016.png", ] outputs = vqa_pipeline(image=images, question="How many cats are there?", top_k=1) self.assertEqual( outputs, [[{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}]] ) @require_torch def test_small_model_pt_question_list(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" questions = ["How many cats are there?", "Are there any dogs?"] outputs = vqa_pipeline(image=image, question=questions, top_k=1) self.assertEqual( outputs, [[{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}]] ) @require_torch def test_small_model_pt_both_list(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") images = [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000004016.png", ] questions = ["How many cats are there?", "Are there any dogs?"] outputs = vqa_pipeline(image=images, question=questions, top_k=1) self.assertEqual( outputs, [ [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], ], ) @require_torch def test_small_model_pt_dataset(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") dataset = load_dataset("hf-internal-testing/dummy_image_text_data", split="train[:2]") question = "What's in the image?" outputs = vqa_pipeline(image=KeyDataset(dataset, "image"), question=question, top_k=1) self.assertEqual( outputs, [ [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], ], ) @require_tf @unittest.skip(reason="Visual question answering not implemented in TF") def test_small_model_tf(self): pass
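# Minimal usage sketch of the visual-question-answering pipeline these tests exercise
# (separate from the test file itself). The checkpoint, fixture image and question mirror
# the slow test above; the calling patterns are the ones the tests assert on.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
# Single example: a list of {"score": float, "answer": str} dicts, highest score first.
answers = vqa(image=image, question="How many cats are there?", top_k=2)
# Batched form: a list of {"image": ..., "question": ...} dicts yields one list per example.
batched = vqa([{"image": image, "question": "How many cats are there?"}] * 2, top_k=2)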
transformers/tests/pipelines/test_pipelines_visual_question_answering.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_visual_question_answering.py", "repo_id": "transformers", "token_count": 4167 }
476
# coding=utf-8 # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import unittest from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, FbgemmFp8Config, OPTForCausalLM from transformers.testing_utils import ( require_accelerate, require_fbgemm_gpu, require_read_token, require_torch_gpu, require_torch_multi_gpu, slow, torch_device, ) from transformers.utils import is_accelerate_available, is_torch_available if is_torch_available(): import torch if is_accelerate_available(): from accelerate import init_empty_weights @require_torch_gpu class FbgemmFp8ConfigTest(unittest.TestCase): def test_to_dict(self): """ Simple test that checks if one uses a config and converts it to a dict, the dict is the same as the config object """ quantization_config = FbgemmFp8Config() config_to_dict = quantization_config.to_dict() for key in config_to_dict: self.assertEqual(getattr(quantization_config, key), config_to_dict[key]) def test_from_dict(self): """ Simple test that checks if one uses a dict and converts it to a config object, the config object is the same as the dict """ dict = {"modules_to_not_convert": ["lm_head.weight"], "quant_method": "fbgemm_fp8"} quantization_config = FbgemmFp8Config.from_dict(dict) self.assertEqual(dict["modules_to_not_convert"], quantization_config.modules_to_not_convert) self.assertEqual(dict["quant_method"], quantization_config.quant_method) @slow @require_torch_gpu @require_fbgemm_gpu @require_accelerate @require_read_token class FbgemmFp8Test(unittest.TestCase): model_name = "meta-llama/Meta-Llama-3-8B" input_text = "What are we having for dinner?" 
max_new_tokens = 9 EXPECTED_OUTPUT = "What are we having for dinner?\nI'm having a steak and a salad" device_map = "cuda" offload_device_map = { "model.embed_tokens": 0, "model.layers.0": 0, "model.layers.1": 0, "model.layers.2": 0, "model.layers.3": 0, "model.layers.4": 0, "model.layers.5": 0, "model.layers.6": 0, "model.layers.7": 0, "model.layers.8": 0, "model.layers.9": 0, "model.layers.10": 0, "model.layers.11": 0, "model.layers.12": 0, "model.layers.13": 0, "model.layers.14": 0, "model.layers.15": 0, "model.layers.16": "cpu", "model.layers.17": "cpu", "model.layers.18": "cpu", "model.layers.19": "cpu", "model.layers.20": "disk", "model.layers.21": "disk", "model.layers.22": "disk", "model.layers.23": "disk", "model.layers.24": "disk", "model.layers.25": "disk", "model.layers.26": "disk", "model.layers.27": "disk", "model.layers.28": "disk", "model.layers.29": "disk", "model.layers.30": "disk", "model.layers.31": "disk", "model.norm": "disk", "lm_head": "disk", } # called only once for all test in this class @classmethod def setUpClass(cls): """ Setup quantized model """ quantization_config = FbgemmFp8Config() cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, device_map=cls.device_map, quantization_config=quantization_config ) def tearDown(self): gc.collect() torch.cuda.empty_cache() gc.collect() def test_quantized_model_conversion(self): """ Simple test that checks if the quantized model has been converted properly """ from transformers.integrations import FbgemmFp8Linear, replace_with_fbgemm_fp8_linear model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") quantization_config = FbgemmFp8Config() with init_empty_weights(): model = OPTForCausalLM(config) nb_linears = 0 for module in model.modules(): if isinstance(module, torch.nn.Linear): nb_linears += 1 model = replace_with_fbgemm_fp8_linear(model, quantization_config=quantization_config) nb_fbgemm_linear = 0 for module in model.modules(): if isinstance(module, FbgemmFp8Linear): nb_fbgemm_linear += 1 self.assertEqual(nb_linears - 1, nb_fbgemm_linear) with init_empty_weights(): model = OPTForCausalLM(config) quantization_config = FbgemmFp8Config(modules_to_not_convert=["fc1"]) model = replace_with_fbgemm_fp8_linear(model, quantization_config=quantization_config) nb_fbgemm_linear = 0 for module in model.modules(): if isinstance(module, FbgemmFp8Linear): nb_fbgemm_linear += 1 self.assertEqual(nb_linears - 25, nb_fbgemm_linear) def test_quantized_model(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = self.quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_save_pretrained(self): """ Simple test that checks if the quantized model is working properly after being saved and loaded """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map) input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def 
test_change_loading_attributes(self): """ Simple test that checks if the quantized model is working properly after being saved and loaded """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) quantization_config = FbgemmFp8Config(activation_scale_ub=1000.0) model = AutoModelForCausalLM.from_pretrained( tmpdirname, device_map=self.device_map, quantization_config=quantization_config ) self.assertEqual(model.model.layers[1].mlp.down_proj.input_scale_ub.item(), 1000.0) input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @require_torch_multi_gpu def test_quantized_model_multi_gpu(self): """ Simple test that checks if the quantized model is working properly with multiple GPUs set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 GPUS """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantization_config = FbgemmFp8Config() quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, device_map="auto", quantization_config=quantization_config ) self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1}) output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_quantized_model_offload(self): """ Simple test that checks if the quantized model returns an error when loading with cpu/disk offloaded """ quantization_config = FbgemmFp8Config() with self.assertRaisesRegex( ValueError, "You are attempting to load an FP8 model with a device_map that contains a CPU or disk device." ): AutoModelForCausalLM.from_pretrained( self.model_name, device_map=self.offload_device_map, quantization_config=quantization_config ) def test_save_pretrained_offload(self): """ Simple test that checks if the saved quantized model is working properly cpu/disk offload """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.offload_device_map) output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @require_torch_multi_gpu def test_save_pretrained_multi_gpu(self): """ Simple test that checks if the quantized model is working properly after being saved and loaded """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map="auto") self.assertTrue(set(model.hf_device_map.values()) == {0, 1}) input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = model.generate(**input_ids, max_new_tokens=self.max_new_tokens) self.assertEqual(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
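# Minimal sketch of the FP8 load path these tests exercise (separate from the test file
# itself): quantizing a causal LM at load time with FbgemmFp8Config. The model id, prompt
# and generation length come from the tests; it assumes a CUDA device with fbgemm-gpu installed.
from transformers import AutoModelForCausalLM, AutoTokenizer, FbgemmFp8Config

model_name = "meta-llama/Meta-Llama-3-8B"
quantization_config = FbgemmFp8Config()  # modules_to_not_convert=[...] can exclude layers, as in the conversion test
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="cuda", quantization_config=quantization_config)

inputs = tokenizer("What are we having for dinner?", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=9)
print(tokenizer.decode(output[0], skip_special_tokens=True))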
transformers/tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py/0
{ "file_path": "transformers/tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py", "repo_id": "transformers", "token_count": 4542 }
477
import importlib.util def is_sagemaker_available(): return importlib.util.find_spec("sagemaker") is not None
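# Minimal sketch of how an availability helper like this is typically consumed, e.g. to skip
# SageMaker-specific tests when the package is missing. The test class and method names below
# are hypothetical.
import unittest

@unittest.skipUnless(is_sagemaker_available(), "test requires the `sagemaker` package")
class HypotheticalSageMakerTest(unittest.TestCase):
    def test_import(self):
        import sagemaker  # guarded by the skip above

        self.assertIsNotNone(sagemaker)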
transformers/tests/sagemaker/__init__.py/0
{ "file_path": "transformers/tests/sagemaker/__init__.py", "repo_id": "transformers", "token_count": 36 }
478
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import copy import gc import inspect import math import os import os.path import random import re import tempfile import time import warnings from collections import defaultdict from typing import Dict, List, Tuple import numpy as np from packaging import version from parameterized import parameterized from pytest import mark import transformers from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, GenerationConfig, PretrainedConfig, PreTrainedModel, is_torch_available, logging, set_seed, ) from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES, MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from transformers.testing_utils import ( CaptureLogger, is_flaky, is_pt_flax_cross_test, is_pt_tf_cross_test, require_accelerate, require_bitsandbytes, require_flash_attn, require_read_token, require_safetensors, require_torch, require_torch_accelerator, require_torch_gpu, require_torch_multi_accelerator, require_torch_multi_gpu, require_torch_sdpa, slow, torch_device, ) from transformers.utils import ( CONFIG_NAME, GENERATION_CONFIG_NAME, SAFE_WEIGHTS_NAME, is_accelerate_available, is_flax_available, is_tf_available, is_torch_bf16_available_on_device, is_torch_fp16_available_on_device, is_torch_fx_available, is_torch_sdpa_available, ) from transformers.utils.generic import ContextManagers, ModelOutput if is_accelerate_available(): from accelerate.utils import compute_module_sizes if is_torch_available(): import torch import torch.nn.functional as F from safetensors.torch import load_file as safe_load_file from safetensors.torch import save_file as safe_save_file from torch import nn from transformers import MODEL_MAPPING, AdaptiveEmbedding from transformers.modeling_utils import load_state_dict, no_init_weights from transformers.pytorch_utils import id_tensor_storage if is_tf_available(): import tensorflow as tf if is_flax_available(): import jax.numpy as jnp from tests.utils.test_modeling_flax_utils import check_models_equal from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, 
load_flax_weights_in_pytorch_model, ) if is_torch_fx_available(): from transformers.utils.fx import _FX_SUPPORTED_MODELS_WITH_KV_CACHE, symbolic_trace def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(configs_no_init, key, 1e-10) if isinstance(getattr(configs_no_init, key, None), PretrainedConfig): no_init_subconfig = _config_zero_init(getattr(configs_no_init, key)) setattr(configs_no_init, key, no_init_subconfig) return configs_no_init def _mock_init_weights(self, module): for name, param in module.named_parameters(recurse=False): # Use the first letter of the name to get a value and go from a <> -13 to z <> 12 value = ord(name[0].lower()) - 110 param.data.fill_(value) def _mock_all_init_weights(self): # Prune heads if needed if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) import transformers.modeling_utils if transformers.modeling_utils._init_weights: for module in self.modules(): module._is_hf_initialized = False # Initialize weights self.apply(self._initialize_weights) # Tie weights should be skipped when not initializing all weights # since from_pretrained(...) calls tie weights anyways self.tie_weights() @require_torch class ModelTesterMixin: model_tester = None all_model_classes = () all_generative_model_classes = () fx_compatible = False test_torchscript = True test_pruning = True test_resize_embeddings = True test_resize_position_embeddings = False test_head_masking = True test_mismatched_shapes = True test_missing_keys = True test_model_parallel = False is_encoder_decoder = False has_attentions = True model_split_percents = [0.5, 0.7, 0.9] def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES): inputs_dict = { k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous() if isinstance(v, torch.Tensor) and v.ndim > 1 else v for k, v in inputs_dict.items() } elif model_class.__name__ in get_values(MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES): inputs_dict.pop("attention_mask") elif model_class.__name__ == MODEL_FOR_PRETRAINING_MAPPING_NAMES["hiera"]: config = self.model_tester.get_config() mask_spatial_shape = [ i // s // ms for i, s, ms in zip(config.image_size, config.patch_stride, config.masked_unit_size) ] num_windows = math.prod(mask_spatial_shape) torch.manual_seed(0) inputs_dict["noise"] = torch.rand(self.model_tester.batch_size, num_windows) if return_labels: if model_class.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES): inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device) elif model_class.__name__ in [ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), ]: inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class.__name__ in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES), *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES), 
*get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES), ]: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class.__name__ in [ *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES), *get_values(MODEL_FOR_MASKED_LM_MAPPING_NAMES), *get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES), ]: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) elif model_class.__name__ in get_values(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES): num_patches = self.model_tester.image_size // self.model_tester.patch_size inputs_dict["bool_masked_pos"] = torch.zeros( (self.model_tester.batch_size, num_patches**2), dtype=torch.long, device=torch_device ) elif model_class.__name__ in get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES): batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = torch.zeros( [self.model_tester.batch_size, height, width], device=torch_device ).long() return inputs_dict def test_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_save_load(out1, out2): # make sure we don't have nans out_2 = out2.cpu().numpy() out_2[np.isnan(out_2)] = 0 out_2 = out_2[~np.isneginf(out_2)] out_1 = out1.cpu().numpy() out_1[np.isnan(out_1)] = 0 out_1 = out_1[~np.isneginf(out_1)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): first = model(**self._prepare_for_class(inputs_dict, model_class))[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # the config file (and the generation config file, if it can generate) should be saved self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME))) self.assertEqual( model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME)) ) model = model_class.from_pretrained(tmpdirname) model.to(torch_device) with torch.no_grad(): second = model(**self._prepare_for_class(inputs_dict, model_class))[0] if isinstance(first, tuple) and isinstance(second, tuple): for tensor1, tensor2 in zip(first, second): check_save_load(tensor1, tensor2) else: check_save_load(first, second) def test_from_pretrained_no_checkpoint(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) state_dict = model.state_dict() new_model = model_class.from_pretrained( pretrained_model_name_or_path=None, config=config, state_dict=state_dict ) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_keep_in_fp32_modules(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._keep_in_fp32_modules is None: self.skipTest(reason="Model class has no _keep_in_fp32_modules attribute defined") model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16) for name, param in model.named_parameters(): if any(n in 
model_class._keep_in_fp32_modules for n in name.split(".")): self.assertTrue(param.dtype == torch.float32) else: self.assertTrue(param.dtype == torch.float16, name) def test_save_load_keys_to_ignore_on_save(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) _keys_to_ignore_on_save = getattr(model, "_keys_to_ignore_on_save", None) if _keys_to_ignore_on_save is None: continue # check the keys are in the original state_dict for k in _keys_to_ignore_on_save: self.assertIn(k, model.state_dict().keys(), "\n".join(model.state_dict().keys())) # check that certain keys didn't get saved with the model with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) output_model_file = os.path.join(tmpdirname, SAFE_WEIGHTS_NAME) state_dict_saved = safe_load_file(output_model_file) for k in _keys_to_ignore_on_save: self.assertNotIn(k, state_dict_saved.keys(), "\n".join(state_dict_saved.keys())) # Test we can load the state dict in the model, necessary for the checkpointing API in Trainer. load_result = model.load_state_dict(state_dict_saved, strict=False) keys_to_ignore = set(model._keys_to_ignore_on_save) if hasattr(model, "_tied_weights_keys"): keys_to_ignore.update(set(model._tied_weights_keys)) self.assertTrue(len(load_result.missing_keys) == 0 or set(load_result.missing_keys) == keys_to_ignore) self.assertTrue(len(load_result.unexpected_keys) == 0) def test_gradient_checkpointing_backward_compatibility(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue config.gradient_checkpointing = True model = model_class(config) self.assertTrue(model.is_gradient_checkpointing) def test_gradient_checkpointing_enable_disable(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue # at init model should have gradient checkpointing disabled model = model_class(config) self.assertFalse(model.is_gradient_checkpointing) # check enable works model.gradient_checkpointing_enable() self.assertTrue(model.is_gradient_checkpointing) # Loop over all modules and check that relevant modules have gradient_checkpointing set to True for n, m in model.named_modules(): if hasattr(m, "gradient_checkpointing"): self.assertTrue( m.gradient_checkpointing, f"Module {n} does not have gradient_checkpointing set to True" ) # check disable works model.gradient_checkpointing_disable() self.assertFalse(model.is_gradient_checkpointing) # Loop over all modules and check that relevant modules have gradient_checkpointing set to False for n, m in model.named_modules(): if hasattr(m, "gradient_checkpointing"): self.assertFalse( m.gradient_checkpointing, f"Module {n} does not have gradient_checkpointing set to False" ) @is_flaky(description="low likelihood of failure, reason not yet discovered") def test_save_load_fast_init_from_base(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if config.__class__ not in MODEL_MAPPING: self.skipTest(reason="Model class not in MODEL_MAPPING") base_class = MODEL_MAPPING[config.__class__] if isinstance(base_class, tuple): base_class = base_class[0] for model_class in self.all_model_classes: if model_class == base_class: continue # make a copy of model class to not break future tests # 
from https://stackoverflow.com/questions/9541025/how-to-copy-a-python-class class CopyClass(model_class): pass model_class_copy = CopyClass # make sure that all keys are expected for test model_class_copy._keys_to_ignore_on_load_missing = [] # make init deterministic, but make sure that # non-initialized weights throw errors nevertheless model_class_copy._init_weights = _mock_init_weights model_class_copy.init_weights = _mock_all_init_weights model = base_class(config) state_dict = model.state_dict() # this will often delete a single weight of a multi-weight module # to test an edge case random_key_to_del = random.choice(list(state_dict.keys())) del state_dict[random_key_to_del] # check that certain keys didn't get saved with the model with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) torch.save(state_dict, os.path.join(tmpdirname, "pytorch_model.bin")) model_fast_init = model_class_copy.from_pretrained(tmpdirname) model_slow_init = model_class_copy.from_pretrained(tmpdirname, _fast_init=False) # Before we test anything for key in model_fast_init.state_dict().keys(): if isinstance(model_slow_init.state_dict()[key], torch.BoolTensor): max_diff = (model_slow_init.state_dict()[key] ^ model_fast_init.state_dict()[key]).sum().item() else: max_diff = (model_slow_init.state_dict()[key] - model_fast_init.state_dict()[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @slow @require_accelerate @mark.accelerate_tests def test_save_load_low_cpu_mem_usage(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() with tempfile.TemporaryDirectory() as saved_model_path: for model_class in self.all_model_classes: model_to_save = model_class(config) model_to_save.save_pretrained(saved_model_path) self._check_save_load_low_cpu_mem_usage(model_class, saved_model_path) @slow @require_accelerate @mark.accelerate_tests def test_save_load_low_cpu_mem_usage_checkpoints(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() with tempfile.TemporaryDirectory() as saved_model_path: for model_class in self.all_model_classes: model_to_save = model_class(config) model_to_save.config.save_pretrained(saved_model_path) torch.save(model_to_save.state_dict(), os.path.join(saved_model_path, "pytorch_model.bin")) self._check_save_load_low_cpu_mem_usage(model_class, saved_model_path) @slow @require_accelerate @mark.accelerate_tests def test_save_load_low_cpu_mem_usage_no_safetensors(self): with tempfile.TemporaryDirectory() as saved_model_path: for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model_to_save = model_class(config) model_to_save.save_pretrained(saved_model_path, safe_serialization=False) self._check_save_load_low_cpu_mem_usage(model_class, saved_model_path) def _check_save_load_low_cpu_mem_usage(self, model_class, saved_model_path): from accelerate.utils.modeling import named_module_tensors # Load the low usage and the normal models. model_low_usage, loading_info = model_class.from_pretrained( saved_model_path, low_cpu_mem_usage=True, output_loading_info=True, ) model_non_low_usage = model_class.from_pretrained(saved_model_path) # Check that there were no missing keys. self.assertEqual(loading_info["missing_keys"], []) # The low_cpu_mem_usage=True causes the model params to be initialized with device=meta, and then # subsequently loaded with the correct values and onto the correct device. 
We check if there are any # remaining params that were not properly loaded. for name, tensor in named_module_tensors(model_low_usage, recurse=True): self.assertNotEqual( tensor.device, torch.device("meta"), "Tensor '" + name + "' has not been properly loaded and has device=meta.", ) # Check that the parameters are equal. for p1, p2 in zip(model_low_usage.parameters(), model_non_low_usage.parameters()): self.assertEqual(p1.data.ne(p2.data).sum(), 0) # Check that the state dict keys are equal. self.assertEqual(set(model_low_usage.state_dict().keys()), set(model_non_low_usage.state_dict().keys())) # Check that the shared tensors are equal. tensor_ptrs1 = collections.defaultdict(list) for name, tensor in model_low_usage.state_dict().items(): tensor_ptrs1[id_tensor_storage(tensor)].append(name) tied_params1 = [names for _, names in tensor_ptrs1.items() if len(names) > 1] tensor_ptrs2 = collections.defaultdict(list) for name, tensor in model_non_low_usage.state_dict().items(): tensor_ptrs2[id_tensor_storage(tensor)].append(name) tied_params2 = [names for _, names in tensor_ptrs2.items() if len(names) > 1] self.assertEqual(tied_params1, tied_params2) def test_save_load_fast_init_to_base(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if config.__class__ not in MODEL_MAPPING: self.skipTest(reason="Model class not in MODEL_MAPPING") base_class = MODEL_MAPPING[config.__class__] if isinstance(base_class, tuple): base_class = base_class[0] for model_class in self.all_model_classes: if model_class == base_class: continue # make a copy of model class to not break future tests # from https://stackoverflow.com/questions/9541025/how-to-copy-a-python-class class CopyClass(base_class): pass base_class_copy = CopyClass # make sure that all keys are expected for test base_class_copy._keys_to_ignore_on_load_missing = [] # make init deterministic, but make sure that # non-initialized weights throw errors nevertheless base_class_copy._init_weights = _mock_init_weights base_class_copy.init_weights = _mock_all_init_weights model = model_class(config) state_dict = model.state_dict() # this will often delete a single weight of a multi-weight module # to test an edge case random_key_to_del = random.choice(list(state_dict.keys())) del state_dict[random_key_to_del] # check that certain keys didn't get saved with the model with tempfile.TemporaryDirectory() as tmpdirname: model.config.save_pretrained(tmpdirname) torch.save(state_dict, os.path.join(tmpdirname, "pytorch_model.bin")) model_fast_init = base_class_copy.from_pretrained(tmpdirname) model_slow_init = base_class_copy.from_pretrained(tmpdirname, _fast_init=False) for key in model_fast_init.state_dict().keys(): if isinstance(model_slow_init.state_dict()[key], torch.BoolTensor): max_diff = torch.max( model_slow_init.state_dict()[key] ^ model_fast_init.state_dict()[key] ).item() else: max_diff = torch.max( torch.abs(model_slow_init.state_dict()[key] - model_fast_init.state_dict()[key]) ).item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_torch_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if config.__class__ not in MODEL_MAPPING: self.skipTest(reason="Model class not in MODEL_MAPPING") base_class = MODEL_MAPPING[config.__class__] if isinstance(base_class, tuple): base_class = base_class[0] for model_class in self.all_model_classes: if model_class == base_class: continue # make a copy of model class to not break future tests # from 
https://stackoverflow.com/questions/9541025/how-to-copy-a-python-class class CopyClass(base_class): pass base_class_copy = CopyClass # make sure that all keys are expected for test base_class_copy._keys_to_ignore_on_load_missing = [] # make init deterministic, but make sure that # non-initialized weights throw errors nevertheless base_class_copy._init_weights = _mock_init_weights base_class_copy.init_weights = _mock_all_init_weights model = model_class(config) state_dict = model.state_dict() def check_equal(loaded): for key in state_dict.keys(): max_diff = torch.max( state_dict[key] ^ loaded[key] if isinstance(state_dict[key], torch.BoolTensor) else torch.abs(state_dict[key] - loaded[key]) ).item() self.assertLessEqual(max_diff, 1e-6, msg=f"{key} not identical") # check that certain keys didn't get saved with the model with tempfile.TemporaryDirectory() as tmpdirname: pt_checkpoint_path = os.path.join(tmpdirname, "pytorch_model.bin") torch.save(state_dict, pt_checkpoint_path, _use_new_zipfile_serialization=True) check_equal(load_state_dict(pt_checkpoint_path)) torch.save(state_dict, pt_checkpoint_path, _use_new_zipfile_serialization=False) check_equal(load_state_dict(pt_checkpoint_path)) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_determinism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_determinism(first, second): out_1 = first.cpu().numpy() out_2 = second.cpu().numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] out_1 = out_1[~np.isneginf(out_1)] out_2 = out_2[~np.isneginf(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): first = model(**self._prepare_for_class(inputs_dict, model_class))[0] second = model(**self._prepare_for_class(inputs_dict, model_class))[0] if isinstance(first, tuple) and isinstance(second, tuple): for tensor1, tensor2 in zip(first, second): check_determinism(tensor1, tensor2) else: check_determinism(first, second) def test_batching_equivalence(self): """ Tests that the model supports batching and that the output is nearly the same for the same input in different batch sizes. (Why "nearly the same" not "exactly the same"?
Batching uses different matmul shapes, which often leads to different results: https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535) """ def get_tensor_equivalence_function(batched_input): # models operating on continuous spaces have higher abs difference than LMs # instead, we can rely on cos distance for image/speech models, similar to `diffusers` if "input_ids" not in batched_input: return lambda tensor1, tensor2: ( 1.0 - F.cosine_similarity(tensor1.float().flatten(), tensor2.float().flatten(), dim=0, eps=1e-38) ) return lambda tensor1, tensor2: torch.max(torch.abs(tensor1 - tensor2)) def recursive_check(batched_object, single_row_object, model_name, key): if isinstance(batched_object, (list, tuple)): for batched_object_value, single_row_object_value in zip(batched_object, single_row_object): recursive_check(batched_object_value, single_row_object_value, model_name, key) elif isinstance(batched_object, dict): for batched_object_value, single_row_object_value in zip( batched_object.values(), single_row_object.values() ): recursive_check(batched_object_value, single_row_object_value, model_name, key) # do not compare returned loss (0-dim tensor) / codebook ids (int) / caching objects elif batched_object is None or not isinstance(batched_object, torch.Tensor): return elif batched_object.dim() == 0: return else: # indexing the first element does not always work # e.g. models that output similarity scores of size (N, M) would need to index [0, 0] slice_ids = [slice(0, index) for index in single_row_object.shape] batched_row = batched_object[slice_ids] self.assertFalse( torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}" ) self.assertFalse( torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}" ) self.assertTrue( (equivalence(batched_row, single_row_object)) <= 1e-03, msg=( f"Batched and Single row outputs are not equal in {model_name} for key={key}. " f"Difference={equivalence(batched_row, single_row_object)}." ), ) config, batched_input = self.model_tester.prepare_config_and_inputs_for_common() equivalence = get_tensor_equivalence_function(batched_input) for model_class in self.all_model_classes: config.output_hidden_states = True model_name = model_class.__name__ if hasattr(self.model_tester, "prepare_config_and_inputs_for_model_class"): config, batched_input = self.model_tester.prepare_config_and_inputs_for_model_class(model_class) batched_input_prepared = self._prepare_for_class(batched_input, model_class) model = model_class(config).to(torch_device).eval() batch_size = self.model_tester.batch_size single_row_input = {} for key, value in batched_input_prepared.items(): if isinstance(value, torch.Tensor) and value.shape[0] % batch_size == 0: # e.g. musicgen has inputs of size (bs*codebooks). 
in most cases value.shape[0] == batch_size single_batch_shape = value.shape[0] // batch_size single_row_input[key] = value[:single_batch_shape] else: single_row_input[key] = value with torch.no_grad(): model_batched_output = model(**batched_input_prepared) model_row_output = model(**single_row_input) if isinstance(model_batched_output, torch.Tensor): model_batched_output = {"model_output": model_batched_output} model_row_output = {"model_output": model_row_output} for key in model_batched_output: # DETR starts from zero-init queries to decoder, leading to cos_similarity = `nan` if hasattr(self, "zero_init_hidden_state") and "decoder_hidden_states" in key: model_batched_output[key] = model_batched_output[key][1:] model_row_output[key] = model_row_output[key][1:] recursive_check(model_batched_output[key], model_row_output[key], model_name, key) def check_training_gradient_checkpointing(self, gradient_checkpointing_kwargs=None): if not self.model_tester.is_training: self.skipTest(reason="ModelTester is not configured to run training tests") for model_class in self.all_model_classes: if ( model_class.__name__ in [ *get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES), ] or not model_class.supports_gradient_checkpointing ): continue config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=gradient_checkpointing_kwargs) model.train() # unfreeze additional layers for p in model.parameters(): p.requires_grad_(True) optimizer = torch.optim.SGD(model.parameters(), lr=0.01) inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() optimizer.step() for k, v in model.named_parameters(): if v.requires_grad: self.assertTrue(v.grad is not None, f"{k} in {model_class.__name__} has no gradient!") def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="ModelTester is not configured to run training tests") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True if model_class.__name__ in [ *get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES), ]: continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): # Scenario - 1 default behaviour self.check_training_gradient_checkpointing() def test_training_gradient_checkpointing_use_reentrant(self): # Scenario - 2 with `use_reentrant=True` - this is the default value that is used in pytorch's # torch.utils.checkpoint.checkpoint self.check_training_gradient_checkpointing(gradient_checkpointing_kwargs={"use_reentrant": True}) def test_training_gradient_checkpointing_use_reentrant_false(self): # Scenario - 3 with `use_reentrant=False` pytorch suggests users to use this value for # future releases: https://pytorch.org/docs/stable/checkpoint.html self.check_training_gradient_checkpointing(gradient_checkpointing_kwargs={"use_reentrant": False}) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True 
seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Question Answering model returns start_logits and end_logits if model_class.__name__ in [ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), ]: correct_outlen += 1 # start_logits and end_logits instead of only 1 output if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif 
self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) @slow def test_torchscript_simple(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self._create_and_check_torchscript(config, inputs_dict) @slow def test_torchscript_output_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_attentions = True self._create_and_check_torchscript(config, inputs_dict) @slow def test_torchscript_output_hidden_state(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True self._create_and_check_torchscript(config, inputs_dict) # This is copied from `torch/testing/_internal/jit_utils.py::clear_class_registry` def clear_torch_jit_class_registry(self): torch._C._jit_clear_class_registry() torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore() # torch 1.8 has no `_clear_class_state` in `torch.jit._state` if hasattr(torch.jit._state, "_clear_class_state"): torch.jit._state._clear_class_state() def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to `False`") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True for model_class in self.all_model_classes: for attn_implementation in ["eager", "sdpa"]: if attn_implementation == "sdpa" and (not model_class._supports_sdpa or not is_torch_sdpa_available()): continue configs_no_init._attn_implementation = attn_implementation model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) main_input_name = model_class.main_input_name try: if model.config.is_encoder_decoder: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward main_input = inputs[main_input_name] attention_mask = inputs["attention_mask"] decoder_input_ids = inputs["decoder_input_ids"] decoder_attention_mask = inputs["decoder_attention_mask"] model(main_input, attention_mask, decoder_input_ids, decoder_attention_mask) traced_model = torch.jit.trace( model, (main_input, attention_mask, decoder_input_ids, decoder_attention_mask) ) elif "bbox" in inputs and "image" in inputs: # LayoutLMv2 requires additional inputs input_ids = inputs["input_ids"] bbox = inputs["bbox"] image = inputs["image"].tensor model(input_ids, bbox, image) traced_model = torch.jit.trace( model, (input_ids, bbox, image), check_trace=False ) # when traced model is checked, an error is produced due to name mangling elif "bbox" in inputs: # Bros requires additional inputs (bbox) input_ids = inputs["input_ids"] bbox = inputs["bbox"] model(input_ids, bbox) traced_model = torch.jit.trace( model, (input_ids, bbox), check_trace=False ) # when traced model is checked, an error is produced due to name 
mangling elif ( "pixel_values" in inputs and "prompt_pixel_values" in inputs and "prompt_masks" in inputs ): # SegGpt requires additional inputs pixel_values = inputs["pixel_values"] prompt_pixel_values = inputs["prompt_pixel_values"] prompt_masks = inputs["prompt_masks"] model(pixel_values, prompt_pixel_values, prompt_masks) traced_model = torch.jit.trace( model, (pixel_values, prompt_pixel_values, prompt_masks), check_trace=False ) # when traced model is checked, an error is produced due to name mangling else: main_input = inputs[main_input_name] if model.config._attn_implementation == "sdpa": trace_input = {main_input_name: main_input} if "attention_mask" in inputs: trace_input["attention_mask"] = inputs["attention_mask"] else: self.skipTest(reason="testing SDPA without attention_mask is not supported") model(main_input, attention_mask=inputs["attention_mask"]) # example_kwarg_inputs was introduced in torch==2.0, but it is fine here since SDPA has a requirement on torch>=2.1. traced_model = torch.jit.trace(model, example_kwarg_inputs=trace_input) else: model(main_input) traced_model = torch.jit.trace(model, (main_input,)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): if layer_name in loaded_model_state_dict: p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. 
# (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() def test_torch_fx(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self._create_and_check_torch_fx_tracing(config, inputs_dict) def test_torch_fx_output_loss(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self._create_and_check_torch_fx_tracing(config, inputs_dict, output_loss=True) def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): if not is_torch_fx_available() or not self.fx_compatible: self.skipTest( f"Either torch.fx is not available, or the model type {config.model_type} is not compatible with torch.fx" ) configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) # We may want to test several inputs (various shapes, etc.). inputs_to_test = [inputs] if model.config.is_encoder_decoder: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward labels = inputs.get("labels", None) input_names = [ "attention_mask", "decoder_attention_mask", "decoder_input_ids", "input_features", "input_ids", "input_values", ] if labels is not None: input_names.append("labels") else: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "inputs_embeds", "pixel_values", "token_type_ids", "visual_feats", "visual_pos", "noise", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") if model.config.model_type in _FX_SUPPORTED_MODELS_WITH_KV_CACHE: input_names.append("past_key_values") # Generally model_tester.prepare_config_and_inputs_for_common seem not to generate past key values inputs. 
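                # The synthetic cache built below follows the legacy layout: one (key, value) pair per
                # layer, each tensor of shape (batch_size, num_heads, seq_len, head_dim), with seq_len=0
                # standing in for "no past" and seq_len=cache_length for a pre-filled past.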
if "past_key_values" not in inputs: batch_size = inputs[next(iter(inputs))].shape[0] num_heads = model.config.num_attention_heads head_dim = model.config.hidden_size // model.config.num_attention_heads cache_shape = (batch_size, num_heads, 0, head_dim) empty_pkv = tuple( ( torch.rand(cache_shape, dtype=torch.float, device=torch_device), torch.rand(cache_shape, dtype=torch.float, device=torch_device), ) for i in range(model.config.num_hidden_layers) ) cache_length = 9 cache_shape = (batch_size, num_heads, cache_length, head_dim) non_empty_pkv = tuple( ( torch.rand(cache_shape, dtype=torch.float, device=torch_device), torch.rand(cache_shape, dtype=torch.float, device=torch_device), ) for i in range(model.config.num_hidden_layers) ) inps = copy.deepcopy(inputs_to_test[0]) inputs_to_test[0]["past_key_values"] = empty_pkv inps["past_key_values"] = non_empty_pkv inputs_to_test.append(inps) past_mask = torch.ones(batch_size, cache_length, device=torch_device, dtype=torch.float) inputs_to_test[1]["attention_mask"] = torch.cat( (past_mask, inputs_to_test[1]["attention_mask"]), dim=1 ) forward_parameters = inspect.signature(model.forward).parameters if "input_ids" in forward_parameters and "inputs_embeds" in forward_parameters: inps = copy.deepcopy(inputs_to_test[0]) embedding_size = ( model.config.embedding_size if getattr(model.config, "embedding_size", None) is not None and model.config.model_type != "megatron-bert" else model.config.hidden_size ) if ( model.config.model_type in MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES and model.__class__.__name__ == MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES[model.config.model_type] ): batch_size, num_choices, sequence_length = inputs["input_ids"].shape shape = (batch_size, num_choices, sequence_length, embedding_size) elif inps["input_ids"].ndim == 2: batch_size, sequence_length = inputs["input_ids"].shape shape = (batch_size, sequence_length, embedding_size) else: self.skipTest("Unknown case") del inps["input_ids"] inps["inputs_embeds"] = torch.rand(shape, dtype=torch.float, device=torch_device) inputs_to_test.append(inps) for inps in inputs_to_test: filtered_inputs = {k: v for (k, v) in inps.items() if k in input_names} input_names_to_trace = list(filtered_inputs.keys()) if model.__class__.__name__ in set(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values()) and ( not hasattr(model.config, "problem_type") or model.config.problem_type is None ): model.config.problem_type = "single_label_classification" model.config.use_cache = "past_key_values" in input_names_to_trace traced_model = symbolic_trace(model, input_names_to_trace) with torch.no_grad(): traced_output = traced_model(**filtered_inputs) model_output = model(**filtered_inputs) def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = len(model_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], traced_output[i]), f"traced {i}th output doesn't match model {i}th output for {model_class}", ) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. 
        # (Even with this call, there is still a memory leak of ~0.04MB)
        self.clear_torch_jit_class_registry()

    def test_headmasking(self):
        if not self.test_head_masking:
            self.skipTest(reason="Model does not support head masking")

        global_rng.seed(42)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        global_rng.seed()

        inputs_dict["output_attentions"] = True
        config.output_hidden_states = True
        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()

            # Prepare head_mask
            # Set require_grad after having prepared the tensor to avoid error (leaf variable has been moved into the graph interior)
            head_mask = torch.ones(
                self.model_tester.num_hidden_layers,
                self.model_tester.num_attention_heads,
                device=torch_device,
            )
            head_mask[0, 0] = 0
            head_mask[-1, :-1] = 0
            head_mask.requires_grad_(requires_grad=True)
            inputs = self._prepare_for_class(inputs_dict, model_class).copy()
            inputs["head_mask"] = head_mask
            if model.config.is_encoder_decoder:
                signature = inspect.signature(model.forward)
                arg_names = [*signature.parameters.keys()]
                if "decoder_head_mask" in arg_names:  # necessary differentiation because of the T5 model
                    inputs["decoder_head_mask"] = head_mask
                if "cross_attn_head_mask" in arg_names:
                    inputs["cross_attn_head_mask"] = head_mask
            outputs = model(**inputs, return_dict=True)

            # Test that we can get a gradient back for importance score computation
            output = sum(t.sum() for t in outputs[0])
            output = output.sum()
            output.backward()
            multihead_outputs = head_mask.grad

            self.assertIsNotNone(multihead_outputs)
            self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers)

            def check_attentions_validity(attentions):
                # Remove Nan
                for t in attentions:
                    self.assertLess(
                        torch.sum(torch.isnan(t)), t.numel() / 4
                    )  # Check we don't have more than 25% nans (arbitrary)

                attentions = [
                    t.masked_fill(torch.isnan(t), 0.0) for t in attentions
                ]  # remove them (the test is less complete)

                self.assertAlmostEqual(attentions[0][..., 0, :, :].flatten().sum().item(), 0.0)
                self.assertNotEqual(attentions[0][..., -1, :, :].flatten().sum().item(), 0.0)
                if len(attentions) > 2:  # encoder-decoder models have only 2 layers in each module
                    self.assertNotEqual(attentions[1][..., 0, :, :].flatten().sum().item(), 0.0)
                self.assertAlmostEqual(attentions[-1][..., -2, :, :].flatten().sum().item(), 0.0)
                self.assertNotEqual(attentions[-1][..., -1, :, :].flatten().sum().item(), 0.0)

            if model.config.is_encoder_decoder:
                check_attentions_validity(outputs.encoder_attentions)
                check_attentions_validity(outputs.decoder_attentions)
                check_attentions_validity(outputs.cross_attentions)
            else:
                check_attentions_validity(outputs.attentions)

    def test_head_pruning(self):
        if not self.test_pruning:
            self.skipTest(reason="Pruning is not activated")

        for model_class in self.all_model_classes:
            (
                config,
                inputs_dict,
            ) = self.model_tester.prepare_config_and_inputs_for_common()

            if "head_mask" in inputs_dict:
                del inputs_dict["head_mask"]

            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config=config)
            model.to(torch_device)
            model.eval()
            heads_to_prune = {
                0: list(range(1, self.model_tester.num_attention_heads)),
                -1: [0],
            }
            model.prune_heads(heads_to_prune)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            attentions = outputs[-1]

            self.assertEqual(attentions[0].shape[-3], 1)
            # TODO: To have this check, we will need at least 3 layers.
Do we really need it? # self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads) self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) def test_head_pruning_save_load_from_pretrained(self): if not self.test_pruning: self.skipTest(reason="Pruning is not activated") for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False model = model_class(config=config) model.to(torch_device) model.eval() heads_to_prune = { 0: list(range(1, self.model_tester.num_attention_heads)), -1: [0], } model.prune_heads(heads_to_prune) with tempfile.TemporaryDirectory() as temp_dir_name: model.save_pretrained(temp_dir_name) model = model_class.from_pretrained(temp_dir_name) model.to(torch_device) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], 1) # TODO: To have this check, we will need at least 3 layers. Do we really need it? # self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads) self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) def test_head_pruning_save_load_from_config_init(self): if not self.test_pruning: self.skipTest(reason="Pruning is not activated") for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False heads_to_prune = { 0: list(range(1, self.model_tester.num_attention_heads)), -1: [0], } config.pruned_heads = heads_to_prune model = model_class(config=config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], 1) # TODO: To have this check, we will need at least 3 layers. Do we really need it? 
# self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads) self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) def test_head_pruning_integration(self): if not self.test_pruning: self.skipTest(reason="Pruning is not activated") for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False heads_to_prune = {1: [1, 2]} config.pruned_heads = heads_to_prune model = model_class(config=config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 0) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2) with tempfile.TemporaryDirectory() as temp_dir_name: model.save_pretrained(temp_dir_name) model = model_class.from_pretrained(temp_dir_name) model.to(torch_device) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 0) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2) heads_to_prune = {0: [0], 1: [1, 2]} model.prune_heads(heads_to_prune) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2) self.assertDictEqual(model.config.pruned_heads, {0: [0], 1: [1, 2]}) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: seq_length = seq_length * self.model_tester.chunk_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] 
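            # With the kwarg removed from the inputs, the model has to pick the flag up from the config
            # set just below, which is what the second check verifies.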
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)

        output = outputs[0]

        if config.is_encoder_decoder:
            # Seq2Seq models
            encoder_hidden_states = outputs.encoder_hidden_states[0]
            encoder_hidden_states.retain_grad()

            decoder_hidden_states = outputs.decoder_hidden_states[0]
            decoder_hidden_states.retain_grad()

            if self.has_attentions:
                encoder_attentions = outputs.encoder_attentions[0]
                encoder_attentions.retain_grad()

                decoder_attentions = outputs.decoder_attentions[0]
                decoder_attentions.retain_grad()

                cross_attentions = outputs.cross_attentions[0]
                cross_attentions.retain_grad()

            output.flatten()[0].backward(retain_graph=True)

            self.assertIsNotNone(encoder_hidden_states.grad)
            self.assertIsNotNone(decoder_hidden_states.grad)

            if self.has_attentions:
                self.assertIsNotNone(encoder_attentions.grad)
                self.assertIsNotNone(decoder_attentions.grad)
                self.assertIsNotNone(cross_attentions.grad)
        else:
            # Encoder-/Decoder-only models
            hidden_states = outputs.hidden_states[0]
            hidden_states.retain_grad()

            if self.has_attentions:
                attentions = outputs.attentions[0]
                attentions.retain_grad()

            output.flatten()[0].backward(retain_graph=True)

            self.assertIsNotNone(hidden_states.grad)

            if self.has_attentions:
                self.assertIsNotNone(attentions.grad)

    def test_feed_forward_chunking(self):
        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            torch.manual_seed(0)
            config = copy.deepcopy(original_config)
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            hidden_states_no_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]

            torch.manual_seed(0)
            config.chunk_size_feed_forward = 1
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            hidden_states_with_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]
            self.assertTrue(torch.allclose(hidden_states_no_chunk, hidden_states_with_chunk, atol=1e-3))

    def test_resize_position_vector_embeddings(self):
        if not self.test_resize_position_embeddings:
            self.skipTest(reason="Model does not have position embeddings")

        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            config = copy.deepcopy(original_config)
            model = model_class(config)
            model.to(torch_device)

            if self.model_tester.is_training is False:
                model.eval()

            max_position_embeddings = config.max_position_embeddings

            # Retrieve the embeddings and clone them
            if model.config.is_encoder_decoder:
                encoder_model_embed, decoder_model_embed = model.get_position_embeddings()
                encoder_cloned_embeddings = encoder_model_embed.weight.clone()
                decoder_cloned_embeddings = decoder_model_embed.weight.clone()
            else:
                model_embed = model.get_position_embeddings()
                cloned_embeddings = model_embed.weight.clone()

            # Check that resizing the position embeddings with a larger max_position_embeddings increases
            # the model's position embeddings size
            model.resize_position_embeddings(max_position_embeddings + 10)
            self.assertEqual(model.config.max_position_embeddings,
max_position_embeddings + 10) # Check that it actually resizes the embeddings matrix if model.config.is_encoder_decoder: encoder_model_embed, decoder_model_embed = model.get_position_embeddings() self.assertEqual(encoder_model_embed.weight.shape[0], encoder_cloned_embeddings.shape[0] + 10) self.assertEqual(decoder_model_embed.weight.shape[0], decoder_cloned_embeddings.shape[0] + 10) else: model_embed = model.get_position_embeddings() self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the position embeddings with a smaller max_position_embeddings decreases # the model's max_position_embeddings model.resize_position_embeddings(max_position_embeddings - 5) self.assertEqual(model.config.max_position_embeddings, max_position_embeddings - 5) # Check that it actually resizes the embeddings matrix if model.config.is_encoder_decoder: encoder_model_embed, decoder_model_embed = model.get_position_embeddings() self.assertEqual(encoder_model_embed.weight.shape[0], encoder_cloned_embeddings.shape[0] - 5) self.assertEqual(decoder_model_embed.weight.shape[0], decoder_cloned_embeddings.shape[0] - 5) else: model_embed = model.get_position_embeddings() self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 5) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. models_equal = True if model.config.is_encoder_decoder: for p1, p2 in zip(encoder_cloned_embeddings, encoder_model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False for p1, p2 in zip(decoder_cloned_embeddings, decoder_model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False else: for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_tokens_embeddings(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: self.skipTest(reason="test_resize_embeddings is set to `False`") for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) model_embed_pre_resize = model.get_input_embeddings() type_model_embed_pre_resize = type(model_embed_pre_resize) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.text_config.vocab_size if hasattr(config, "text_config") else config.vocab_size # Retrieve the embeddings and clone theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) new_model_vocab_size = ( model.config.text_config.vocab_size if hasattr(model.config, "text_config") else model.config.vocab_size ) self.assertEqual(new_model_vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check to make sure the type of embeddings returned post resizing is same as type of input 
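            # (e.g. a model that ships a custom embedding class should not be silently swapped for a plain nn.Embedding)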
type_model_embed_post_resize = type(model_embed) self.assertEqual(type_model_embed_pre_resize, type_model_embed_post_resize) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) new_model_vocab_size = ( model.config.text_config.vocab_size if hasattr(model.config, "text_config") else model.config.vocab_size ) self.assertEqual(new_model_vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) # make sure that decoder_input_ids are resized as well if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) model_vocab_size = config.text_config.vocab_size if hasattr(config, "text_config") else config.vocab_size model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1) new_model_vocab_size = ( model.config.text_config.vocab_size if hasattr(model.config, "text_config") else model.config.vocab_size ) self.assertTrue(new_model_vocab_size + 10, model_vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64) new_model_vocab_size = ( model.config.text_config.vocab_size if hasattr(model.config, "text_config") else model.config.vocab_size ) self.assertTrue(model_embed.weight.shape[0] // 64, 0) self.assertTrue(model_embed.weight.shape[0], new_model_vocab_size) self.assertTrue(new_model_vocab_size, model.vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0] // 64, 0) # Check that resizing a model to a multiple of pad_to_multiple leads to a model of exactly that size target_dimension = 128 model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0], target_dimension) with self.assertRaisesRegex( ValueError, "Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. 
Please make sure to pass an integer", ): model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3) def test_resize_embeddings_untied(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: self.skipTest(reason="test_resize_embeddings is set to `False`") original_config.tie_word_embeddings = False # if model cannot untied embeddings -> leave test if original_config.tie_word_embeddings: self.skipTest(reason="Model cannot untied embeddings") for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.text_config.vocab_size if hasattr(config, "text_config") else config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) new_model_vocab_size = ( model.config.text_config.vocab_size if hasattr(model.config, "text_config") else model.config.vocab_size ) self.assertEqual(new_model_vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) new_model_vocab_size = ( model.config.text_config.vocab_size if hasattr(model.config, "text_config") else model.config.vocab_size ) self.assertEqual(new_model_vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) def test_model_get_set_embeddings(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Embedding, AdaptiveEmbedding)) new_input_embedding_layer = nn.Embedding(10, 10) model.set_input_embeddings(new_input_embedding_layer) self.assertEqual(model.get_input_embeddings(), new_input_embedding_layer) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "forward")) # The main input is the name of the argument after `self` observed_main_input_name = 
list(model_signature.parameters.keys())[1] self.assertEqual(model_class.main_input_name, observed_main_input_name) def test_correct_missing_keys(self): if not self.test_missing_keys: self.skipTest(reason="test_missing_keys is set to `False`") config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) base_model_prefix = model.base_model_prefix if hasattr(model, base_model_prefix): extra_params = {k: v for k, v in model.named_parameters() if not k.startswith(base_model_prefix)} extra_params.update({k: v for k, v in model.named_buffers() if not k.startswith(base_model_prefix)}) # Some models define this as None if model._keys_to_ignore_on_load_missing: for key in model._keys_to_ignore_on_load_missing: extra_params.pop(key, None) if not extra_params: # In that case, we *are* on a head model, but every # single key is not actual parameters and this is # tested in `test_tied_model_weights_key_ignore` test. continue with tempfile.TemporaryDirectory() as temp_dir_name: model.base_model.save_pretrained(temp_dir_name) model, loading_info = model_class.from_pretrained(temp_dir_name, output_loading_info=True) self.assertGreater(len(loading_info["missing_keys"]), 0, model.__class__.__name__) def test_tie_model_weights(self): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to `False`") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_same_values(layer_1, layer_2): equal = True for p1, p2 in zip(layer_1.weight, layer_2.weight): if p1.data.ne(p2.data).sum() > 0: equal = False return equal for model_class in self.all_model_classes: config.torchscript = True model_not_tied = model_class(config) if model_not_tied.get_output_embeddings() is None: continue config_tied = copy.deepcopy(config) config_tied.torchscript = False model_tied = model_class(config_tied) params_tied = list(model_tied.parameters()) # Check that the embedding layer and decoding layer are the same in size and in value # self.assertTrue(check_same_values(embeddings, decoding)) # Check that after resize they remain tied. 
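            # i.e. resizing the input embeddings must not break the tie to the output embeddings, so the
            # parameter list compared below should keep the same length.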
vocab_size = config.text_config.vocab_size if hasattr(config, "text_config") else config.vocab_size model_tied.resize_token_embeddings(vocab_size + 10) params_tied_2 = list(model_tied.parameters()) self.assertEqual(len(params_tied_2), len(params_tied)) @require_safetensors def test_can_use_safetensors(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model_tied = model_class(config) with tempfile.TemporaryDirectory() as d: try: model_tied.save_pretrained(d, safe_serialization=True) except Exception as e: raise Exception(f"Class {model_class.__name__} cannot be saved using safetensors: {e}") model_reloaded, infos = model_class.from_pretrained(d, output_loading_info=True) # Checking the state dicts are correct reloaded_state = model_reloaded.state_dict() for k, v in model_tied.state_dict().items(): self.assertIn(k, reloaded_state, f"Key {k} is missing from reloaded") torch.testing.assert_close( v, reloaded_state[k], msg=lambda x: f"{model_class.__name__}: Tensor {k}: {x}" ) # Checking there was no complain of missing weights self.assertEqual(infos["missing_keys"], []) # Checking the tensor sharing are correct ptrs = defaultdict(list) for k, v in model_tied.state_dict().items(): ptrs[v.data_ptr()].append(k) shared_ptrs = {k: v for k, v in ptrs.items() if len(v) > 1} for _, shared_names in shared_ptrs.items(): reloaded_ptrs = {reloaded_state[k].data_ptr() for k in shared_names} self.assertEqual( len(reloaded_ptrs), 1, f"The shared pointers are incorrect, found different pointers for keys {shared_names}", ) def test_load_save_without_tied_weights(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() config.tie_word_embeddings = False for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as d: model.save_pretrained(d) model_reloaded, infos = model_class.from_pretrained(d, output_loading_info=True) # Checking the state dicts are correct reloaded_state = model_reloaded.state_dict() for k, v in model.state_dict().items(): self.assertIn(k, reloaded_state, f"Key {k} is missing from reloaded") torch.testing.assert_close( v, reloaded_state[k], msg=lambda x: f"{model_class.__name__}: Tensor {k}: {x}" ) # Checking there was no complain of missing weights self.assertEqual(infos["missing_keys"], []) def test_tied_weights_keys(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() config.tie_word_embeddings = True for model_class in self.all_model_classes: model_tied = model_class(config) ptrs = collections.defaultdict(list) for name, tensor in model_tied.state_dict().items(): ptrs[id_tensor_storage(tensor)].append(name) # These are all the pointers of shared tensors. 
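        # For a typical tied LM head this yields groups such as
        # ["model.embed_tokens.weight", "lm_head.weight"] (names are illustrative only).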
tied_params = [names for _, names in ptrs.items() if len(names) > 1] tied_weight_keys = model_tied._tied_weights_keys if model_tied._tied_weights_keys is not None else [] # Detect we get a hit for each key for key in tied_weight_keys: is_tied_key = any(re.search(key, p) for group in tied_params for p in group) self.assertTrue(is_tied_key, f"{key} is not a tied weight key for {model_class}.") # Removed tied weights found from tied params -> there should only be one left after for key in tied_weight_keys: for i in range(len(tied_params)): tied_params[i] = [p for p in tied_params[i] if re.search(key, p) is None] tied_params = [group for group in tied_params if len(group) > 1] self.assertListEqual( tied_params, [], f"Missing `_tied_weights_keys` for {model_class}: add all of {tied_params} except one.", ) def test_model_weights_reload_no_missing_tied_weights(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) # We are nuking ALL weights on file, so every parameter should # yell on load. We're going to detect if we yell too much, or too little. placeholder_dict = {"tensor": torch.tensor([1, 2])} safe_save_file(placeholder_dict, os.path.join(tmp_dir, "model.safetensors"), metadata={"format": "pt"}) model_reloaded, infos = model_class.from_pretrained(tmp_dir, output_loading_info=True) prefix = f"{model_reloaded.base_model_prefix}." params = dict(model_reloaded.named_parameters()) params.update(dict(model_reloaded.named_buffers())) param_names = {k[len(prefix) :] if k.startswith(prefix) else k for k in params.keys()} missing_keys = set(infos["missing_keys"]) extra_missing = missing_keys - param_names # Remove tied weights from extra missing: they are normally not warned as missing if their tied # counterpart is present but here there are no weights at all so we do get the warning. ptrs = collections.defaultdict(list) for name, tensor in model_reloaded.state_dict().items(): ptrs[id_tensor_storage(tensor)].append(name) tied_params = [names for _, names in ptrs.items() if len(names) > 1] for group in tied_params: group = {k[len(prefix) :] if k.startswith(prefix) else k for k in group} # We remove the group from extra_missing if not all weights from group are in it if len(group - extra_missing) > 0: extra_missing = extra_missing - set(group) self.assertEqual( extra_missing, set(), f"This model {model_class.__name__} might be missing some `keys_to_ignore`: {extra_missing}. " f"For debugging, tied parameters are {tied_params}", ) missed_missing = param_names - missing_keys # Remove nonpersistent buffers from missed_missing buffers = [n for n, _ in model_reloaded.named_buffers()] nonpersistent_buffers = {n for n in buffers if n not in model_reloaded.state_dict()} nonpersistent_buffers = { k[len(prefix) :] if k.startswith(prefix) else k for k in nonpersistent_buffers } missed_missing = missed_missing - nonpersistent_buffers if model_reloaded._keys_to_ignore_on_load_missing is None: expected_missing = set() else: expected_missing = set(model_reloaded._keys_to_ignore_on_load_missing) self.assertEqual( missed_missing, expected_missing, f"This model {model_class.__name__} ignores keys {missed_missing} but they look like real" " parameters. 
If they are non persistent buffers make sure to instantiate them with" " `persistent=False`", ) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) # Don't copy this method to model specific test file! # TODO: remove this method once the issues are all fixed! 
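    # Helper used by the PT/TF equivalence tests: it rewrites attention masks so that no sequence is
    # fully masked, since models still disagree on the large-negative fill value used for masked
    # positions (see the TODO inside).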
def _make_attention_mask_non_null(self, inputs_dict): """Make sure no sequence has all zeros as attention mask""" for k in ["attention_mask", "encoder_attention_mask", "decoder_attention_mask"]: if k in inputs_dict: attention_mask = inputs_dict[k] # Make sure no all 0s attention masks - to avoid failure at this moment. # Put `1` at the beginning of sequences to make it still work when combining causal attention masks. # TODO: remove this line once a fix regarding large negative values for attention mask is done. attention_mask = torch.cat( [torch.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], dim=-1 ) # Here we make the first sequence with all 0s as attention mask. # Currently, this will fail for `TFWav2Vec2Model`. This is caused by the different large negative # values, like `1e-4`, `1e-9`, `1e-30` and `-inf` for attention mask across models/frameworks. # TODO: enable this block once the large negative values thing is cleaned up. # (see https://github.com/huggingface/transformers/issues/14859) # attention_mask = torch.cat( # [torch.zeros_like(attention_mask[:1], dtype=attention_mask.dtype), attention_mask[1:]], # dim=0 # ) inputs_dict[k] = attention_mask # Don't copy this method to model specific test file! # TODO: remove this method once the issues are all fixed! def _postprocessing_to_ignore_test_cases(self, tf_outputs, pt_outputs, model_class): """For temporarily ignoring some failed test cases (issues to be fixed)""" tf_keys = {k for k, v in tf_outputs.items() if v is not None} pt_keys = {k for k, v in pt_outputs.items() if v is not None} key_differences = tf_keys.symmetric_difference(pt_keys) if model_class.__name__ in [ "FlaubertWithLMHeadModel", "FunnelForPreTraining", "ElectraForPreTraining", "XLMWithLMHeadModel", ]: for k in key_differences: if k in ["loss", "losses"]: tf_keys.discard(k) pt_keys.discard(k) elif model_class.__name__.startswith("GPT2"): # `TFGPT2` has `past_key_values` as a tensor while `GPT2` has it as a tuple. tf_keys.discard("past_key_values") pt_keys.discard("past_key_values") # create new outputs from the remaining fields new_tf_outputs = type(tf_outputs)(**{k: tf_outputs[k] for k in tf_keys}) new_pt_outputs = type(pt_outputs)(**{k: pt_outputs[k] for k in pt_keys}) return new_tf_outputs, new_pt_outputs # Copied from tests.test_modeling_tf_common.TFModelTesterMixin.check_pt_tf_outputs def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): """Check the outputs from PyTorch and TensorFlow models are close enough. Checks are done in a recursive way. Args: model_class: The class of the model that is currently testing. For example, `TFBertModel`, TFBertForMaskedLM`, `TFBertForSequenceClassification`, etc. Mainly used for providing more informative error messages. name (`str`): The name of the output. For example, `output.hidden_states`, `output.attentions`, etc. attributes (`Tuple[str]`): The names of the output's element if the output is a tuple/list with each element being a named field in the output. """ self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`). 
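        # The check recurses: a ModelOutput is unpacked into its named fields, tuples/lists are walked
        # element by element, and only leaf tensors are compared numerically.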
if isinstance(tf_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `tf_outputs` is", ) # Don't copy this block to model specific test file! # TODO: remove this method and this line after issues are fixed tf_outputs, pt_outputs = self._postprocessing_to_ignore_test_cases(tf_outputs, pt_outputs, model_class) tf_keys = [k for k, v in tf_outputs.items() if v is not None] pt_keys = [k for k, v in pt_outputs.items() if v is not None] self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch") # convert to the case of `tuple` # appending each key to the current (string) `name` attributes = tuple([f"{name}.{k}" for k in tf_keys]) self.check_pt_tf_outputs( tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.) elif type(tf_outputs) in [tuple, list]: self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch") self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch") if attributes is not None: # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`) self.assertEqual( len(attributes), len(tf_outputs), f"{name}: The tuple `attributes` should have the same length as `tf_outputs`", ) else: # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `name` attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))]) for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes): self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(tf_outputs, tf.Tensor): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `tf_outputs` is" ) tf_outputs = tf_outputs.numpy() pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch" ) # deal with NumPy's scalars to make replacing nan values by 0 work. if np.isscalar(tf_outputs): tf_outputs = np.array([tf_outputs]) pt_outputs = np.array([pt_outputs]) tf_nans = np.isnan(tf_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[tf_nans] = 0 tf_outputs[tf_nans] = 0 pt_outputs[pt_nans] = 0 tf_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(tf_outputs - pt_outputs)) self.assertLessEqual(max_diff, tol, f"{name}: Difference between PyTorch and TF is {max_diff} (>= {tol}).") else: raise ValueError( "`tf_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `tf.Tensor`. Got" f" {type(tf_outputs)} instead." 
) def prepare_tf_inputs_from_pt_inputs(self, pt_inputs_dict): tf_inputs_dict = {} for key, tensor in pt_inputs_dict.items(): # skip key that does not exist in tf if isinstance(tensor, bool): tf_inputs_dict[key] = tensor elif key == "input_values": tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32) elif key == "pixel_values": tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32) elif key == "input_features": tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32) # other general float inputs elif tensor.is_floating_point(): tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32) else: tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.int32) return tf_inputs_dict def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict): tf_inputs_dict = self.prepare_tf_inputs_from_pt_inputs(pt_inputs_dict) # send pytorch inputs to the correct device pt_inputs_dict = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items() } # send pytorch model to the correct device pt_model.to(torch_device) # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences pt_model.eval() with torch.no_grad(): pt_outputs = pt_model(**pt_inputs_dict) tf_outputs = tf_model(tf_inputs_dict) # tf models returned loss is usually a tensor rather than a scalar. # (see `hf_compute_loss`: it uses `tf.keras.losses.Reduction.NONE`) # Change it here to a scalar to match PyTorch models' loss tf_loss = getattr(tf_outputs, "loss", None) if tf_loss is not None: tf_outputs.loss = tf.math.reduce_mean(tf_loss) self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(pt_model)) @is_pt_tf_cross_test def test_pt_tf_model_equivalence(self, allow_missing_keys=False): import transformers for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() tf_model_class_name = "TF" + model_class.__name__ # Add the "TF" at the beginning if not hasattr(transformers, tf_model_class_name): self.skipTest(reason="transformers does not have TF version of this model yet") # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`. # TODO: Use a uniform value for all models, make sure all tests pass without this processing, and remove it. 
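            # (note: the helper below mutates `inputs_dict` in place)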
self._make_attention_mask_non_null(inputs_dict) tf_model_class = getattr(transformers, tf_model_class_name) pt_model = model_class(config) tf_model = tf_model_class(config) pt_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs_dict_with_labels = self._prepare_for_class( inputs_dict, model_class, # Not all models accept "labels" in the forward pass (yet :) ) return_labels=True if "labels" in inspect.signature(model_class.forward).parameters.keys() else False, ) # make sure only tf inputs are forward that actually exist in function args tf_input_keys = set(inspect.signature(tf_model.call).parameters.keys()) # remove all head masks tf_input_keys.discard("head_mask") tf_input_keys.discard("cross_attn_head_mask") tf_input_keys.discard("decoder_head_mask") pt_inputs_dict = {k: v for k, v in pt_inputs_dict.items() if k in tf_input_keys} pt_inputs_dict_with_labels = {k: v for k, v in pt_inputs_dict_with_labels.items() if k in tf_input_keys} # For some models (e.g. base models), there is no label returned. # Set the input dict to `None` to avoid check outputs twice for the same input dicts. if not set(pt_inputs_dict_with_labels.keys()).symmetric_difference(pt_inputs_dict.keys()): pt_inputs_dict_with_labels = None # Check we can load pt model in tf and vice-versa with model => model functions # Here requires `tf_inputs_dict` to build `tf_model` tf_inputs_dict = self.prepare_tf_inputs_from_pt_inputs(pt_inputs_dict) tf_model = transformers.load_pytorch_model_in_tf2_model( tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys ) pt_model = transformers.load_tf2_model_in_pytorch_model( pt_model, tf_model, allow_missing_keys=allow_missing_keys ) # Original test: check without `labels` self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict) # check with `labels` if pt_inputs_dict_with_labels: self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict_with_labels) # Check we can load pt model in tf and vice-versa with checkpoint => model functions with tempfile.TemporaryDirectory() as tmpdirname: pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin") torch.save(pt_model.state_dict(), pt_checkpoint_path) tf_model = transformers.load_pytorch_checkpoint_in_tf2_model( tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys ) tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5") tf_model.save_weights(tf_checkpoint_path) pt_model = transformers.load_tf2_checkpoint_in_pytorch_model( pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys ) # Original test: check without `labels` self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict) # check with `labels` if pt_inputs_dict_with_labels: self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict_with_labels) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): """ Args: model_class: The class of the model that is currently testing. For example, ..., etc. Currently unused, but it could make debugging easier and faster. names: A string, or a list of strings. These specify what fx_outputs/pt_outputs represent in the model outputs. Currently unused, but in the future, we could use this information to make the error message clearer by giving the name(s) of the output tensor(s) with large difference(s) between PT and Flax. 
""" self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`). if isinstance(fx_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `fx_outputs` is", ) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys, f"{name}: Output keys differ between Flax and PyTorch") # convert to the case of `tuple` # appending each key to the current (string) `name` attributes = tuple([f"{name}.{k}" for k in fx_keys]) self.check_pt_flax_outputs( fx_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.) elif type(fx_outputs) in [tuple, list]: self.assertEqual( type(fx_outputs), type(pt_outputs), f"{name}: Output types differ between Flax and PyTorch" ) self.assertEqual( len(fx_outputs), len(pt_outputs), f"{name}: Output lengths differ between Flax and PyTorch" ) if attributes is not None: # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`) self.assertEqual( len(attributes), len(fx_outputs), f"{name}: The tuple `attributes` should have the same length as `fx_outputs`", ) else: # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `name` attributes = tuple([f"{name}_{idx}" for idx in range(len(fx_outputs))]) for fx_output, pt_output, attr in zip(fx_outputs, pt_outputs, attributes): self.check_pt_flax_outputs(fx_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(fx_outputs, jnp.ndarray): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `fx_outputs` is" ) # Using `np.asarray` gives `ValueError: assignment destination is read-only` at the line `fx_outputs[fx_nans] = 0`. fx_outputs = np.array(fx_outputs) pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( fx_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between Flax and PyTorch" ) # deal with NumPy's scalars to make replacing nan values by 0 work. if np.isscalar(fx_outputs): fx_outputs = np.array([fx_outputs]) pt_outputs = np.array([pt_outputs]) fx_nans = np.isnan(fx_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[fx_nans] = 0 fx_outputs[fx_nans] = 0 pt_outputs[pt_nans] = 0 fx_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(fx_outputs - pt_outputs)) self.assertLessEqual( max_diff, tol, f"{name}: Difference between PyTorch and Flax is {max_diff} (>= {tol})." ) else: raise ValueError( "`fx_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `jnp.ndarray`. Got" f" {type(fx_outputs)} instead." 
) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): self.skipTest(reason="No Flax model exists for this class") # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. pt_model.config.use_cache = False # load Flax class fx_model = fx_model_class(config, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} # send pytorch inputs to the correct device pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } # convert inputs to Flax fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): self.skipTest(reason="No Flax model exists for this class") # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
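                # Otherwise the PyTorch outputs would contain `past_key_values` entries with no Flax
                # counterpart and the key comparison further down would fail.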
pt_model.config.use_cache = False # load Flax class fx_model = fx_model_class(config, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} # send pytorch inputs to the correct device pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } # convert inputs to Flax fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) # make sure weights are tied in PyTorch pt_model.tie_weights() # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = model_class.from_pretrained( tmpdirname, from_flax=True, attn_implementation=fx_model.config._attn_implementation ) # send pytorch model to the correct device pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] def test_inputs_embeds_matches_input_ids(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class.__name__ not in get_values(MODEL_MAPPING_NAMES): continue model = model_class(config) model.to(torch_device) model.eval() model_forward_args = inspect.signature(model.forward).parameters if "inputs_embeds" not in model_forward_args: self.skipTest(reason="This model doesn't use `inputs_embeds`") inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) pad_token_id = config.pad_token_id if config.pad_token_id is not None else 1 wte = model.get_input_embeddings() if not self.is_encoder_decoder: input_ids = inputs["input_ids"] # some models infer position ids/attn mask differently when input ids # by check if pad_token let's make 
sure no padding is in input ids not_pad_token_id = pad_token_id + 1 if max(0, pad_token_id - 1) == 0 else pad_token_id - 1 input_ids[input_ids == pad_token_id] = not_pad_token_id del inputs["input_ids"] inputs_embeds = wte(input_ids) with torch.no_grad(): out_ids = model(input_ids=input_ids, **inputs)[0] out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) encoder_input_ids[encoder_input_ids == pad_token_id] = max(0, pad_token_id + 1) decoder_input_ids[decoder_input_ids == pad_token_id] = max(0, pad_token_id + 1) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) inputs_embeds = wte(encoder_input_ids) decoder_inputs_embeds = wte(decoder_input_ids) with torch.no_grad(): out_ids = model(input_ids=encoder_input_ids, decoder_input_ids=decoder_input_ids, **inputs)[0] out_embeds = model( inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **inputs )[0] self.assertTrue(torch.allclose(out_embeds, out_ids)) def test_inputs_embeds_matches_input_ids_with_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class.__name__ not in get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES): continue model = model_class(config) model.to(torch_device) model.eval() model_forward_args = inspect.signature(model.forward).parameters if any(argument not in model_forward_args for argument in ["inputs_embeds", "position_ids"]): self.skipTest(reason="This model doesn't use `inputs_embeds` or `position_ids`.") has_inputs_embeds_forwarding = "inputs_embeds" in set( inspect.signature(model.prepare_inputs_for_generation).parameters.keys() ) if not has_inputs_embeds_forwarding: self.skipTest(reason="This model doesn't support `inputs_embeds` passed to `generate`.") inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) pad_token_id = config.pad_token_id if config.pad_token_id is not None else 1 wte = model.get_input_embeddings() if not self.is_encoder_decoder: input_ids = inputs["input_ids"] # some models infer position ids/attn mask differently when input ids # by check if pad_token let's make sure no padding is in input ids not_pad_token_id = pad_token_id + 1 if max(0, pad_token_id - 1) == 0 else pad_token_id - 1 input_ids[input_ids == pad_token_id] = not_pad_token_id del inputs["input_ids"] inputs_embeds = wte(input_ids) out_ids = model.generate(input_ids=input_ids, **inputs, max_new_tokens=2)[:, -2:] out_embeds = model.generate(inputs_embeds=inputs_embeds, **inputs, max_new_tokens=2) else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) encoder_input_ids[encoder_input_ids == pad_token_id] = max(0, pad_token_id + 1) decoder_input_ids[decoder_input_ids == pad_token_id] = max(0, pad_token_id + 1) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) inputs_embeds = wte(encoder_input_ids) decoder_inputs_embeds = wte(decoder_input_ids) out_ids = model.generate( input_ids=encoder_input_ids, decoder_input_ids=decoder_input_ids, **inputs, max_new_tokens=2 )[:, -2:] out_embeds = model.generate( inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **inputs, max_new_tokens=2, ) self.assertTrue(torch.allclose(out_embeds, out_ids)) @require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # 
some params shouldn't be scattered by nn.DataParallel # so just remove them if they are present. blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"] for k in blacklist_non_batched_params: inputs_dict.pop(k, None) # move input tensors to cuda:O for k, v in inputs_dict.items(): if torch.is_tensor(v): inputs_dict[k] = v.to(0) for model_class in self.all_model_classes: model = model_class(config=config) model.to(0) model.eval() # Wrap model in nn.DataParallel model = nn.DataParallel(model) with torch.no_grad(): _ = model(**self._prepare_for_class(inputs_dict, model_class)) @require_torch_multi_gpu def test_model_parallelization(self): if not self.test_model_parallel: self.skipTest(reason="test_model_parallel is set to False") # a candidate for testing_utils def get_current_gpu_memory_use(): """returns a list of cuda memory allocations per GPU in MBs""" per_device_memory = [] for id in range(torch.cuda.device_count()): with torch.cuda.device(id): per_device_memory.append(torch.cuda.memory_allocated() >> 20) return per_device_memory # Needs a large model to see the difference. config = self.model_tester.get_large_model_config() for model_class in self.all_parallelizable_model_classes: torch.cuda.empty_cache() # 1. single gpu memory load + unload + memory measurements # Retrieve initial memory usage (can easily be ~0.6-1.5GB if cuda-kernels have been preloaded by previous tests) memory_at_start = get_current_gpu_memory_use() # Put model on device 0 and take a memory snapshot model = model_class(config) model.to("cuda:0") memory_after_model_load = get_current_gpu_memory_use() # The memory use on device 0 should be higher than it was initially. self.assertGreater(memory_after_model_load[0], memory_at_start[0]) del model gc.collect() torch.cuda.empty_cache() # 2. 
MP test # it's essential to re-calibrate the usage before the next stage memory_at_start = get_current_gpu_memory_use() # Spread model layers over multiple devices model = model_class(config) model.parallelize() memory_after_parallelization = get_current_gpu_memory_use() # Assert that the memory use on all devices is higher than it was when loaded only on CPU for n in range(len(model.device_map.keys())): self.assertGreater(memory_after_parallelization[n], memory_at_start[n]) # Assert that the memory use of device 0 is lower than it was when the entire model was loaded on it self.assertLess(memory_after_parallelization[0], memory_after_model_load[0]) # Assert that the memory use of device 1 is higher than it was when the entire model was loaded # on device 0 and device 1 wasn't used at all self.assertGreater(memory_after_parallelization[1], memory_after_model_load[1]) del model gc.collect() torch.cuda.empty_cache() @require_torch_multi_gpu def test_model_parallel_equal_results(self): if not self.test_model_parallel: self.skipTest(reason="test_model_parallel is set to False") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_parallelizable_model_classes: inputs_dict = self._prepare_for_class(inputs_dict, model_class) def cast_to_device(dictionary, device): output = {} for k, v in dictionary.items(): if isinstance(v, torch.Tensor): output[k] = v.to(device) else: output[k] = v return output model = model_class(config) output = model(**cast_to_device(inputs_dict, "cpu")) model.parallelize() parallel_output = model(**cast_to_device(inputs_dict, "cuda:0")) for value, parallel_value in zip(output, parallel_output): if isinstance(value, torch.Tensor): self.assertTrue(torch.allclose(value, parallel_value.to("cpu"), atol=1e-7)) elif isinstance(value, (Tuple, List)): for value_, parallel_value_ in zip(value, parallel_value): self.assertTrue(torch.allclose(value_, parallel_value_.to("cpu"), atol=1e-7)) def check_device_map_is_respected(self, model, device_map): for param_name, param in model.named_parameters(): # Find device in device_map while len(param_name) > 0 and param_name not in device_map: param_name = ".".join(param_name.split(".")[:-1]) if param_name not in device_map: raise ValueError(f"device map is incomplete, it does not contain any device for `{param_name}`.") param_device = device_map[param_name] if param_device in ["cpu", "disk"]: self.assertEqual(param.device, torch.device("meta")) elif param_device in ["mps"]: self.assertEqual(param.device, torch.device("mps")) else: # when loaded with device_map, `param_device` are integer values for cuda/xpu/npu/mlu self.assertEqual(param.device, torch.device(f"{torch_device}:{param_device}")) @require_accelerate @mark.accelerate_tests @require_torch_gpu def test_disk_offload_bin(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir, safe_serialization=False) with self.assertRaises(ValueError): max_size = int(self.model_split_percents[0] * model_size) max_memory = {0: max_size, "cpu": max_size} # This errors out because it's missing an offload folder
new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) max_size = int(self.model_split_percents[1] * model_size) max_memory = {0: max_size, "cpu": max_size} new_model = model_class.from_pretrained( tmp_dir, device_map="auto", max_memory=max_memory, offload_folder=tmp_dir ) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) if isinstance(base_output[0], tuple) and isinstance(new_output[0], tuple): self.assertTrue(all(torch.allclose(a, b, atol=1e-5) for a, b in zip(base_output[0], new_output[0]))) else: self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) @require_accelerate @mark.accelerate_tests @require_torch_gpu def test_disk_offload_safetensors(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) max_size = int(self.model_split_percents[1] * model_size) max_memory = {0: max_size, "cpu": max_size} # This doesn't error out as it's in safetensors and doesn't need an offload folder new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) if isinstance(base_output[0], tuple) and isinstance(new_output[0], tuple): self.assertTrue(all(torch.allclose(a, b, atol=1e-5) for a, b in zip(base_output[0], new_output[0]))) else: self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) @require_accelerate @mark.accelerate_tests @require_torch_gpu def test_cpu_offload(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] # We test several splits of sizes to make sure it works.
max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents[1:]] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) for max_size in max_gpu_sizes: max_memory = {0: max_size, "cpu": model_size * 2} new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) # Making sure part of the model will actually end up offloaded self.assertSetEqual(set(new_model.hf_device_map.values()), {0, "cpu"}) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) if isinstance(base_output[0], tuple) and isinstance(new_output[0], tuple): self.assertTrue(all(torch.allclose(a, b, atol=1e-5) for a, b in zip(base_output[0], new_output[0]))) else: self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) @require_accelerate @mark.accelerate_tests @require_torch_multi_accelerator def test_model_parallelism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] # We test several splits of sizes to make sure it works. max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents[1:]] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) for max_size in max_gpu_sizes: max_memory = {0: max_size, 1: model_size * 2, "cpu": model_size * 2} new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) # Making sure part of the model will actually end up offloaded self.assertSetEqual(set(new_model.hf_device_map.values()), {0, 1}) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) if isinstance(base_output[0], tuple) and isinstance(new_output[0], tuple): self.assertTrue(all(torch.allclose(a, b, atol=1e-5) for a, b in zip(base_output[0], new_output[0]))) else: self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) def test_problem_types(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() problem_types = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if model_class.__name__ not in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), ]: continue for problem_type in problem_types: with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"): config.problem_type = problem_type["title"] config.num_labels = problem_type["num_labels"] model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) if problem_type["num_labels"] > 1: inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"]) inputs["labels"] = inputs["labels"].to(problem_type["dtype"]) # This tests that we do not trigger the warning from PyTorch "Using a target size that is different # to the input size.
This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom that something is wrong with the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=True) as warning_list: loss = model(**inputs).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message): raise ValueError( f"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() def test_load_with_mismatched_shapes(self): if not self.test_mismatched_shapes: self.skipTest(reason="test_mismatched_shapes is set to False") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class.__name__ not in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES): continue with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(config) model.save_pretrained(tmp_dir) # Fails when we don't set ignore_mismatched_sizes=True with self.assertRaises(RuntimeError): new_model = AutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42) with self.assertRaises(RuntimeError): new_model_without_prefix = AutoModel.from_pretrained(tmp_dir, vocab_size=10) logger = logging.get_logger("transformers.modeling_utils") with CaptureLogger(logger) as cl: new_model = AutoModelForSequenceClassification.from_pretrained( tmp_dir, num_labels=42, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) new_model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) logits = new_model(**inputs).logits self.assertEqual(logits.shape[1], 42) with CaptureLogger(logger) as cl: new_model_without_prefix = AutoModel.from_pretrained( tmp_dir, vocab_size=10, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) input_ids = ids_tensor((2, 8), 10) new_model_without_prefix.to(torch_device) if self.is_encoder_decoder: new_model_without_prefix(input_ids, decoder_input_ids=input_ids) else: new_model_without_prefix(input_ids) def test_mismatched_shapes_have_properly_initialized_weights(self): if not self.test_mismatched_shapes: self.skipTest(reason="test_mismatched_shapes is set to False") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: mappings = [ MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, ] is_classification_model = any(model_class.__name__ in get_values(mapping) for mapping in mappings) if not is_classification_model: continue # TODO: ydshieh is_special_classes = model_class.__name__ in [ "wav2vec2.masked_spec_embed", "Wav2Vec2ForSequenceClassification", "CLIPForImageClassification", "RegNetForImageClassification", "ResNetForImageClassification", "UniSpeechSatForSequenceClassification", "Wav2Vec2BertForSequenceClassification", "PvtV2ForImageClassification", "Wav2Vec2ConformerForSequenceClassification", "WavLMForSequenceClassification", "SwiftFormerForImageClassification", "SEWForSequenceClassification", "BitForImageClassification", "SEWDForSequenceClassification", "SiglipForImageClassification", "HubertForSequenceClassification", "Swinv2ForImageClassification", "Data2VecAudioForSequenceClassification",
"UniSpeechForSequenceClassification", "PvtForImageClassification", ] special_param_names = [ r"^bit\.", r"^classifier\.weight", r"^classifier\.bias", r"^classifier\..+\.weight", r"^classifier\..+\.bias", r"^data2vec_audio\.", r"^dist_head\.", r"^head\.", r"^hubert\.", r"^pvt\.", r"^pvt_v2\.", r"^regnet\.", r"^resnet\.", r"^sew\.", r"^sew_d\.", r"^swiftformer\.", r"^swinv2\.", r"^transformers\.models\.swiftformer\.", r"^unispeech\.", r"^unispeech_sat\.", r"^vision_model\.", r"^wav2vec2\.", r"^wav2vec2_bert\.", r"^wav2vec2_conformer\.", r"^wavlm\.", ] with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(configs_no_init) model.save_pretrained(tmp_dir) # Fails when we don't set ignore_mismatched_sizes=True with self.assertRaises(RuntimeError): new_model = model_class.from_pretrained(tmp_dir, num_labels=42) logger = logging.get_logger("transformers.modeling_utils") with CaptureLogger(logger) as cl: new_model = model_class.from_pretrained(tmp_dir, num_labels=42, ignore_mismatched_sizes=True) self.assertIn("the shapes did not match", cl.out) for name, param in new_model.named_parameters(): if param.requires_grad: param_mean = ((param.data.mean() * 1e9).round() / 1e9).item() if not ( is_special_classes and any(len(re.findall(target, name)) > 0 for target in special_param_names) ): self.assertIn( param_mean, [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: # Here we allow the parameters' mean to be in the range [-5.0, 5.0] instead of being # either `0.0` or `1.0`, because their initializations are not using # `config.initializer_factor` (or something similar). The purpose of this test is simply # to make sure they are properly initialized (to avoid very large value or even `nan`). self.assertGreaterEqual( param_mean, -5.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) self.assertLessEqual( param_mean, 5.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_matched_shapes_have_loaded_weights_when_some_mismatched_shapes_exist(self): # 1. Create a dummy class. Should have buffers as well? To make sure we test __init__ class MyClass(PreTrainedModel): config_class = PretrainedConfig def __init__(self, config=None): super().__init__(config if config is not None else PretrainedConfig()) self.linear = nn.Linear(10, config.num_labels, bias=True) self.embedding = nn.Embedding(10, 10) self.std = 1 def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data = nn.init.kaiming_uniform_(module.weight.data, np.sqrt(5)) if module.bias is not None: module.bias.data = module.bias.data.normal_(mean=0.0, std=self.std) # Used to make sure the weights with matched shape are loaded correctly config = PretrainedConfig() config.num_labels = 3 model = MyClass(config=config) # Used to make sure the weights with mismatched shape are properly initialized set_seed(0) config = PretrainedConfig() config.num_labels = 4 # not to init. the weights during the creation: to match the logic in `from_pretrained`, so we can keep the # same sequence of random ops in the execution path to allow us to compare `target_model` and `new_model` below # for `linear` part. 
with ContextManagers([no_init_weights(True)]): target_model = MyClass(config=config) target_model.apply(target_model._initialize_weights) with tempfile.TemporaryDirectory() as tmpdirname: state_dict = model.state_dict() del state_dict["linear.weight"] model.config.save_pretrained(tmpdirname) torch.save(state_dict, os.path.join(tmpdirname, "pytorch_model.bin")) set_seed(0) new_model = MyClass.from_pretrained(tmpdirname, num_labels=4, ignore_mismatched_sizes=True) for key in new_model.state_dict().keys(): # check weight values for weights with matched shapes are identical # (i.e. correctly loaded from the checkpoint) if key not in ["linear.weight", "linear.bias"]: max_diff = torch.max(torch.abs(model.state_dict()[key] - new_model.state_dict()[key])) self.assertLessEqual( max_diff.item(), 1e-6, msg=f"the weight values for `{key}` in `new_model` and `model` are not identical", ) else: # check we have some mismatched shapes self.assertNotEqual( model.state_dict()[key].shape, new_model.state_dict()[key].shape, msg=f"the weight shapes for {key} in `model` and `new_model` should differ", ) # check the weights with mismatched shape are properly initialized max_diff = torch.max(torch.abs(new_model.state_dict()[key] - target_model.state_dict()[key])) self.assertLessEqual( max_diff.item(), 1e-6, msg=f"the weight values for `{key}` in `new_model` and `target_model` are not identical", ) def test_model_is_small(self): # Just a consistency check to make sure we are not running tests on 80M parameter models. config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) num_params = model.num_parameters() assert ( num_params < 1000000 ), f"{model_class} is too big for the common tests ({num_params})! It should have 1M max." 
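# Note: the tests that follow mostly exercise the alternative attention backends (FlashAttention-2 and SDPA). They
# check that the backend-specific attention modules are actually instantiated, that their outputs match the eager
# implementation within tolerance under left/right padding, and that generation stays consistent across backends.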
@require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_conversion(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2" ).to(torch_device) for _, module in model.named_modules(): if "FlashAttention" in module.__class__.__name__: return self.assertTrue(False, "FlashAttention2 modules not found in model") @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow @is_flaky() def test_flash_attn_2_inference_equivalence(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.bfloat16) model.to(torch_device) dummy_input = inputs_dict[model.main_input_name][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: dummy_attention_mask = dummy_attention_mask[:1] dummy_attention_mask[:, 1:] = 1 dummy_attention_mask[:, :1] = 0 if model.config.is_encoder_decoder: decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:1] outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) else: outputs = model(dummy_input, output_hidden_states=True) outputs_fa = model_fa(dummy_input, output_hidden_states=True) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) if model.config.is_encoder_decoder: other_inputs = { "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": dummy_attention_mask, "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) else: other_inputs = { "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not 
model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2) # check with inference + dropout model.train() _ = model_fa(dummy_input, **other_inputs) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow @is_flaky() def test_flash_attn_2_inference_equivalence_right_padding(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.bfloat16) model.to(torch_device) dummy_input = inputs_dict[model.main_input_name][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: dummy_attention_mask = dummy_attention_mask[:1] dummy_attention_mask[:, :-1] = 1 dummy_attention_mask[:, -1:] = 0 if model.config.is_encoder_decoder: decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:1] outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) else: outputs = model(dummy_input, output_hidden_states=True) outputs_fa = model_fa(dummy_input, output_hidden_states=True) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) if model.config.is_encoder_decoder: other_inputs = { "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": dummy_attention_mask, "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) else: other_inputs = { "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow @is_flaky() def test_flash_attn_2_generate_left_padding(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True).to( torch_device ) dummy_input = inputs_dict[model.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) # make sure we do left padding dummy_attention_mask[:, :-1] = 0 dummy_attention_mask[:, -1:] = 1 out = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False ) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, ).to(torch_device) out_fa = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False ) self.assertTrue(torch.allclose(out, out_fa)) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @is_flaky() @slow def test_flash_attn_2_generate_padding_right(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True).to( torch_device ) dummy_input = inputs_dict[model.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) # make sure we do right padding dummy_attention_mask[:, :-1] = 1 dummy_attention_mask[:, -1:] = 0 out = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False ) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, ).to(torch_device) out_fa = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=1, do_sample=False ) self.assertTrue(torch.allclose(out, out_fa)) @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) @require_torch_sdpa @slow def test_eager_matches_sdpa_inference(self, torch_dtype: str): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self.all_model_classes[0]._supports_sdpa: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") if torch_dtype == "float16" and not is_torch_fp16_available_on_device(torch_device): self.skipTest(f"float16 not supported on {torch_device} (on the specific device currently used)") if torch_dtype == "bfloat16" and not is_torch_bf16_available_on_device(torch_device): self.skipTest( f"bfloat16 not supported on {torch_device} (on the specific device currently used, e.g. Nvidia T4 GPU)" ) # Not sure whether it's fine to put torch.XXX in a decorator if torch is not available so hacking it here instead. 
if torch_dtype == "float16": torch_dtype = torch.float16 elif torch_dtype == "bfloat16": torch_dtype = torch.bfloat16 elif torch_dtype == "float32": torch_dtype = torch.float32 atols = { ("cpu", False, torch.float32): 1e-6, ("cpu", False, torch.bfloat16): 1e-2, ("cpu", True, torch.float32): 1e-6, ("cpu", True, torch.bfloat16): 1e-2, ("cuda", False, torch.float32): 1e-6, ("cuda", False, torch.bfloat16): 1e-2, ("cuda", False, torch.float16): 5e-3, ("cuda", True, torch.float32): 1e-6, ("cuda", True, torch.bfloat16): 1e-2, ("cuda", True, torch.float16): 5e-3, } rtols = { ("cpu", False, torch.float32): 1e-4, ("cpu", False, torch.bfloat16): 1e-2, ("cpu", True, torch.float32): 1e-4, ("cpu", True, torch.bfloat16): 1e-2, ("cuda", False, torch.float32): 1e-4, ("cuda", False, torch.bfloat16): 1e-2, ("cuda", False, torch.float16): 5e-3, ("cuda", True, torch.float32): 1e-4, ("cuda", True, torch.bfloat16): 3e-2, ("cuda", True, torch.float16): 5e-3, } def get_mean_reldiff(failcase, x, ref, atol, rtol): return f"{failcase}: mean relative difference: {((x - ref).abs() / (ref.abs() + 1e-12)).mean():.3e}, torch atol = {atol}, torch rtol = {rtol}" for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) # FIXME: we deactivate boolean mask for models using "use_mask_token" in their constructors. # These models support masking only in the case `use_mask_token=True`. Otherwise they cannot consume an input mask. # This means that the class needs to be instantiated much later, after `use_mask` is set, which means a significant refactor of the code. # However masking there is not done at any layers that matters (i.e self-attention), therefore we can safely deactivate it. deactivate_mask = "use_mask_token" in inspect.signature(model_class).parameters is_encoder_decoder = model.config.is_encoder_decoder with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname, torch_dtype=torch_dtype) model_sdpa = model_sdpa.eval().to(torch_device) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") model_eager = model_class.from_pretrained( tmpdirname, torch_dtype=torch_dtype, attn_implementation="eager", ) model_eager = model_eager.eval().to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: raise ValueError("The eager model should not have SDPA attention layers") has_sdpa = False for name, submodule in model_sdpa.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: has_sdpa = True break if not has_sdpa and model_sdpa.config.model_type != "falcon": raise ValueError("The SDPA model should have SDPA attention layers") # We use these for loops instead of parameterized.expand just for the interest of avoiding loading/saving 16 times the model, # but it would be nicer to have an efficient way to use parameterized.expand fail_cases = [] for padding_side in ["left", "right"]: for use_mask in [False, True]: for output_attentions in [True, False]: can_output_attn = "output_attentions" in inspect.signature(model_sdpa.forward).parameters if not (self.has_attentions and can_output_attn) and output_attentions: continue for batch_size in [1, 5]: dummy_input = inputs_dict[model.main_input_name] 
if dummy_input.dtype in [torch.float32, torch.bfloat16, torch.float16]: dummy_input = dummy_input.to(torch_dtype) dummy_input = dummy_input[:batch_size] if dummy_input.shape[0] != batch_size: if dummy_input.dtype in [torch.float32, torch.bfloat16, torch.float16]: extension = torch.rand( batch_size - dummy_input.shape[0], *dummy_input.shape[1:], dtype=torch_dtype, device=torch_device, ) dummy_input = torch.cat((dummy_input, extension), dim=0).to(torch_device) else: extension = torch.randint( high=5, size=(batch_size - dummy_input.shape[0], *dummy_input.shape[1:]), dtype=dummy_input.dtype, device=torch_device, ) dummy_input = torch.cat((dummy_input, extension), dim=0).to(torch_device) if not use_mask: dummy_attention_mask = None else: dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is None: if is_encoder_decoder: seqlen = inputs_dict.get("decoder_input_ids", dummy_input).shape[-1] else: seqlen = dummy_input.shape[-1] dummy_attention_mask = ( torch.ones(batch_size, seqlen).to(torch.int64).to(torch_device) ) dummy_attention_mask = dummy_attention_mask[:batch_size] if dummy_attention_mask.shape[0] != batch_size: extension = torch.ones( batch_size - dummy_attention_mask.shape[0], *dummy_attention_mask.shape[1:], dtype=dummy_attention_mask.dtype, device=torch_device, ) dummy_attention_mask = torch.cat((dummy_attention_mask, extension), dim=0) dummy_attention_mask = dummy_attention_mask.to(torch_device) dummy_attention_mask[:] = 1 if padding_side == "left": dummy_attention_mask[-1, :-1] = 1 dummy_attention_mask[-1, -4:] = 0 elif padding_side == "right": dummy_attention_mask[-1, 1:] = 1 dummy_attention_mask[-1, :3] = 0 for enable_kernels in [False, True]: failcase = f"padding_side={padding_side}, use_mask={use_mask}, batch_size={batch_size}, enable_kernels={enable_kernels}" if is_encoder_decoder: decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[ :batch_size ] if decoder_input_ids.shape[0] != batch_size: extension = torch.ones( batch_size - decoder_input_ids.shape[0], *decoder_input_ids.shape[1:], dtype=decoder_input_ids.dtype, device=torch_device, ) decoder_input_ids = torch.cat((decoder_input_ids, extension), dim=0) decoder_input_ids = decoder_input_ids.to(torch_device) # TODO: never an `attention_mask` arg here? processed_inputs = { model.main_input_name: dummy_input, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": dummy_attention_mask, "output_hidden_states": True, } else: processed_inputs = { model.main_input_name: dummy_input, "output_hidden_states": True, } # Otherwise fails for e.g. 
WhisperEncoderModel if "attention_mask" in inspect.signature(model_eager.forward).parameters: processed_inputs["attention_mask"] = dummy_attention_mask if ( self.has_attentions and "output_attentions" in inspect.signature(model_sdpa.forward).parameters ): processed_inputs["output_attentions"] = output_attentions if not deactivate_mask and ( "bool_masked_pos" in inspect.signature(model_eager.forward).parameters ): dummy_mask = torch.ones((self.model_tester.num_masks,)) # In case of additional token (like class) we define a custom `mask_length` if hasattr(self.model_tester, "mask_length"): mask_length = self.model_tester.mask_length - dummy_mask.size(0) else: mask_length = self.model_tester.seq_length - dummy_mask.size(0) dummy_mask = torch.cat([dummy_mask, torch.zeros(mask_length)]) dummy_bool_masked_pos = dummy_mask.expand(batch_size, -1).bool() processed_inputs["bool_masked_pos"] = dummy_bool_masked_pos.to(torch_device) if "noise" in inspect.signature(model_eager.forward).parameters: np.random.seed(2) num_patches = int( (self.model_tester.image_size // self.model_tester.patch_size) ** 2 ) noise = np.random.uniform(size=(batch_size, num_patches)) processed_inputs["noise"] = torch.from_numpy(noise) # TODO: test gradients as well (& for FA2 as well!) with torch.no_grad(): with torch.backends.cuda.sdp_kernel( enable_flash=enable_kernels, enable_math=True, enable_mem_efficient=enable_kernels, ): prepared_inputs = self._prepare_for_class(processed_inputs, model_class) outputs_eager = model_eager(**prepared_inputs) outputs_sdpa = model_sdpa(**prepared_inputs) logits_eager = ( outputs_eager.hidden_states[-1] if not is_encoder_decoder else outputs_eager.decoder_hidden_states[-1] ) logits_sdpa = ( outputs_sdpa.hidden_states[-1] if not is_encoder_decoder else outputs_sdpa.decoder_hidden_states[-1] ) if torch_device in ["cpu", "cuda"]: atol = atols[torch_device, enable_kernels, torch_dtype] rtol = rtols[torch_device, enable_kernels, torch_dtype] else: atol = 1e-7 rtol = 1e-4 # Masked tokens output slightly deviates - we don't mind that. 
if use_mask: if padding_side == "left": sub_sdpa = logits_sdpa[:-1] sub_eager = logits_eager[:-1] if not torch.allclose(sub_sdpa, sub_eager, atol=atol, rtol=rtol): fail_cases.append( get_mean_reldiff(failcase, sub_sdpa, sub_eager, atol, rtol) ) sub_sdpa = logits_sdpa[-1, :-4] sub_eager = logits_eager[-1, :-4] if not torch.allclose(sub_sdpa, sub_eager, atol=atol, rtol=rtol): fail_cases.append( get_mean_reldiff(failcase, sub_sdpa, sub_eager, atol, rtol) ) # Testing the padding tokens is not really meaningful but anyway # sub_sdpa = logits_sdpa[-1, -4:] # sub_eager = logits_eager[-1, -4:] # if not torch.allclose(sub_sdpa, sub_eager, atol=atol, rtol=rtol): # fail_cases.append(get_mean_reldiff(failcase, sub_sdpa, sub_eager, 4e-2, 4e-2)) elif padding_side == "right": sub_sdpa = logits_sdpa[:-1] sub_eager = logits_eager[:-1] if not torch.allclose(sub_sdpa, sub_eager, atol=atol, rtol=rtol): fail_cases.append( get_mean_reldiff(failcase, sub_sdpa, sub_eager, atol, rtol) ) sub_sdpa = logits_sdpa[-1, 3:] sub_eager = logits_eager[-1, 3:] if not torch.allclose(sub_sdpa, sub_eager, atol=atol, rtol=rtol): fail_cases.append( get_mean_reldiff(failcase, sub_sdpa, sub_eager, atol, rtol) ) # Testing the padding tokens is not really meaningful but anyway # sub_sdpa = logits_sdpa[-1, :3] # sub_eager = logits_eager[-1, :3] # if not torch.allclose(sub_sdpa, sub_eager, atol=atol, rtol=rtol): # fail_cases.append(get_mean_reldiff(failcase, sub_sdpa, sub_eager, 4e-2, 4e-2)) else: if not torch.allclose(logits_sdpa, logits_eager, atol=atol, rtol=rtol): fail_cases.append( get_mean_reldiff(failcase, logits_sdpa, logits_eager, atol, rtol) ) self.assertTrue(len(fail_cases) == 0, "\n".join(fail_cases)) @require_torch_sdpa @require_torch_gpu @slow def test_sdpa_can_dispatch_on_flash(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") compute_capability = torch.cuda.get_device_capability() major, _ = compute_capability if not torch.version.cuda or major < 8: self.skipTest(reason="This test requires an NVIDIA GPU with compute capability >= 8.0") for model_class in self.all_model_classes: if not model_class._supports_sdpa: self.skipTest(f"{model_class.__name__} does not support SDPA") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict = self._prepare_for_class(inputs_dict, model_class) if config.model_type in ["llava", "llava_next", "vipllava", "video_llava"]: self.skipTest( reason="Llava-like models currently (transformers==4.39.1) requires an attention_mask input" ) if config.model_type in ["paligemma"]: self.skipTest( "PaliGemma-like models currently (transformers==4.41.0) requires an attention_mask input" ) if config.model_type in ["idefics"]: self.skipTest(reason="Idefics currently (transformers==4.39.1) requires an image_attention_mask input") model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, attn_implementation="sdpa") model.to(torch_device) inputs_dict.pop("attention_mask", None) inputs_dict.pop("decoder_attention_mask", None) for name, inp in inputs_dict.items(): if isinstance(inp, torch.Tensor) and inp.dtype in [torch.float32, torch.float16]: inputs_dict[name] = inp.to(torch.float16) with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): _ = model(**inputs_dict) @require_torch_sdpa @require_torch_accelerator @slow def 
test_sdpa_can_compile_dynamic(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if "cuda" in torch_device: compute_capability = torch.cuda.get_device_capability() major, _ = compute_capability if not torch.version.cuda or major < 8: self.skipTest(reason="This test requires an NVIDIA GPU with compute capability >= 8.0") for model_class in self.all_model_classes: if not model_class._supports_sdpa: self.skipTest(f"{model_class.__name__} does not support SDPA") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict = self._prepare_for_class(inputs_dict, model_class) if config.model_type in ["dbrx"]: self.skipTest( "DBRX (transformers==4.40) requires a modification to support dynamic shapes with compile." ) model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, attn_implementation="sdpa") model.to(torch_device) # For PyTorch 2.1 - 2.3.0 set `dynamic=True`. In the future setting `dynamic=None` and using `torch._dynamo.mark_dynamic()` # on input tensors will be required. `mark_dynamic` currently raises inconsistent shape errors. model = torch.compile(model, dynamic=True) inputs_dict.pop("attention_mask", None) inputs_dict.pop("decoder_attention_mask", None) for name, inp in inputs_dict.items(): if isinstance(inp, torch.Tensor) and inp.dtype in [torch.float32, torch.float16]: inputs_dict[name] = inp.to(torch.float16) # use no_grad to save some memory with torch.no_grad(): _ = model(**inputs_dict) @require_torch_sdpa @slow def test_eager_matches_sdpa_generate(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") max_new_tokens = 30 if len(self.all_generative_model_classes) == 0: self.skipTest(f"{self.__class__.__name__} tests a model that does support generate: skipping this test") for model_class in self.all_generative_model_classes: if not model_class._supports_sdpa: self.skipTest(f"{model_class.__name__} does not support SDPA") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) model_sdpa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True, ).to(torch_device) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") model_eager = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, low_cpu_mem_usage=True, attn_implementation="eager", ).to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: raise ValueError("The eager model should not have SDPA attention layers") has_sdpa = False for name, submodule in model_sdpa.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or 
"SdpaSelfAttention" in class_name: has_sdpa = True break if not has_sdpa: raise ValueError("The SDPA model should have SDPA attention layers") # Just test that a large cache works as expected res_eager = model_eager.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False ) res_sdpa = model_sdpa.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False ) self.assertTrue(torch.allclose(res_eager, res_sdpa)) @require_torch_sdpa def test_sdpa_matches_eager_sliding_window(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") WINDOW_ATTENTION_MODELS = ["mistral", "mixtral", "qwen2", "qwen_moe", "starcoder2"] if len(self.all_generative_model_classes) == 0: self.skipTest(f"No generative model classes for {self.__class__.__name__}") for model_class in self.all_generative_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if config.model_type not in WINDOW_ATTENTION_MODELS: self.skipTest(f"{config.model_type} does not use window attention") config.sliding_window = 2 dummy_input = inputs_dict[model_class.main_input_name] attention_mask = inputs_dict["attention_mask"] self.assertTrue(dummy_input.ndim == 2) self.assertTrue(dummy_input.shape[1] > 6) with tempfile.TemporaryDirectory() as tmpdir: with torch.device(torch_device): model_eager = AutoModelForCausalLM.from_config( config, attn_implementation="eager", torch_dtype=torch.float32 ) model_eager.save_pretrained(tmpdir) with torch.device(torch_device): model_sdpa = AutoModelForCausalLM.from_pretrained( tmpdir, attn_implementation="sdpa", torch_dtype=torch.float32 ) model_eager = model_eager.eval() model_sdpa = model_sdpa.eval() with torch.no_grad(): with torch.backends.cuda.sdp_kernel( enable_flash=False, enable_math=True, enable_mem_efficient=False, ): res_eager = model_eager(**inputs_dict, return_dict=False)[0] res_sdpa = model_sdpa(**inputs_dict, return_dict=False)[0] # Only non-padding tokens are expected to match. 
self.assertTrue( torch.allclose(res_eager[attention_mask == 1], res_sdpa[attention_mask == 1], rtol=1e-4, atol=1e-4) ) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_generate_use_cache(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") max_new_tokens = 30 for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, ).to(torch_device) # Just test that a large cache works as expected _ = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False, use_cache=True, ) # Generate with one batch only to test generation when attention mask will be None # when real inputs are used, because there is no padding. See issue #32237 for more dummy_input = dummy_input[:1, ...] dummy_attention_mask = torch.ones_like(dummy_attention_mask[:1, ...]) _ = model.generate( dummy_input, attention_mask=dummy_attention_mask, max_new_tokens=max_new_tokens, do_sample=False, use_cache=True, ) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_generate_reuse_cache(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") max_new_tokens = 2 for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = dummy_input.shape[1] * 2 + max_new_tokens * 2 + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, ).to(torch_device) # run generate once to get filled cache output = model.generate( dummy_input, max_new_tokens=max_new_tokens, do_sample=False, use_cache=True, return_dict_in_generate=True, ) past_key_values = output.past_key_values # Try to continue generation from where we left, given that we have more than 1 new token to process # e.g. 
this can happen in speculative decoding when feeding candidate tokens back to target model dummy_input_updated = torch.cat([dummy_input, output.sequences], dim=-1) _ = model.generate( dummy_input_updated, max_new_tokens=max_new_tokens, do_sample=False, use_cache=True, past_key_values=past_key_values, ) @require_flash_attn @require_torch_gpu @require_bitsandbytes @mark.flash_attn_test @slow def test_flash_attn_2_fp32_ln(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) dummy_input = inputs_dict[model.main_input_name] dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input)) batch_size = dummy_attention_mask.shape[0] is_padding_right = dummy_attention_mask[:, -1].sum().item() != batch_size # To avoid errors with padding_side=="right" if is_padding_right: dummy_attention_mask = torch.ones_like(dummy_input) model = model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, load_in_4bit=True, ) for _, param in model.named_parameters(): # upcast only layer norms if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16): param.data = param.data.to(torch.float32) if model.config.is_encoder_decoder: dummy_decoder_input_ids = inputs_dict["decoder_input_ids"] dummy_decoder_attention_mask = inputs_dict["decoder_attention_mask"] _ = model(dummy_input, decoder_input_ids=dummy_decoder_input_ids) # with attention mask _ = model( dummy_input, attention_mask=dummy_attention_mask, decoder_input_ids=dummy_decoder_input_ids, decoder_attention_mask=dummy_decoder_attention_mask, ) else: _ = model(dummy_input) # with attention mask _ = model(dummy_input, attention_mask=dummy_attention_mask) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") max_new_tokens = 30 for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if 0 not in inputs_dict.get("attention_mask", []) or "attention_mask" not in inputs_dict: self.skipTest("Model dummy inputs should contain padding in their attention mask") dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # ensure left padding, to adapt for some models if 0 in inputs_dict["attention_mask"][:, -1]: inputs_dict["attention_mask"] = inputs_dict["attention_mask"].flip(1) dummy_attention_mask = inputs_dict["attention_mask"] inputs_dict["input_ids"][~dummy_attention_mask.bool()] = 
config.pad_token_id model = ( model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, ) .to(torch_device) .eval() ) # flatten padfree_inputs_dict = { k: v[dummy_attention_mask.bool()].unsqueeze(0) for k, v in inputs_dict.items() if not k == "attention_mask" } # add position_ids padfree_inputs_dict["position_ids"] = ( torch.cat([torch.arange(length) for length in dummy_attention_mask.sum(1).tolist()]) .long() .unsqueeze(0) .to(torch_device) ) res_padded = model(**inputs_dict) res_padfree = model(**padfree_inputs_dict) logits_padded = res_padded.logits[inputs_dict["attention_mask"].bool()] logits_padfree = res_padfree.logits[0] torch.testing.assert_close(logits_padded.argmax(-1), logits_padfree.argmax(-1), atol=0, rtol=0) # acceptable numerical instability tol = torch.finfo(torch.float16).eps torch.testing.assert_close(logits_padded, logits_padfree, atol=tol, rtol=tol) @is_pt_tf_cross_test def test_tf_from_pt_safetensors(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() tf_model_class_name = "TF" + model_class.__name__ # Add the "TF" at the beginning if not hasattr(transformers, tf_model_class_name): self.skipTest(reason="transformers does not have this model in TF version yet") tf_model_class = getattr(transformers, tf_model_class_name) pt_model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname, safe_serialization=True) tf_model_1 = tf_model_class.from_pretrained(tmpdirname, from_pt=True) pt_model.save_pretrained(tmpdirname, safe_serialization=False) tf_model_2 = tf_model_class.from_pretrained(tmpdirname, from_pt=True) # Check models are equal for p1, p2 in zip(tf_model_1.weights, tf_model_2.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @is_pt_flax_cross_test def test_flax_from_pt_safetensors(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() flax_model_class_name = "Flax" + model_class.__name__ # Add the "Flax at the beginning if not hasattr(transformers, flax_model_class_name): self.skipTest(reason="transformers does not have this model in Flax version yet") flax_model_class = getattr(transformers, flax_model_class_name) pt_model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname, safe_serialization=True) flax_model_1 = flax_model_class.from_pretrained(tmpdirname, from_pt=True) pt_model.save_pretrained(tmpdirname, safe_serialization=False) flax_model_2 = flax_model_class.from_pretrained(tmpdirname, from_pt=True) # Check models are equal self.assertTrue(check_models_equal(flax_model_1, flax_model_2)) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_from_config(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, _ = self.model_tester.prepare_config_and_inputs_for_common() # TODO: to change it in the future with other relevant auto classes fa2_model = AutoModelForCausalLM.from_config( config, attn_implementation="flash_attention_2", torch_dtype=torch.bfloat16 ).to(torch_device) dummy_input = torch.LongTensor([[0, 2, 3, 4], [0, 2, 3, 4]]).to(torch_device) 
dummy_attention_mask = torch.LongTensor([[1, 1, 1, 1], [0, 1, 1, 1]]).to(torch_device) fa2_correctly_converted = False for _, module in fa2_model.named_modules(): if "FlashAttention" in module.__class__.__name__: fa2_correctly_converted = True break self.assertTrue(fa2_correctly_converted) _ = fa2_model(input_ids=dummy_input, attention_mask=dummy_attention_mask) with tempfile.TemporaryDirectory() as tmpdirname: fa2_model.save_pretrained(tmpdirname) model_from_pretrained = AutoModelForCausalLM.from_pretrained(tmpdirname) self.assertTrue(model_from_pretrained.config._attn_implementation != "flash_attention_2") fa2_correctly_converted = False for _, module in model_from_pretrained.named_modules(): if "FlashAttention" in module.__class__.__name__: fa2_correctly_converted = True break self.assertFalse(fa2_correctly_converted) def _get_custom_4d_mask_test_data(self): # Sequence in which all but the last token is the same input_ids = torch.tensor( [[10, 11, 12, 13], [10, 11, 12, 14], [10, 11, 12, 15]], device=torch_device, dtype=torch.int64 ) position_ids = torch.tensor([[0, 1, 2, 3]] * 3, device=torch_device, dtype=torch.int64) # Combining common prefix with the unique ending tokens: input_ids_shared_prefix = torch.cat([input_ids[0][:-1], input_ids[:, -1]]).unsqueeze(0) # Creating a 4D mask where each of the last 3 tokens do not attend to each other. mask_shared_prefix = torch.tensor( [ [ [ [1, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 0, 0], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 0, 1], ] ] ], ) # inverting the attention mask mask_dtype = torch.float32 min_dtype = torch.finfo(mask_dtype).min mask_shared_prefix = (mask_shared_prefix.eq(0.0)).to(dtype=mask_dtype, device=torch_device) * min_dtype # Creating a position_ids tensor. note the repeating figures in the end. 
position_ids_shared_prefix = torch.tensor([[0, 1, 2, 3, 3, 3]], device=torch_device, dtype=torch.int64) return input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix def test_custom_4d_attention_mask(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if len(self.all_generative_model_classes) == 0: self.skipTest( reason="Model architecture has no generative classes, and thus not necessarily supporting 4D masks" ) for model_class in self.all_generative_model_classes: if not model_class._supports_static_cache: self.skipTest(f"{model_class.__name__} is not guaranteed to work with custom 4D attention masks") config, _ = self.model_tester.prepare_config_and_inputs_for_common() if getattr(config, "sliding_window", 0) > 0: self.skipTest(f"{model_class.__name__} with sliding window attention is not supported by this test") model = model_class(config).to(device=torch_device, dtype=torch.float32) ( input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix, ) = self._get_custom_4d_mask_test_data() logits = model.forward(input_ids, position_ids=position_ids).logits # logits.shape == torch.Size([3, 4, ...]) logits_shared_prefix = model( input_ids_shared_prefix, attention_mask=mask_shared_prefix, position_ids=position_ids_shared_prefix, )[0] # logits_shared_prefix.shape == torch.Size([1, 6, ...]) out_last_tokens = logits[:, -1, :] # last tokens in each batch line out_shared_prefix_last_tokens = logits_shared_prefix[0, -3:, :] # last three tokens # comparing softmax-normalized logits: normalized_0 = F.softmax(out_last_tokens) normalized_1 = F.softmax(out_shared_prefix_last_tokens) torch.testing.assert_close(normalized_0, normalized_1, rtol=1e-3, atol=1e-4) def test_static_cache_matches_dynamic(self): """ Tests that generating with static cache give almost same results as with dynamic cache. This test does not compile the model and check only logits similarity for numerical precision errors. 
""" if len(self.all_generative_model_classes) == 0: self.skipTest( reason="Model architecture has no generative classes, and thus not necessarily supporting 4D masks" ) for model_class in self.all_generative_model_classes: if not model_class._supports_static_cache: self.skipTest(f"{model_class.__name__} does not support static cache") if not model_class._supports_cache_class: self.skipTest(f"{model_class.__name__} does not support cache class") config, inputs = self.model_tester.prepare_config_and_inputs_for_common() if getattr(config, "sliding_window", 0) > 0: self.skipTest(f"{model_class.__name__} with sliding window attention is not supported by this test") model = model_class(config).to(device=torch_device, dtype=torch.float32) model.eval() dynamic_out = model.generate( **inputs, do_sample=False, max_new_tokens=10, output_logits=True, return_dict_in_generate=True ) static_out = model.generate( **inputs, do_sample=False, max_new_tokens=10, cache_implementation="static", output_logits=True, return_dict_in_generate=True, ) self.assertTrue(torch.allclose(dynamic_out.logits[0], static_out.logits[0], rtol=1e-3, atol=1e-4)) # For now, Let's focus only on GPU for `torch.compile` @slow @require_torch_gpu @require_read_token def test_torch_compile(self): if version.parse(torch.__version__) < version.parse("2.3"): self.skipTest(reason="This test requires torch >= 2.3 to run.") if not hasattr(self, "_torch_compile_test_ckpt"): self.skipTest(f"{self.__class__.__name__} doesn't have the attribute `_torch_compile_test_ckpt`.") ckpt = self._torch_compile_test_ckpt os.environ["TOKENIZERS_PARALLELISM"] = "false" batch_size = 1 n_iter = 3 tokenizer = AutoTokenizer.from_pretrained(ckpt) model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float16).to(torch_device) model.generation_config.max_new_tokens = 4 model.generation_config.cache_implementation = "static" model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True) input_text = "Why dogs are cute?" input_ids = tokenizer([input_text] * batch_size, return_tensors="pt").to(torch_device) for i in range(n_iter): _ = model.generate(**input_ids, do_sample=False) @slow @require_torch_gpu # Testing cuda graphs. @require_read_token def test_compile_cuda_graph_time(self): if version.parse(torch.__version__) < version.parse("2.3"): self.skipTest(reason="This test requires torch >= 2.3 to run.") # TODO felix: All models supporting `StaticCache` or `torch.compile` should be tested. # At the moment, only llama, gemma and gemma2 are tested here! if not hasattr(self, "_torch_compile_test_ckpt"): self.skipTest(f"{self.__class__.__name__} doesn't have the attribute `_torch_compile_test_ckpt`.") ckpt = self._torch_compile_test_ckpt os.environ["TOKENIZERS_PARALLELISM"] = "false" tokenizer = AutoTokenizer.from_pretrained(ckpt) model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float16).to(torch_device) cache_implementation = "static" if model.config.model_type == "gemma2": cache_implementation = "hybrid" new_tokens = 50 gen_config = GenerationConfig( max_new_tokens=new_tokens, min_new_tokens=new_tokens, use_cache=True, pad_token_id=tokenizer.pad_token_id, num_beams=1, do_sample=False, eos_token_id=None, # This is required for min_new_tokens to actually have an effect. ) model.generation_config.eos_token_id = None # greedy_search falls back on this eos_token_id that we need to set to None as well for min_new_tokens to have an effect. 
model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True) inp = tokenizer("Why cats are cute?", return_tensors="pt").to(torch_device) # First run: the first run warms up each graph, which does things like CuBlas or Triton benchmarking start = time.perf_counter() _ = model.generate(**inp, generation_config=gen_config, cache_implementation=cache_implementation) end = time.perf_counter() graph_warmup_time = end - start # Second run: CUDA Graph recording, and replays it start = time.perf_counter() _ = model.generate(**inp, generation_config=gen_config, cache_implementation=cache_implementation) end = time.perf_counter() record_time = end - start # Finally: we hit the optimized, CUDA Graph replay path start = time.perf_counter() _ = model.generate(**inp, generation_config=gen_config, cache_implementation=cache_implementation) end = time.perf_counter() opt_time = end - start # For the recording step, we expect only two cuda graphs and this step should be much faster than the first. self.assertTrue(record_time < 0.15 * graph_warmup_time) self.assertTrue(opt_time < record_time) def test_forward_with_num_logits_to_keep(self): for model_class in self.all_generative_model_classes: if "num_logits_to_keep" not in set(inspect.signature(model_class.forward).parameters.keys()): self.skipTest(reason="This model does not support `num_logits_to_keep` argument.") config, inputs = self.model_tester.prepare_config_and_inputs_for_common() batch_size, sequence_length = inputs["input_ids"].shape vocab_size = config.vocab_size model = model_class(config).to(device=torch_device).eval() # num_logits_to_keep=0 is a special case meaning "keep all logits" all_logits = model(**inputs, num_logits_to_keep=0).logits last_token_logits = model(**inputs, num_logits_to_keep=1).logits # Assert all shapes are correct self.assertEqual(tuple(all_logits.shape), (batch_size, sequence_length, vocab_size)) self.assertEqual(tuple(last_token_logits.shape), (batch_size, 1, vocab_size)) # Assert the last tokens are actually the same (except for the natural fluctuation due to order of FP ops) self.assertTrue(torch.allclose(all_logits[:, -1:, :], last_token_logits, atol=1e-5)) global_rng = random.Random() def ids_tensor(shape, vocab_size, rng=None, name=None): # Creates a random int32 tensor of the shape within the vocab size if rng is None: rng = global_rng total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous() def random_attention_mask(shape, rng=None, name=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=None, name=None) # make sure that at least one token is attended to for each batch # we choose the 1st token so this property of `at least one being non-zero` still holds after applying causal mask attn_mask[:, 0] = 1 return attn_mask def floats_tensor(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.random() * scale) return torch.tensor(data=values, dtype=torch.float, device=torch_device).view(shape).contiguous()
transformers/tests/test_modeling_common.py/0
{ "file_path": "transformers/tests/test_modeling_common.py", "repo_id": "transformers", "token_count": 118068 }
479
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This test is meant to be run in on an instance with TPUs like this: # # python examples/pytorch/xla_spawn.py --num_cores=8 tests/test_trainer_tpu.py # # Replace 8 with the number of TPU cores you have. # import sys from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.utils import logging logger = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class DummyDataset(Dataset): def __init__(self, length: int = 101): self.length = length def __len__(self): return self.length def __getitem__(self, i) -> int: return i class DummyDataCollator: def __call__(self, features): return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)} class DummyModel(nn.Module): def __init__(self): super().__init__() # Add some (unused) params otherwise DDP will complain. self.fc = nn.Linear(120, 80) def forward(self, input_ids, labels=None): if labels is not None: return torch.tensor(0.0, device=input_ids.device), input_ids else: return input_ids def main(): parser = HfArgumentParser((TrainingArguments,)) sys.argv += ["--output_dir", "./examples"] training_args = parser.parse_args_into_dataclasses()[0] logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, " f"tpu_num_cores: {training_args.tpu_num_cores}", ) # Essentially, what we want to verify in the distributed case is # that we get all samples back, in the right order. # (this is crucial for prediction for instance) for dataset_length in [1001, 256, 15]: dataset = DummyDataset(dataset_length) def compute_metrics(p: EvalPrediction) -> Dict: sequential = list(range(len(dataset))) success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential return {"success": success} trainer = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) metrics = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) p = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) trainer.args.eval_accumulation_steps = 2 metrics = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) p = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) trainer.args.eval_accumulation_steps = None logger.info("🔥 All distributed tests successful") def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
transformers/tests/trainer/test_trainer_tpu.py/0
{ "file_path": "transformers/tests/trainer/test_trainer_tpu.py", "repo_id": "transformers", "token_count": 1651 }
480
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent.parent / "utils")) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures") class FeatureExtractorUtilTester(unittest.TestCase): def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2") # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2") # This check we did call the fake head request mock_head.assert_called() @is_staging_test class FeatureExtractorPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @staticmethod def _try_delete_repo(repo_id, token): try: # Reset repo delete_repo(repo_id=repo_id, token=token) except: # noqa E722 pass def test_push_to_hub(self): with tempfile.TemporaryDirectory() as tmp_dir: try: tmp_repo = f"{USER}/test-feature-extractor-{Path(tmp_dir).name}" feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) feature_extractor.push_to_hub(tmp_repo, token=self._token) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(tmp_repo) for k, v in feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_feature_extractor, k)) finally: # Always (try to) delete the repo. self._try_delete_repo(repo_id=tmp_repo, token=self._token) def test_push_to_hub_via_save_pretrained(self): with tempfile.TemporaryDirectory() as tmp_dir: try: tmp_repo = f"{USER}/test-feature-extractor-{Path(tmp_dir).name}" feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) # Push to hub via save_pretrained feature_extractor.save_pretrained(tmp_dir, repo_id=tmp_repo, push_to_hub=True, token=self._token) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(tmp_repo) for k, v in feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_feature_extractor, k)) finally: # Always (try to) delete the repo. 
self._try_delete_repo(repo_id=tmp_repo, token=self._token) def test_push_to_hub_in_organization(self): with tempfile.TemporaryDirectory() as tmp_dir: try: tmp_repo = f"valid_org/test-feature-extractor-{Path(tmp_dir).name}" feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) feature_extractor.push_to_hub(tmp_repo, token=self._token) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(tmp_repo) for k, v in feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_feature_extractor, k)) finally: # Always (try to) delete the repo. self._try_delete_repo(repo_id=tmp_repo, token=self._token) def test_push_to_hub_in_organization_via_save_pretrained(self): with tempfile.TemporaryDirectory() as tmp_dir: try: tmp_repo = f"valid_org/test-feature-extractor-{Path(tmp_dir).name}" feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) # Push to hub via save_pretrained feature_extractor.save_pretrained(tmp_dir, repo_id=tmp_repo, push_to_hub=True, token=self._token) new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(tmp_repo) for k, v in feature_extractor.__dict__.items(): self.assertEqual(v, getattr(new_feature_extractor, k)) finally: # Always (try to) delete the repo. self._try_delete_repo(repo_id=tmp_repo, token=self._token) def test_push_to_hub_dynamic_feature_extractor(self): with tempfile.TemporaryDirectory() as tmp_dir: try: tmp_repo = f"{USER}/test-dynamic-feature-extractor-{Path(tmp_dir).name}" CustomFeatureExtractor.register_for_auto_class() feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR) feature_extractor.push_to_hub(tmp_repo, token=self._token) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map, {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"}, ) new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_repo, trust_remote_code=True) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor") finally: # Always (try to) delete the repo. self._try_delete_repo(repo_id=tmp_repo, token=self._token)
transformers/tests/utils/test_feature_extraction_utils.py/0
{ "file_path": "transformers/tests/utils/test_feature_extraction_utils.py", "repo_id": "transformers", "token_count": 3082 }
481
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # # this test validates that we can stack skip decorators in groups and whether # they work correctly with other decorators # # since the decorators have already built their decision params (like checking # env[], we can't mock the env and test each of the combinations), so ideally # the following 4 should be run. But since we have different CI jobs running # different configs, all combinations should get covered # # RUN_SLOW=1 pytest -rA tests/test_skip_decorators.py # RUN_SLOW=1 CUDA_VISIBLE_DEVICES="" pytest -rA tests/test_skip_decorators.py # RUN_SLOW=0 pytest -rA tests/test_skip_decorators.py # RUN_SLOW=0 CUDA_VISIBLE_DEVICES="" pytest -rA tests/test_skip_decorators.py import os import unittest import pytest from parameterized import parameterized from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device # skipping in unittest tests params = [(1,)] # test that we can stack our skip decorators with 3rd party decorators def check_slow(): run_slow = bool(os.getenv("RUN_SLOW", 0)) if run_slow: assert True else: assert False, "should have been skipped" # test that we can stack our skip decorators def check_slow_torch_cuda(): run_slow = bool(os.getenv("RUN_SLOW", 0)) if run_slow and torch_device == "cuda": assert True else: assert False, "should have been skipped" @require_torch class SkipTester(unittest.TestCase): @slow @require_torch_gpu def test_2_skips_slow_first(self): check_slow_torch_cuda() @require_torch_gpu @slow def test_2_skips_slow_last(self): check_slow_torch_cuda() # The combination of any skip decorator, followed by parameterized fails to skip the tests # 1. @slow manages to correctly skip `test_param_slow_first` # 2. but then `parameterized` creates new tests, with a unique name for each parameter groups. # It has no idea that they are to be skipped and so they all run, ignoring @slow # Therefore skip decorators must come after `parameterized` # # @slow # @parameterized.expand(params) # def test_param_slow_first(self, param=None): # check_slow() # This works as expected: # 1. `parameterized` creates new tests with unique names # 2. each of them gets an opportunity to be skipped @parameterized.expand(params) @slow def test_param_slow_last(self, param=None): check_slow() # skipping in non-unittest tests # no problem at all here @slow @require_torch_gpu def test_pytest_2_skips_slow_first(): check_slow_torch_cuda() @require_torch_gpu @slow def test_pytest_2_skips_slow_last(): check_slow_torch_cuda() @slow @pytest.mark.parametrize("param", [1]) def test_pytest_param_slow_first(param): check_slow() @pytest.mark.parametrize("param", [1]) @slow def test_pytest_param_slow_last(param): check_slow()
transformers/tests/utils/test_skip_decorators.py/0
{ "file_path": "transformers/tests/utils/test_skip_decorators.py", "repo_id": "transformers", "token_count": 1214 }
482
import argparse import json import subprocess def get_runner_status(target_runners, token): offline_runners = [] cmd = ( f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"' " https://api.github.com/repos/huggingface/transformers/actions/runners" ) output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE) o = output.stdout.decode("utf-8") status = json.loads(o) runners = status["runners"] for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": offline_runners.append(runner) # save the result so we can report them on Slack with open("offline_runners.txt", "w") as fp: fp.write(json.dumps(offline_runners)) if len(offline_runners) > 0: failed = "\n".join([x["name"] for x in offline_runners]) raise ValueError(f"The following runners are offline:\n{failed}") if __name__ == "__main__": def list_str(values): return values.split(",") parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--target_runners", default=None, type=list_str, required=True, help="Comma-separated list of runners to check status.", ) parser.add_argument( "--token", default=None, type=str, required=True, help="A token that has actions:read permission." ) args = parser.parse_args() get_runner_status(args.target_runners, args.token)
transformers/utils/check_self_hosted_runner.py/0
{ "file_path": "transformers/utils/check_self_hosted_runner.py", "repo_id": "transformers", "token_count": 611 }
483
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Script to find a candidate list of models to deprecate based on the number of downloads and the date of the last commit. """ import argparse import glob import json import os from collections import defaultdict from datetime import datetime, timezone from pathlib import Path from git import Repo from huggingface_hub import HfApi api = HfApi() PATH_TO_REPO = Path(__file__).parent.parent.resolve() repo = Repo(PATH_TO_REPO) class HubModelLister: """ Utility for getting models from the hub based on tags. Handles errors without crashing the script. """ def __init__(self, tags): self.tags = tags self.model_list = api.list_models(tags=tags) def __iter__(self): try: yield from self.model_list except Exception as e: print(f"Error: {e}") return def _extract_commit_hash(commits): for commit in commits: if commit.startswith("commit "): return commit.split(" ")[1] return "" def get_list_of_repo_model_paths(models_dir): # Get list of all models in the library models = glob.glob(os.path.join(models_dir, "*/modeling_*.py")) # Remove flax and tf models models = [model for model in models if "_flax_" not in model] models = [model for model in models if "_tf_" not in model] # Get list of all deprecated models in the library deprecated_models = glob.glob(os.path.join(models_dir, "deprecated", "*")) # For each deprecated model, remove the deprecated models from the list of all models as well as the symlink path for deprecated_model in deprecated_models: deprecated_model_name = "/" + deprecated_model.split("/")[-1] + "/" models = [model for model in models if deprecated_model_name not in model] # Remove deprecated models models = [model for model in models if "/deprecated" not in model] # Remove auto models = [model for model in models if "/auto/" not in model] return models def get_list_of_models_to_deprecate( thresh_num_downloads=5_000, thresh_date=None, use_cache=False, save_model_info=False, max_num_models=-1, ): if thresh_date is None: thresh_date = datetime.now(timezone.utc).replace(year=datetime.now(timezone.utc).year - 1) else: thresh_date = datetime.strptime(thresh_date, "%Y-%m-%d").replace(tzinfo=timezone.utc) models_dir = PATH_TO_REPO / "src/transformers/models" model_paths = get_list_of_repo_model_paths(models_dir=models_dir) if use_cache and os.path.exists("models_info.json"): with open("models_info.json", "r") as f: models_info = json.load(f) # Convert datetimes back to datetime objects for model, info in models_info.items(): info["first_commit_datetime"] = datetime.fromisoformat(info["first_commit_datetime"]) else: # Build a dictionary of model info: first commit datetime, commit hash, model path models_info = defaultdict(dict) for model_path in model_paths: model = model_path.split("/")[-2] if model in models_info: continue commits = repo.git.log("--diff-filter=A", "--", model_path).split("\n") commit_hash = _extract_commit_hash(commits) commit_obj = repo.commit(commit_hash) committed_datetime = 
commit_obj.committed_datetime models_info[model]["commit_hash"] = commit_hash models_info[model]["first_commit_datetime"] = committed_datetime models_info[model]["model_path"] = model_path models_info[model]["downloads"] = 0 # Some tags on the hub are formatted differently than in the library tags = [model] if "_" in model: tags.append(model.replace("_", "-")) models_info[model]["tags"] = tags # Filter out models which were added less than a year ago models_info = { model: info for model, info in models_info.items() if info["first_commit_datetime"] < thresh_date } # We make successive calls to the hub, filtering based on the model tags n_seen = 0 for model, model_info in models_info.items(): for model_tag in model_info["tags"]: model_list = HubModelLister(tags=model_tag) for i, hub_model in enumerate(model_list): n_seen += 1 if i % 100 == 0: print(f"Processing model {i} for tag {model_tag}") if max_num_models != -1 and i > n_seen: break if hub_model.private: continue model_info["downloads"] += hub_model.downloads if save_model_info and not (use_cache and os.path.exists("models_info.json")): # Make datetimes serializable for model, info in models_info.items(): info["first_commit_datetime"] = info["first_commit_datetime"].isoformat() with open("models_info.json", "w") as f: json.dump(models_info, f, indent=4) print("\nFinding models to deprecate:") n_models_to_deprecate = 0 models_to_deprecate = {} for model, info in models_info.items(): n_downloads = info["downloads"] if n_downloads < thresh_num_downloads: n_models_to_deprecate += 1 models_to_deprecate[model] = info print(f"\nModel: {model}") print(f"Downloads: {n_downloads}") print(f"Date: {info['first_commit_datetime']}") print("\nModels to deprecate: ", "\n" + "\n".join(models_to_deprecate.keys())) print(f"\nNumber of models to deprecate: {n_models_to_deprecate}") print("Before deprecating make sure to verify the models, including if they're used as a module in other models.") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--save_model_info", action="store_true", help="Save the retrieved model info to a json file.") parser.add_argument( "--use_cache", action="store_true", help="Use the cached model info instead of calling the hub." ) parser.add_argument( "--thresh_num_downloads", type=int, default=5_000, help="Threshold number of downloads below which a model should be deprecated. Default is 5,000.", ) parser.add_argument( "--thresh_date", type=str, default=None, help="Date to consider the first commit from. Format: YYYY-MM-DD. If unset, defaults to one year ago from today.", ) parser.add_argument( "--max_num_models", type=int, default=-1, help="Maximum number of models to consider from the hub. -1 means all models. Useful for testing.", ) args = parser.parse_args() models_to_deprecate = get_list_of_models_to_deprecate( thresh_num_downloads=args.thresh_num_downloads, thresh_date=args.thresh_date, use_cache=args.use_cache, save_model_info=args.save_model_info, max_num_models=args.max_num_models, )
transformers/utils/models_to_deprecate.py/0
{ "file_path": "transformers/utils/models_to_deprecate.py", "repo_id": "transformers", "token_count": 3165 }
484
from transformers import PretrainedConfig class CustomConfig(PretrainedConfig): model_type = "custom" def __init__(self, attribute=1, **kwargs): self.attribute = attribute super().__init__(**kwargs) class NoSuperInitConfig(PretrainedConfig): model_type = "custom" def __init__(self, attribute=1, **kwargs): self.attribute = attribute
transformers/utils/test_module/custom_configuration.py/0
{ "file_path": "transformers/utils/test_module/custom_configuration.py", "repo_id": "transformers", "token_count": 136 }
485
# pip install openrlbenchmark==0.2.1a5 # see https://github.com/openrlbenchmark/openrlbenchmark#get-started for documentation echo "we deal with $TAGS_STRING" python -m openrlbenchmark.rlops_multi_metrics \ --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.reward_model&cen=trl_ppo_trainer_config.value.exp_name&metrics=env/reward_mean&metrics=objective/kl' \ "ppo$TAGS_STRING" \ --env-ids sentiment-analysis:lvwerra/distilbert-imdb \ --no-check-empty-runs \ --pc.ncols 2 \ --pc.ncols-legend 1 \ --output-filename benchmark/trl/$FOLDER_STRING/ppo \ --scan-history python -m openrlbenchmark.rlops_multi_metrics \ --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=output_dir&cen=_name_or_path&metrics=train/rewards/accuracies&metrics=train/loss' \ "gpt2$TAGS_STRING" \ --env-ids dpo_anthropic_hh \ --no-check-empty-runs \ --pc.ncols 2 \ --pc.ncols-legend 1 \ --output-filename benchmark/trl/$FOLDER_STRING/dpo \ --scan-history python -m openrlbenchmark.rlops_multi_metrics \ --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=output_dir&cen=_name_or_path&metrics=train/loss&metrics=eval/accuracy&metrics=eval/loss' \ "facebook/opt-350m$TAGS_STRING" \ --env-ids reward_modeling_anthropic_hh \ --no-check-empty-runs \ --pc.ncols 2 \ --pc.ncols-legend 1 \ --output-filename benchmark/trl/$FOLDER_STRING/reward_modeling \ --scan-history python -m openrlbenchmark.rlops_multi_metrics \ --filters '?we=huggingface&wpn=trl&xaxis=_step&ceik=output_dir&cen=_name_or_path&metrics=train/loss' \ "facebook/opt-350m$TAGS_STRING" \ --env-ids sft_openassistant-guanaco \ --no-check-empty-runs \ --pc.ncols 2 \ --pc.ncols-legend 1 \ --output-filename benchmark/trl/$FOLDER_STRING/sft \ --scan-history python benchmark/upload_benchmark.py \ --folder_path="benchmark/trl/$FOLDER_STRING" \ --path_in_repo="images/benchmark/$FOLDER_STRING" \ --repo_id="trl-internal-testing/example-images" \ --repo_type="dataset"
trl/benchmark/benchmark_level1_plot.sh/0
{ "file_path": "trl/benchmark/benchmark_level1_plot.sh", "repo_id": "trl", "token_count": 927 }
486
# BCO Trainer TRL supports the Binary Classifier Optimization (BCO). The [BCO](https://huggingface.co/papers/2404.04656) authors train a binary classifier whose logit serves as a reward so that the classifier maps {prompt, chosen completion} pairs to 1 and {prompt, rejected completion} pairs to 0. For a full example have a look at [`examples/scripts/bco.py`]. ## Expected dataset format The BCO trainer expects a very specific format for the dataset as it does not require pairwise preferences. Since the model will be trained to directly optimize examples that consist of a prompt, model completion, and a label to indicate whether the completion is "good" or "bad", we expect a dataset with the following columns: - `prompt` - `completion` - `label` for example: ``` bco_dataset_dict = { "prompt": [ "Hey, hello", "How are you", "What is your name?", "What is your name?", "Which is the best programming language?", "Which is the best programming language?", "Which is the best programming language?", ], "completion": [ "hi nice to meet you", "leave me alone", "I don't have a name", "My name is Mary", "Python", "C++", "Java", ], "label": [ True, False, False, True, True, False, False, ], } ``` where the `prompt` contains the context inputs, `completion` contains the corresponding responses and `label` contains the corresponding flag that indicates if the generated completion is desired (`True`) or undesired (`False`). A prompt can have multiple responses and this is reflected in the entries being repeated in the dictionary's value arrays. It is required that the dataset contains at least one desirable and one undesirable completion. ## Expected model format The BCO trainer expects a model of `AutoModelForCausalLM`, compared to PPO that expects `AutoModelForCausalLMWithValueHead` for the value function. ## Using the `BCOTrainer` For a detailed example have a look at the `examples/scripts/bco.py` script. At a high level we need to initialize the `BCOTrainer` with a `model` we wish to train and a reference `ref_model` which we will use to calculate the implicit rewards of the preferred and rejected response. The `beta` refers to the hyperparameter of the implicit reward, and the dataset contains the 3 entries listed above. Note that the `model` and `ref_model` need to have the same architecture (ie decoder only or encoder-decoder). ```py training_args = BCOConfig( beta=0.1, ) bco_trainer = BCOTrainer( model, model_ref, args=training_args, train_dataset=train_dataset, tokenizer=tokenizer, ) ``` After this one can then call: ```py bco_trainer.train() ``` ## Underlying Distribution matching (UDM) In practical scenarios, the thumbs-up and thumbs-down datasets are likely to have divergent underlying distributions of prompts. Consider an LLM deployed for user feedback: if the model excels in writing tasks but underperforms in coding, the thumbs-up dataset will be dominated by writing-related prompts, while the thumbs-down dataset will contain mostly coding-related prompts. If the prompts in your desired and undesired datasets differ a lot, it is useful to enable UDM. 
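Before enabling UDM, it can help to check how different the two prompt distributions actually are. The snippet below is a rough, illustrative heuristic (a token-level Jaccard overlap computed on the `bco_dataset_dict` defined above); it is not part of TRL:

```py
# Low lexical overlap between desired and undesired prompts is a hint that
# the underlying prompt distributions differ and UDM may be worth enabling.
desired_prompts = [p for p, keep in zip(bco_dataset_dict["prompt"], bco_dataset_dict["label"]) if keep]
undesired_prompts = [p for p, keep in zip(bco_dataset_dict["prompt"], bco_dataset_dict["label"]) if not keep]

desired_tokens = set(" ".join(desired_prompts).lower().split())
undesired_tokens = set(" ".join(undesired_prompts).lower().split())

jaccard = len(desired_tokens & undesired_tokens) / len(desired_tokens | undesired_tokens)
print(f"Prompt token overlap (Jaccard): {jaccard:.2f}")
```

The lower the overlap, the more likely it is that UDM will help.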
Choose an embedding model and tokenizer:

```py
from functools import partial

from accelerate import Accelerator
from transformers import AutoModel, AutoTokenizer

embedding_model = AutoModel.from_pretrained(your_model_id)
embedding_tokenizer = AutoTokenizer.from_pretrained(your_model_id)

# customize this function depending on your embedding model
def embed_prompt(input_ids, attention_mask, model):
    outputs = model(input_ids=input_ids, attention_mask=attention_mask)
    return outputs.last_hidden_state.mean(dim=1)

embedding_model = Accelerator().prepare_model(embedding_model)
embedding_func = partial(embed_prompt, model=embedding_model)
```

Set `prompt_sample_size` to define how many prompts are selected to train the UDM classifier and start the training with the provided embedding function:

```py
training_args = BCOConfig(
    beta=0.1,
    prompt_sample_size=512,
)

bco_trainer = BCOTrainer(
    model,
    model_ref,
    args=training_args,
    train_dataset=train_dataset,
    tokenizer=tokenizer,
    embedding_func=embedding_func,
    embedding_tokenizer=embedding_tokenizer,
)

bco_trainer.train()
```

### For Mixture of Experts Models: Enabling the auxiliary loss

MoEs are most efficient when the load is roughly evenly distributed between experts.
To ensure that we train MoEs similarly during preference-tuning, it is beneficial to add the auxiliary loss from the load balancer to the final loss.

This option is enabled by setting `output_router_logits=True` in the model config (e.g. `MixtralConfig`).
To scale how much the auxiliary loss contributes to the total loss, use the hyperparameter `router_aux_loss_coef=...` (default: 0.001).

## BCOTrainer

[[autodoc]] BCOTrainer

## BCOConfig

[[autodoc]] BCOConfig
trl/docs/source/bco_trainer.mdx/0
{ "file_path": "trl/docs/source/bco_trainer.mdx", "repo_id": "trl", "token_count": 1579 }
487
# Learning Tools (Experimental 🧪)

Using Large Language Models (LLMs) with tools has been a popular topic recently, with awesome works such as [ToolFormer](https://huggingface.co/papers/2302.04761) and [ToolBench](https://huggingface.co/papers/2305.16504). In TRL, we provide a simple example of how to teach an LLM to use tools with reinforcement learning.

Here's an overview of the scripts in the [trl repository](https://github.com/lvwerra/trl/tree/main/examples/research_projects/tools):

| File | Description |
|---|---|
| [`calculator.py`](https://github.com/lvwerra/trl/blob/main/examples/research_projects/tools/calculator.py) | Script to train an LLM to use a calculator with reinforcement learning. |
| [`triviaqa.py`](https://github.com/lvwerra/trl/blob/main/examples/research_projects/tools/triviaqa.py) | Script to train an LLM to use a wiki tool to answer questions. |
| [`python_interpreter.py`](https://github.com/lvwerra/trl/blob/main/examples/research_projects/tools/python_interpreter.py) | Script to train an LLM to use a Python interpreter to solve math puzzles. |

<Tip warning={true}>

Note that the scripts above rely heavily on the `TextEnvironment` API, which is still under active development. The API may change in the future. Please see [`TextEnvironment`](text_environment) for the related docs.

</Tip>

## Learning to Use a Calculator

The rough idea is as follows:

1. Load a tool such as [ybelkada/simple-calculator](https://huggingface.co/spaces/ybelkada/simple-calculator) that parses a text calculation like `"14 + 34"` and returns the calculated number:
```python
from transformers import AutoTokenizer, load_tool
tool = load_tool("ybelkada/simple-calculator")
tool_fn = lambda text: str(round(float(tool(text)), 2)) # rounding to 2 decimal places
```
2. Define a reward function that returns a positive reward if the tool returns the correct answer. In the script we create a dummy reward function like `reward_fn = lambda x: 1`, but we override the rewards directly later.
3. Create a prompt showing how to use the tools:
```python
# system prompt
prompt = """\
What is 13.1-3?

<request><SimpleCalculatorTool>13.1-3<call>10.1<response>

Result=10.1<submit>

What is 4*3?

<request><SimpleCalculatorTool>4*3<call>12<response>

Result=12<submit>

What is 12.1+1?

<request><SimpleCalculatorTool>12.1+1<call>13.1<response>

Result=13.1<submit>

What is 12.1-20?

<request><SimpleCalculatorTool>12.1-20<call>-7.9<response>

Result=-7.9<submit>"""
```
4. Create a `trl.TextEnvironment` with the model:
```python
env = TextEnvironment(
    model,
    tokenizer,
    {"SimpleCalculatorTool": tool_fn},
    reward_fn,
    prompt,
    generation_kwargs=generation_kwargs,
)
```
5. Then generate some data such as `tasks = ["\n\nWhat is 13.1-3?", "\n\nWhat is 4*3?"]` and run the environment with `queries, responses, masks, rewards, histories = env.run(tasks)`. The environment will look for the `<call>` token in the prompt and append the tool output to the response; it will also return the mask associated with the response. You can further use the `histories` to visualize the interaction between the model and the tool; `histories[0].show_text()` will show the text with color-coded tool output and `histories[0].show_tokens(tokenizer)` will visualize the tokens.
![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/learning_tools.png)
6. Finally, we can train the model with `train_stats = ppo_trainer.step(queries, responses, rewards, masks)`.
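Putting these steps together, a minimal rollout-and-update loop could look like the sketch below. It is illustrative only: it assumes `env`, `ppo_trainer`, and the calculator tool have been set up as above, and it omits batching, reward overrides, and logging.

```python
tasks = ["\n\nWhat is 13.1-3?", "\n\nWhat is 4*3?"]

for _ in range(100):
    # roll out the model inside the text environment; tool outputs are
    # appended to the responses and excluded from the loss via `masks`
    queries, responses, masks, rewards, histories = env.run(tasks)

    # one PPO optimization step on the collected rollouts
    train_stats = ppo_trainer.step(queries, responses, rewards, masks)
```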
The trainer will use the mask to ignore the tool output when computing the loss, make sure to pass that argument to `step`. ## Experiment results We trained a model with the above script for 10 random seeds. You can reproduce the run with the following command. Feel free to remove the `--slurm-*` arguments if you don't have access to a slurm cluster. ``` WANDB_TAGS="calculator_final" python benchmark/benchmark.py \ --command "python examples/research_projects/tools/calculator.py" \ --num-seeds 10 \ --start-seed 1 \ --workers 10 \ --slurm-gpus-per-task 1 \ --slurm-ntasks 1 \ --slurm-total-cpus 8 \ --slurm-template-path benchmark/trl.slurm_template ``` We can then use [`openrlbenchmark`](https://github.com/openrlbenchmark/openrlbenchmark) which generates the following plot. ``` python -m openrlbenchmark.rlops_multi_metrics \ --filters '?we=openrlbenchmark&wpn=trl&xaxis=_step&ceik=trl_ppo_trainer_config.value.tracker_project_name&cen=trl_ppo_trainer_config.value.log_with&metrics=env/reward_mean&metrics=objective/kl' \ 'wandb?tag=calculator_final&cl=calculator_mask' \ --env-ids trl \ --check-empty-runs \ --pc.ncols 2 \ --pc.ncols-legend 1 \ --output-filename static/0compare \ --scan-history ``` ![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/learning_tools_chart.png) As we can see, while 1-2 experiments crashed for some reason, most of the runs obtained near perfect proficiency in the calculator task. ## (Early Experiments 🧪): learning to use a wiki tool for question answering In the [ToolFormer](https://huggingface.co/papers/2302.04761) paper, it shows an interesting use case that utilizes a Wikipedia Search tool to help answer questions. In this section, we attempt to perform similar experiments but uses RL instead to teach the model to use a wiki tool on the [TriviaQA](https://nlp.cs.washington.edu/triviaqa/) dataset. <Tip warning={true}> **Note that many settings are different so the results are not directly comparable.** </Tip> ### Building a search index Since [ToolFormer](https://huggingface.co/papers/2302.04761) did not open source, we needed to first replicate the search index. It is mentioned in their paper that the authors built the search index using a BM25 retriever that indexes the Wikipedia dump from [KILT](https://github.com/facebookresearch/KILT) Fortunately, [`pyserini`](https://github.com/castorini/pyserini) already implements the BM25 retriever and provides a prebuilt index for the KILT Wikipedia dump. We can use the following code to search the index. ```python from pyserini.search.lucene import LuceneSearcher import json searcher = LuceneSearcher.from_prebuilt_index('wikipedia-kilt-doc') def search(query): hits = searcher.search(query, k=1) hit = hits[0] contents = json.loads(hit.raw)['contents'] return contents print(search("tennis racket")) ``` ``` Racket (sports equipment) A racket or racquet is a sports implement consisting of a handled frame with an open hoop across which a network of strings or catgut is stretched tightly. It is used for striking a ball or shuttlecock in games such as squash, tennis, racquetball, and badminton. Collectively, these games are known as racket sports. Racket design and manufacturing has changed considerably over the centuries. The frame of rackets for all sports was traditionally made of solid wood (later laminated wood) and the strings of animal intestine known as catgut. 
The traditional racket size was limited by the strength and weight of the wooden frame which had to be strong enough to hold the strings and stiff enough to hit the ball or shuttle. Manufacturers started adding non-wood laminates to wood rackets to improve stiffness. Non-wood rackets were made first of steel, then of aluminum, and then carbon fiber composites. Wood is still used for real tennis, rackets, and xare. Most rackets are now made of composite materials including carbon fiber or fiberglass, metals such as titanium alloys, or ceramics.
...
```

We then deployed this snippet as a Hugging Face space [here](https://huggingface.co/spaces/vwxyzjn/pyserini-wikipedia-kilt-doc), so that we can use the space as a `transformers.Tool` later.

![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/pyserini.png)

### Experiment settings

We use the following settings:

* use the `bigcode/starcoderbase` model as the base model
* use the `pyserini-wikipedia-kilt-doc` space as the wiki tool and only use the first paragraphs of the search result, allowing the `TextEnvironment` to obtain at most `max_tool_reponse=400` response tokens from the tool.
* test if the response contains the answer string; if so, give a reward of 1, otherwise, give a reward of 0.
    * note that this is a simplified evaluation criterion. In [ToolFormer](https://huggingface.co/papers/2302.04761), the authors check if the first 20 words of the response contain the correct answer.
* use the following prompt that demonstrates the usage of the wiki tool.
```python
prompt = """\
Answer the following question:

Q: In which branch of the arts is Patricia Neary famous?
A: Ballets
A2: <request><Wiki>Patricia Neary<call>Patricia Neary (born October 27, 1942) is an American ballerina, choreographer and ballet director, who has been particularly active in Switzerland. She has also been a highly successful ambassador for the Balanchine Trust, bringing George Balanchine's ballets to 60 cities around the globe.<response>
Result=Ballets<submit>

Q: Who won Super Bowl XX?
A: Chicago Bears
A2: <request><Wiki>Super Bowl XX<call>Super Bowl XX was an American football game between the National Football Conference (NFC) champion Chicago Bears and the American Football Conference (AFC) champion New England Patriots to decide the National Football League (NFL) champion for the 1985 season. The Bears defeated the Patriots by the score of 46–10, capturing their first NFL championship (and Chicago's first overall sports victory) since 1963, three years prior to the birth of the Super Bowl. Super Bowl XX was played on January 26, 1986 at the Louisiana Superdome in New Orleans.<response>
Result=Chicago Bears<submit>

Q: """
```

### Result and Discussion

Our experiments show that the agent can learn to use the wiki tool to answer questions. The learning curves mostly trend upward, but one of the experiments did crash.

![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/triviaqa_learning_curves.png)

The Wandb report is [here](https://wandb.ai/costa-huang/cleanRL/reports/TriviaQA-Final-Experiments--Vmlldzo1MjY0ODk5) for further inspection.

Note that the success rate of the trained model is on the low end, which could be due to the following reasons:

* **incorrect searches:** When given the question `"What is Bruce Willis' real first name?"`, if the model searches for `Bruce Willis`, our wiki tool returns "Patrick Poivey (born 18 February 1948) is a French actor.
He is especially known for his voice: he is the French dub voice of Bruce Willis since 1988." But a correct search should return "Walter Bruce Willis (born March 19, 1955) is an American former actor. He achieved fame with a leading role on the comedy-drama series Moonlighting (1985–1989) and appeared in over a hundred films, gaining recognition as an action hero after his portrayal of John McClane in the Die Hard franchise (1988–2013) and other roles.[1][2]"

![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/real_first_name.png)

* **unnecessarily long response**: The wiki tool by default sometimes outputs very long sequences. E.g., when the wiki tool searches for "Brown Act"
    * Our wiki tool returns "The Ralph M. Brown Act, located at California Government Code 54950 "et seq.", is an act of the California State Legislature, authored by Assemblymember Ralph M. Brown and passed in 1953, that guarantees the public's right to attend and participate in meetings of local legislative bodies."
    * [ToolFormer](https://huggingface.co/papers/2302.04761)'s wiki tool returns "The Ralph M. Brown Act is an act of the California State Legislature that guarantees the public's right to attend and participate in meetings of local legislative bodies." which is more succinct.

![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/brown_act.png)

## (Early Experiments 🧪): solving math puzzles with a Python interpreter

In this section, we attempt to teach the model to use a Python interpreter to solve math puzzles. The rough idea is to give the agent a prompt like the following:

```python
prompt = """\
Example of using a Python API to solve math questions.

Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?

<request><PythonInterpreter>
def solution():
    money_initial = 23
    bagels = 5
    bagel_cost = 3
    money_spent = bagels * bagel_cost
    money_left = money_initial - money_spent
    result = money_left
    return result
print(solution())
<call>72<response>

Result = 72 <submit>

Q: """
```

The training experiment can be found at https://wandb.ai/lvwerra/trl-gsm8k/runs/a5odv01y

![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/gms8k_learning_curve.png)
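For readers who want to connect these experiments back to code, here is a minimal sketch of how an exact-match reward like the one described in the wiki-tool settings above could be plugged into a `TextEnvironment`. The `model`, `tokenizer`, `tools`, `prompt`, `generation_kwargs`, `query_tensors`, and `answers` objects below are placeholders, and the reward is a simplified assumption rather than the exact code used for the runs above:

```python
from trl import TextEnvironment

# Illustrative reward: 1.0 if the gold answer string appears in the generated
# response, 0.0 otherwise (the simplified criterion described above).
def exact_match_reward(responses, answers):
    return [1.0 if answer in response else 0.0 for response, answer in zip(responses, answers)]

# `tools` maps a tool name to a transformers Tool instance (e.g. the wiki or
# Python-interpreter tool), `prompt` is a few-shot prompt like the ones shown
# above, and `generation_kwargs` are the usual sampling settings.
text_env = TextEnvironment(
    model,
    tokenizer,
    tools,
    exact_match_reward,
    prompt,
    generation_kwargs=generation_kwargs,
)

# Extra keyword arguments to `run` are forwarded to the reward function.
queries, responses, masks, rewards, histories = text_env.run(query_tensors, answers=answers)
```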
trl/docs/source/learning_tools.mdx/0
{ "file_path": "trl/docs/source/learning_tools.mdx", "repo_id": "trl", "token_count": 3879 }
488
# Use model after training

Once you have trained a model using either the SFTTrainer, PPOTrainer, or DPOTrainer, you will have a fine-tuned model that can be used for text generation. In this section, we'll walk through the process of loading the fine-tuned model and generating text. If you need to run an inference server with the trained model, you can explore libraries such as [`text-generation-inference`](https://github.com/huggingface/text-generation-inference).

## Load and Generate

If you have fine-tuned a model fully, meaning without the use of PEFT, you can simply load it like any other language model in transformers. For example, the value head that was trained during PPO training is no longer needed, and if you load the model with the original transformer class it will be ignored:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name_or_path = "kashif/stack-llama-2" # path/to/your/model/or/name/on/hub
device = "cpu" # or "cuda" if you have a GPU

model = AutoModelForCausalLM.from_pretrained(model_name_or_path).to(device)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)

inputs = tokenizer.encode("This movie was really", return_tensors="pt").to(device)
outputs = model.generate(inputs)
print(tokenizer.decode(outputs[0]))
```

Alternatively you can also use the pipeline:

```python
from transformers import pipeline

model_name_or_path = "kashif/stack-llama-2" # path/to/your/model/or/name/on/hub
pipe = pipeline("text-generation", model=model_name_or_path)
print(pipe("This movie was really")[0]["generated_text"])
```

## Use PEFT Adapters

```python
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_model_name = "kashif/stack-llama-2" # path/to/your/model/or/name/on/hub
adapter_model_name = "path/to/my/adapter"

model = AutoModelForCausalLM.from_pretrained(base_model_name)
model = PeftModel.from_pretrained(model, adapter_model_name)
tokenizer = AutoTokenizer.from_pretrained(base_model_name)
```

You can also merge the adapters into the base model so you can use the model like a normal transformers model; however, the checkpoint will be significantly bigger:

```python
model = AutoModelForCausalLM.from_pretrained(base_model_name)
model = PeftModel.from_pretrained(model, adapter_model_name)
model = model.merge_and_unload()
model.save_pretrained("merged_adapters")
```

Once you have the model loaded and have either merged the adapters or kept them separate on top, you can run generation as with a normal model, as outlined above.
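For instance, here is a minimal sketch of running generation with the adapter-loaded (or merged) model from the snippets above, reusing the same `model` and `tokenizer` objects; the prompt is arbitrary:

```python
# Works the same whether the adapters are kept on top or merged into the base model.
inputs = tokenizer.encode("This movie was really", return_tensors="pt")
outputs = model.generate(inputs, max_new_tokens=30)
print(tokenizer.decode(outputs[0]))
```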
trl/docs/source/use_model.md/0
{ "file_path": "trl/docs/source/use_model.md", "repo_id": "trl", "token_count": 778 }
489
<jupyter_start><jupyter_text>**Best-of-n sampling as an alternative to RLHF**This notebook compares reward-model scores of prompt based responses from 1. a base model (`gpt2-imdb`)2. `RLHF` tuned model based on this base-model 3. the base-model again from which we sample n responses to each prompt, score them and take the best scored one AKA the `best-of-n sampled` model Import dependencies<jupyter_code>%pip install transformers trl import torch import pandas as pd from transformers import pipeline, AutoTokenizer from datasets import load_dataset from trl import AutoModelForCausalLMWithValueHead from trl.core import LengthSampler device = 0 if torch.cuda.is_available() else "cpu"<jupyter_output><empty_output><jupyter_text>Various constants<jupyter_code>ref_model_name = "lvwerra/gpt2-imdb" model_name = "lvwerra/gpt2-imdb-pos-v2" reward_model = "lvwerra/distilbert-imdb" N_BEST_OF = 4<jupyter_output><empty_output><jupyter_text>Models and tokenizers<jupyter_code>model = AutoModelForCausalLMWithValueHead.from_pretrained(model_name) ref_model = AutoModelForCausalLMWithValueHead.from_pretrained(ref_model_name) reward_pipe = pipeline("sentiment-analysis", model=reward_model, device=device) tokenizer = AutoTokenizer.from_pretrained(ref_model_name) tokenizer.pad_token = tokenizer.eos_token # cuda-ize models model.cuda() ref_model.cuda()<jupyter_output><empty_output><jupyter_text>Dataset building<jupyter_code>def build_dataset(tokenizer, dataset_name="imdb", input_min_text_length=2, input_max_text_length=8): # load imdb with datasets ds = load_dataset(dataset_name, split="train") ds = ds.rename_columns({"text": "review"}) ds = ds.filter(lambda x: len(x["review"]) > 200, batched=False) input_size = LengthSampler(input_min_text_length, input_max_text_length) def tokenize(sample): sample["input_ids"] = tokenizer.encode(sample["review"])[: input_size()] sample["query"] = tokenizer.decode(sample["input_ids"]) return sample ds = ds.map(tokenize, batched=False) ds.set_format(type="torch") return ds dataset = build_dataset(tokenizer) gen_kwargs = {"min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id} sent_kwargs = {"top_k": None, "function_to_apply": "none", "batch_size": 16} output_min_length = 4 output_max_length = 16 output_length_sampler = LengthSampler(output_min_length, output_max_length) #### get a batch from the dataset bs = 16 output_data = dict() dataset.set_format("pandas") df_batch = dataset[:].sample(bs) output_data["query"] = df_batch["query"].tolist() query_tensors = df_batch["input_ids"].tolist() # :: [Resp] response_tensors_ref, response_tensors = [], [] # :: [[Resp]] response_tensors_best_of = []<jupyter_output><empty_output><jupyter_text>Generation using various models<jupyter_code>for i in range(bs): gen_len = output_length_sampler() query = torch.tensor(query_tensors[i]) output = ref_model.generate(query.unsqueeze(dim=0).to(device), max_new_tokens=gen_len, **gen_kwargs).squeeze() response_tensors_ref.append(tokenizer.decode(output)) output = model.generate(query.unsqueeze(dim=0).to(device), max_new_tokens=gen_len, **gen_kwargs).squeeze() response_tensors.append(tokenizer.decode(output)) # generating copies of the same query for the Best-of-n sampling queries = query.repeat((N_BEST_OF, 1)) output = ref_model.generate(queries.to(device), max_new_tokens=gen_len, **gen_kwargs).squeeze() response_tensors_best_of.append(tokenizer.batch_decode(output))<jupyter_output><empty_output><jupyter_text>Scoring<jupyter_code>scores_ref = 
[output[0]["score"] for output in reward_pipe(response_tensors_ref, **sent_kwargs)] scores = [output[0]["score"] for output in reward_pipe(response_tensors, **sent_kwargs)] scores_best_of = [] for i, response in enumerate(response_tensors_best_of): # base_score = scores_ref[i] scores_best_of.append(torch.tensor([output[0]["score"] for output in reward_pipe(response, **sent_kwargs)])) output_data["response (ref)"] = response_tensors_ref output_data["scores (ref)"] = scores_ref output_data["response (RLHF)"] = response_tensors output_data["scores (RLHF)"] = scores output_data["response (best_of)"] = [ response_tensors_best_of[i][a.argmax().item()] for i, a in enumerate(scores_best_of) ] output_data["scores (best_of)"] = [a.max().item() for a in scores_best_of] # store results in a dataframe df_results = pd.DataFrame(output_data) df_results<jupyter_output><empty_output>
trl/examples/notebooks/best_of_n.ipynb/0
{ "file_path": "trl/examples/notebooks/best_of_n.ipynb", "repo_id": "trl", "token_count": 1717 }
490
# Detoxifying language models

To run this code, do the following:

```shell
ACCELERATE_LOG_LEVEL=info accelerate launch --config_file {CONFIG} examples/research_projects/toxicity/scripts/gpt-j-6b-toxicity.py --log_with wandb
```
trl/examples/research_projects/toxicity/README.md/0
{ "file_path": "trl/examples/research_projects/toxicity/README.md", "repo_id": "trl", "token_count": 79 }
491
import shutil from accelerate import PartialState from datasets import load_dataset from transformers import ( AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser, ) from trl import ModelConfig from trl.trainer.ppov2_trainer import PPOv2Config, PPOv2Trainer from trl.trainer.utils import SIMPLE_QUERY_CHAT_TEMPLATE """ python -i examples/scripts/ppo/ppo.py \ --learning_rate 3e-6 \ --output_dir models/minimal/ppo \ --per_device_train_batch_size 64 \ --gradient_accumulation_steps 1 \ --total_episodes 10000 \ --model_name_or_path EleutherAI/pythia-1b-deduped \ --non_eos_penalty \ accelerate launch --config_file examples/accelerate_configs/deepspeed_zero3.yaml \ examples/scripts/ppo/ppo.py \ --output_dir models/minimal/ppo \ --num_ppo_epochs 1 \ --num_mini_batches 1 \ --learning_rate 3e-6 \ --per_device_train_batch_size 1 \ --gradient_accumulation_steps 16 \ --total_episodes 10000 \ --model_name_or_path EleutherAI/pythia-1b-deduped \ --sft_model_path EleutherAI/pythia-1b-deduped \ --reward_model_path EleutherAI/pythia-1b-deduped \ --local_rollout_forward_batch_size 1 \ --deepspeed3 \ --non_eos_penalty \ """ if __name__ == "__main__": parser = HfArgumentParser((PPOv2Config, ModelConfig)) config, model_config = parser.parse_args_into_dataclasses() # remove output_dir if exists shutil.rmtree(config.output_dir, ignore_errors=True) ################ # Model & Tokenizer ################ tokenizer = AutoTokenizer.from_pretrained( model_config.model_name_or_path, padding_side="left", trust_remote_code=model_config.trust_remote_code, ) tokenizer.add_special_tokens({"pad_token": "[PAD]"}) if tokenizer.chat_template is None: tokenizer.chat_template = SIMPLE_QUERY_CHAT_TEMPLATE value_model = AutoModelForSequenceClassification.from_pretrained( config.reward_model_path, trust_remote_code=model_config.trust_remote_code, num_labels=1 ) reward_model = AutoModelForSequenceClassification.from_pretrained( config.reward_model_path, trust_remote_code=model_config.trust_remote_code, num_labels=1 ) ref_policy = AutoModelForCausalLM.from_pretrained( config.sft_model_path, trust_remote_code=model_config.trust_remote_code ) policy = AutoModelForCausalLM.from_pretrained( config.sft_model_path, trust_remote_code=model_config.trust_remote_code ) ################ # Dataset ################ raw_datasets = load_dataset("trl-internal-testing/descriptiveness-sentiment-trl-style", split="descriptiveness") eval_samples = 20 train_dataset = raw_datasets.select(range(len(raw_datasets) - eval_samples)) eval_dataset = raw_datasets.select(range(len(raw_datasets) - eval_samples, len(raw_datasets))) dataset_text_field = "prompt" def prepare_dataset(dataset, tokenizer): """pre-tokenize the dataset before training; only collate during training""" def tokenize(element): outputs = tokenizer( element[dataset_text_field], padding=False, ) return {"input_ids": outputs["input_ids"]} return dataset.map( tokenize, batched=True, remove_columns=dataset.column_names, num_proc=config.dataset_num_proc, ) # Compute that only on the main process for faster data processing. 
# see: https://github.com/huggingface/trl/pull/1255 with PartialState().local_main_process_first(): train_dataset = prepare_dataset(train_dataset, tokenizer) eval_dataset = prepare_dataset(eval_dataset, tokenizer) ################ # Training ################ trainer = PPOv2Trainer( config=config, tokenizer=tokenizer, policy=policy, ref_policy=ref_policy, reward_model=reward_model, value_model=value_model, train_dataset=train_dataset, eval_dataset=eval_dataset, ) trainer.train() trainer.save_model(config.output_dir) if config.push_to_hub: trainer.push_to_hub() trainer.generate_completions()
trl/examples/scripts/ppo/ppo.py/0
{ "file_path": "trl/examples/scripts/ppo/ppo.py", "repo_id": "trl", "token_count": 1786 }
492
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from functools import partial import torch from datasets import Dataset from parameterized import parameterized from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, TrainingArguments from trl import IterativeSFTTrainer class IterativeTrainerTester(unittest.TestCase): def setUp(self): self.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab" self.model = AutoModelForCausalLM.from_pretrained(self.model_id) self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) self.tokenizer.pad_token = self.tokenizer.eos_token # get t5 as seq2seq example: model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration-correct-vocab-calibrated" self.t5_model = AutoModelForSeq2SeqLM.from_pretrained(model_id) self.t5_tokenizer = AutoTokenizer.from_pretrained(model_id) def _init_tensor_dummy_dataset(self): dummy_dataset_dict = { "input_ids": [ torch.tensor([5303, 3621, 3666, 1438, 318]), torch.tensor([3666, 1438, 318, 3666, 1438, 318]), torch.tensor([5303, 3621, 3666, 1438, 318]), ], "attention_mask": [ torch.tensor([1, 1, 1, 1, 1]), torch.tensor([1, 1, 1, 1, 1, 1]), torch.tensor([1, 1, 1, 1, 1]), ], "labels": [ torch.tensor([5303, 3621, 3666, 1438, 318]), torch.tensor([3666, 1438, 318, 3666, 1438, 318]), torch.tensor([5303, 3621, 3666, 1438, 318]), ], } dummy_dataset = Dataset.from_dict(dummy_dataset_dict) dummy_dataset.set_format("torch") return dummy_dataset def _init_textual_dummy_dataset(self): dummy_dataset_dict = { "texts": ["Testing the IterativeSFTTrainer.", "This is a test of the IterativeSFTTrainer"], "texts_labels": ["Testing the IterativeSFTTrainer.", "This is a test of the IterativeSFTTrainer"], } dummy_dataset = Dataset.from_dict(dummy_dataset_dict) dummy_dataset.set_format("torch") return dummy_dataset @parameterized.expand( [ ["gpt2", "tensor"], ["gpt2", "text"], ["t5", "tensor"], ["t5", "text"], ] ) def test_iterative_step_from_tensor(self, model_name, input_name): with tempfile.TemporaryDirectory() as tmp_dir: # initialize dataset if input_name == "tensor": dummy_dataset = self._init_tensor_dummy_dataset() inputs = { "input_ids": dummy_dataset["input_ids"], "attention_mask": dummy_dataset["attention_mask"], "labels": dummy_dataset["labels"], } else: dummy_dataset = self._init_textual_dummy_dataset() inputs = { "texts": dummy_dataset["texts"], "texts_labels": dummy_dataset["texts_labels"], } if model_name == "gpt2": model = self.model tokenizer = self.tokenizer else: model = self.t5_model tokenizer = self.t5_tokenizer args = TrainingArguments( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=2, learning_rate=1e-3, report_to="none", ) iterative_trainer = IterativeSFTTrainer(model=model, args=args, tokenizer=tokenizer) iterative_trainer.optimizer.zero_grad = partial(iterative_trainer.optimizer.zero_grad, set_to_none=False) iterative_trainer.step(**inputs) for param in iterative_trainer.model.parameters(): assert 
param.grad is not None
trl/tests/test_iterative_sft_trainer.py/0
{ "file_path": "trl/tests/test_iterative_sft_trainer.py", "repo_id": "trl", "token_count": 2180 }
493
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import os from copy import deepcopy from typing import Optional import torch import torch.nn as nn from accelerate import PartialState from huggingface_hub import hf_hub_download from huggingface_hub.utils import ( EntryNotFoundError, HFValidationError, LocalEntryNotFoundError, RepositoryNotFoundError, ) from safetensors.torch import load_file as safe_load_file from transformers import PreTrainedModel from ..import_utils import is_npu_available, is_peft_available, is_transformers_greater_than, is_xpu_available if is_peft_available(): from peft import ( PeftConfig, PeftModel, PeftModelForCausalLM, PeftModelForSeq2SeqLM, PromptLearningConfig, get_peft_model, prepare_model_for_kbit_training, ) if is_transformers_greater_than("4.33.0"): from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled else: from transformers.deepspeed import is_deepspeed_zero3_enabled LAYER_PATTERNS = [ "transformer.h.{layer}", "model.decoder.layers.{layer}", "gpt_neox.layers.{layer}", "model.layers.{layer}", ] class PreTrainedModelWrapper(nn.Module): r""" A wrapper class around a (`transformers.PreTrainedModel`) to be compatible with the (`~transformers.PreTrained`) class in order to keep some attributes and methods of the (`~transformers.PreTrainedModel`) class. Attributes: pretrained_model: (`transformers.PreTrainedModel`) The model to be wrapped. parent_class: (`transformers.PreTrainedModel`) The parent class of the model to be wrapped. supported_args: (`list`) The list of arguments that are supported by the wrapper class. 
""" transformers_parent_class = None supported_args = None supported_modules = ("v_head",) supported_rm_modules = ("score",) supported_pretrained_model_architectures = ( (PreTrainedModel) if not is_peft_available() else (PreTrainedModel, PeftModelForCausalLM, PeftModelForSeq2SeqLM) ) def __init__( self, pretrained_model=None, score_module=None, supports_rm_adapter=False, rm_adapter_name=None, **kwargs ): super().__init__() self.pretrained_model = pretrained_model self.config = pretrained_model.config self.prepare_inputs_for_generation = pretrained_model.prepare_inputs_for_generation self.is_loaded_in_8bit = getattr(pretrained_model, "is_loaded_in_8bit", False) self.is_loaded_in_4bit = getattr(pretrained_model, "is_loaded_in_4bit", False) self.is_sequential_parallel = False if hasattr(pretrained_model, "gradient_checkpointing_disable"): self.gradient_checkpointing_disable = pretrained_model.gradient_checkpointing_disable if hasattr(pretrained_model, "gradient_checkpointing_enable"): self.gradient_checkpointing_enable = pretrained_model.gradient_checkpointing_enable if hasattr(pretrained_model, "enable_input_require_grads"): self.enable_input_require_grads = pretrained_model.enable_input_require_grads self.supports_rm_adapter = supports_rm_adapter self.rm_adapter_name = rm_adapter_name self.policy_adapter_name = "default" if score_module is not None: self.score = score_module @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Instantiates a new model from a pretrained model from `transformers`. The pretrained model is loaded using the `from_pretrained` method of the `transformers.PreTrainedModel` class. The arguments that are specific to the `transformers.PreTrainedModel` class are passed along this method and filtered out from the `kwargs` argument. Args: pretrained_model_name_or_path (`str` or `transformers.PreTrainedModel`): The path to the pretrained model or its name. *model_args (`list`, *optional*)): Additional positional arguments passed along to the underlying model's `from_pretrained` method. **kwargs (`dict`, *optional*): Additional keyword arguments passed along to the underlying model's `from_pretrained` method. We also pre-process the kwargs to extract the arguments that are specific to the `transformers.PreTrainedModel` class and the arguments that are specific to trl models. The kwargs also support `prepare_model_for_kbit_training` arguments from `peft` library. """ if kwargs is not None: peft_config = kwargs.pop("peft_config", None) reward_adapter = kwargs.pop("reward_adapter", None) reward_adapter_name = kwargs.pop("reward_adapter_name", "reward_adapter") is_trainable = kwargs.pop("is_trainable", False) trl_model_args, pretrained_kwargs, peft_quantization_kwargs = cls._split_kwargs(kwargs) token = pretrained_kwargs.get("token", None) else: peft_config = None is_trainable = False trl_model_args = {} pretrained_kwargs = {} peft_quantization_kwargs = {} token = None if reward_adapter is not None and not isinstance(reward_adapter, str): raise ValueError( "The `reward_adapter` argument should be a string representing the name of local path or the Hub id to the Reward Modeling adapter." 
) is_peft_model = False current_device = cls._get_current_device() if isinstance(pretrained_model_name_or_path, str): is_loaded_in_8bit = pretrained_kwargs["load_in_8bit"] if "load_in_8bit" in pretrained_kwargs else False is_loaded_in_4bit = pretrained_kwargs["load_in_4bit"] if "load_in_4bit" in pretrained_kwargs else False else: is_loaded_in_8bit = getattr(pretrained_model_name_or_path, "is_loaded_in_8bit", False) is_loaded_in_4bit = getattr(pretrained_model_name_or_path, "is_loaded_in_4bit", False) if (is_loaded_in_8bit or is_loaded_in_4bit) and "device_map" not in pretrained_kwargs: # warn users logging.warning( "The `device_map` argument is not provided. We will override the device_map argument." " to set the entire" " model on the current device. If you want to set the model on multiple devices, please provide" " a custom `device_map` argument." ) pretrained_kwargs["device_map"] = {"": current_device} if is_peft_available() and peft_config is not None and not isinstance(peft_config, PeftConfig): raise ValueError("The `peft_config` argument should be an instance of `peft.PeftConfig` class.") # First, load the pre-trained model using the parent-class # either `AutoModelForCausalLM` or `AutoModelForSeq2SeqLM` if isinstance(pretrained_model_name_or_path, str): if is_peft_available(): try: # If there is a trained peft adapter in the hub, load its config. remote_adapter_config = hf_hub_download( pretrained_model_name_or_path, "adapter_config.json", token=token, ) except (EntryNotFoundError, LocalEntryNotFoundError, HFValidationError, RepositoryNotFoundError): remote_adapter_config = None else: remote_adapter_config = None local_adapter_present = os.path.exists(os.path.join(pretrained_model_name_or_path, "adapter_config.json")) if (local_adapter_present or remote_adapter_config is not None) and is_peft_available(): if peft_config is not None: logging.warning( "`peft_config` argument ignored since a peft config file was found in " f"{pretrained_model_name_or_path}" ) # Load the trained peft adapter config if local_adapter_present: trained_adapter_config = PeftConfig.from_pretrained(pretrained_model_name_or_path) else: remote_adapter_dir = os.path.dirname(remote_adapter_config) trained_adapter_config = PeftConfig.from_pretrained(remote_adapter_dir) # Load the pretrained base model pretrained_model = cls.transformers_parent_class.from_pretrained( trained_adapter_config.base_model_name_or_path, *model_args, **pretrained_kwargs ) # Wrap the pretrained model with the trained peft adapter pretrained_model = PeftModel.from_pretrained( pretrained_model, pretrained_model_name_or_path, is_trainable=is_trainable, token=token ) logging.info("Trained peft adapter loaded") else: pretrained_model = cls.transformers_parent_class.from_pretrained( pretrained_model_name_or_path, *model_args, **pretrained_kwargs ) if peft_config is not None: # Initialize a new peft adapter with the given config if is_loaded_in_8bit or is_loaded_in_4bit: pretrained_model = prepare_model_for_kbit_training( pretrained_model, **peft_quantization_kwargs, ) pretrained_model = get_peft_model(pretrained_model, peft_config) logging.info("peft adapter initialised") elif isinstance(pretrained_model_name_or_path, cls.supported_pretrained_model_architectures): pretrained_model = pretrained_model_name_or_path if peft_config is not None and isinstance(pretrained_model, PreTrainedModel): # Initialize a new peft adapter with the given config if is_loaded_in_8bit or is_loaded_in_4bit: pretrained_model = prepare_model_for_kbit_training( 
pretrained_model, **peft_quantization_kwargs, ) pretrained_model = get_peft_model(pretrained_model, peft_config) logging.info("peft adapter initialised") else: raise ValueError( "pretrained_model_name_or_path should be a string or a PreTrainedModel, " f"but is {type(pretrained_model_name_or_path)}" ) if is_peft_available(): if isinstance(pretrained_model, PeftModel): is_peft_model = True # for backward compatibility if hasattr(pretrained_model, "active_peft_config") and isinstance( pretrained_model.active_peft_config, PromptLearningConfig ): raise ValueError("PromptLearningConfig is not supported for PPO training.") # Add reward modeling adapter if specified if not is_peft_model and reward_adapter is not None: raise ValueError("reward_adapter can only be used with a PeftModel. ") elif is_peft_model and reward_adapter is not None: score_module = cls.add_and_load_reward_modeling_adapter( pretrained_model, reward_adapter, reward_adapter_name, token=token ) multi_adapter_args = { "score_module": score_module, "supports_rm_adapter": True, "rm_adapter_name": reward_adapter_name, } else: multi_adapter_args = {"supports_rm_adapter": False} # Then, create the full model by instantiating the wrapper class model = cls(pretrained_model, **multi_adapter_args, **trl_model_args) # if resume_training, load the state_dict again - this is ok since the # state_dict is removed from the model after loading it. is_resuming_training = True if isinstance(pretrained_model_name_or_path, str): safe_filename = os.path.join(pretrained_model_name_or_path, "model.safetensors") filename = os.path.join(pretrained_model_name_or_path, "pytorch_model.bin") sharded_index_filename = os.path.join(pretrained_model_name_or_path, "pytorch_model.bin.index.json") safe_sharded_index_filename = os.path.join(pretrained_model_name_or_path, "model.safetensors.index.json") is_sharded = False use_safe = os.path.exists(safe_filename) if not (os.path.exists(filename) or os.path.exists(safe_filename)): # Try with `pytorch_model.bin` filename, files_to_download, is_sharded, is_resuming_training = cls._get_checkpoint_from_hub( pretrained_model, pretrained_model_name_or_path, sharded_index_filename, token=token, ) # Try with safetensors if filename is None and files_to_download is None: safe_filename, files_to_download, is_sharded, is_resuming_training = cls._get_checkpoint_from_hub( pretrained_model, pretrained_model_name_or_path, safe_sharded_index_filename, token=token, model_name="model.safetensors", model_index_name="model.safetensors.index.json", ) use_safe = True else: use_safe = False loading_func = safe_load_file if use_safe else torch.load load_kwargs = {} if use_safe else {"map_location": "cpu", "weights_only": True} if is_resuming_training: if is_sharded: # download each file and add it to the state_dict state_dict = {} for shard_file in files_to_download: filename = hf_hub_download( pretrained_model_name_or_path, shard_file, token=token, ) state_dict.update(loading_func(filename, **load_kwargs)) else: state_dict = loading_func(filename if not use_safe else safe_filename, **load_kwargs) else: state_dict = pretrained_model_name_or_path.state_dict() model.is_peft_model = is_peft_model model.current_device = current_device if is_resuming_training: model.post_init(state_dict=state_dict) return model @classmethod def _get_checkpoint_from_hub( cls, pretrained_model, pretrained_model_name_or_path, index_filename, token=None, model_name="pytorch_model.bin", model_index_name="pytorch_model.bin.index.json", ): files_to_download = None filename 
= None is_resuming_training = True is_sharded = False try: filename = hf_hub_download( pretrained_model_name_or_path, model_name, token=token, ) # sharded except (EntryNotFoundError, LocalEntryNotFoundError, HFValidationError, RepositoryNotFoundError): if os.path.exists(index_filename): index_file_name = index_filename else: try: index_file_name = hf_hub_download( pretrained_model_name_or_path, model_index_name, token=token, ) except (EntryNotFoundError, LocalEntryNotFoundError, HFValidationError, RepositoryNotFoundError): # not continue training, do not have v_head weight is_resuming_training = False logging.warning( f"A {type(pretrained_model)} model is loaded from '{pretrained_model_name_or_path}', " f"and no v_head weight is found. This IS expected if you are not resuming PPO training." ) # load json if is_resuming_training: with open(index_file_name) as f: index = json.load(f) # check filename with `v_head` or any known extra module: files_to_download = set() for k, v in index["weight_map"].items(): if any(module in k for module in cls.supported_modules): files_to_download.add(v) is_sharded = True return filename, files_to_download, is_sharded, is_resuming_training @classmethod def _get_current_device(cls): r""" Get the current device. For GPU, we return the local process index using the `accelerate.PartialState` object to handle corner cases when running scripts in distributed environments. Returns: current_device (`Union[int, str]`): The current device. """ state = PartialState() if is_xpu_available(): return f"xpu:{state.local_process_index}" elif is_npu_available(): return f"npu:{state.local_process_index}" else: return state.local_process_index if torch.cuda.is_available() else "cpu" @classmethod def _split_kwargs(cls, kwargs): """ Separate the kwargs from the arguments that we support inside `supported_args` and the ones that we don't. """ check_peft_kwargs = False if is_peft_available(): from peft import prepare_model_for_kbit_training check_peft_kwargs = True supported_kwargs = {} unsupported_kwargs = {} peft_kwargs = {} for key, value in kwargs.items(): if key in cls.supported_args: supported_kwargs[key] = value else: unsupported_kwargs[key] = value if check_peft_kwargs: if key in prepare_model_for_kbit_training.__code__.co_varnames: peft_kwargs[key] = value if key in unsupported_kwargs: unsupported_kwargs.pop(key) return supported_kwargs, unsupported_kwargs, peft_kwargs @classmethod def add_and_load_reward_modeling_adapter( cls, pretrained_model, adapter_model_id, adapter_name="reward_model_adapter", token=None ): r""" Add and load a reward modeling adapter. This method can only be used if the model is a `PeftModel` and if you have initialized the model with the `reward_modeling_adapter_id` argument, pointing to the id of the reward modeling adapter. The latest needs also to contain the score head in order to produce the reward. 
""" pretrained_model.load_adapter(adapter_model_id, adapter_name, is_trainable=False) pretrained_model.train() filename = os.path.join(adapter_model_id, "adapter_model.bin") safe_loading = False if not os.path.exists(filename): try: local_filename = hf_hub_download( adapter_model_id, "adapter_model.bin", token=token, ) except Exception: filename = os.path.join(adapter_model_id, "adapter_model.safetensors") safe_loading = True if not os.path.exists(filename): try: local_filename = hf_hub_download( adapter_model_id, "adapter_model.safetensors", token=token, ) except Exception as exc: raise ValueError( "Could not find adapter model in the Hub, " "make sure you have the correct adapter model id." ) from exc else: local_filename = filename else: local_filename = filename loading_func = safe_load_file if safe_loading else torch.load load_kwargs = {} if safe_loading else {"map_location": "cpu", "weights_only": True} adapter_state_dict = loading_func(local_filename, **load_kwargs) for score_name_candidate in cls.supported_rm_modules: if any(score_name_candidate in name for name in adapter_state_dict.keys()): score_name = score_name_candidate # we have found the correct head name and can break break score_dict = {} for name, param in adapter_state_dict.items(): if score_name in name: key_name = ".".join(name.split(".")[-1:]) score_dict[key_name] = param.to(cls._get_current_device()) num_labels, hidden_dim = score_dict["weight"].shape has_bias = any("bias" in name for name in adapter_state_dict.keys()) score = nn.Linear(hidden_dim, num_labels, bias=has_bias).to( device=cls._get_current_device(), dtype=pretrained_model.dtype, ) score.load_state_dict(score_dict) for param in score.parameters(): param.requires_grad = False return score def push_to_hub(self, *args, **kwargs): r""" Push the pretrained model to the hub. This method is a wrapper around `transformers.PreTrainedModel.push_to_hub`. Please refer to the documentation of `transformers.PreTrainedModel.push_to_hub` for more information. Args: *args (`list`, *optional*): Positional arguments passed along to the underlying model's `push_to_hub` method. **kwargs (`dict`, *optional*): Keyword arguments passed along to the underlying model's `push_to_hub` method. """ raise NotImplementedError def save_pretrained(self, *args, **kwargs): r""" Save the pretrained model to a directory. This method is a wrapper around `transformers.PreTrainedModel.save_pretrained`. Please refer to the documentation of `transformers.PreTrainedModel.save_pretrained` for more information. Args: *args (`list`, *optional*): Positional arguments passed along to the underlying model's `save_pretrained` method. **kwargs (`dict`, *optional*): Keyword arguments passed along to the underlying model's `save_pretrained` method. """ state_dict = kwargs.get("state_dict") if state_dict is None: state_dict = self.state_dict() kwargs["state_dict"] = state_dict # if it is a peft model only save the `v_head` state_dict and # pop the `state_dict` from the kwargs to avoid slient bugs with `peft` if self.is_peft_model: save_path = args[0] save_path = os.path.join(save_path, "pytorch_model.bin") torch.save(state_dict, save_path) _ = kwargs.pop("state_dict", None) return self.pretrained_model.save_pretrained(*args, **kwargs) def state_dict(self, *args, **kwargs): r""" Return the state_dict of the pretrained model. """ raise NotImplementedError def post_init(self, *args, **kwargs): r""" Post initialization method. This method is called after the model is instantiated and loaded from a checkpoint. 
It can be used to perform additional operations such as loading the state_dict. """ raise NotImplementedError def compute_reward_score(self, input_ids, attention_mask=None, **kwargs): r""" Computes the reward score for a given input. The method has first to enable the adapter and then compute the reward score. After that the model disables the reward modeling adapter and enables the default ppo adapter again. """ if not self.supports_rm_adapter: raise ValueError("This model does not support reward modeling adapter.") # enable rm adapter self.pretrained_model.set_adapter(self.rm_adapter_name) self.pretrained_model.eval() with torch.no_grad(): base_model_output = self.pretrained_model( input_ids=input_ids, attention_mask=attention_mask, output_hidden_states=True, return_dict=True, **kwargs, ) last_hidden_states = base_model_output.hidden_states[-1] scores = self.score(last_hidden_states) self.pretrained_model.set_adapter(self.policy_adapter_name) self.pretrained_model.eval() return scores def create_reference_model( model: PreTrainedModelWrapper, num_shared_layers: Optional[int] = None, pattern: Optional[str] = None ) -> PreTrainedModelWrapper: """ Creates a static reference copy of a model. Note that model will be in `.eval()` mode. Args: model (`PreTrainedModelWrapper`): The model to be copied. num_shared_layers (`int`, *optional*): The number of initial layers that are shared between both models and kept frozen. pattern (`str`, *optional*): The shared layers are selected with a string pattern (e.g. "transformer.h.{layer}" for GPT2) and if a custom pattern is necessary it can be passed here. Returns `PreTrainedModelWrapper` """ if is_deepspeed_zero3_enabled(): raise ValueError( "DeepSpeed ZeRO-3 is enabled and is not compatible with `create_reference_model()`. Please instantiate your reference model directly with `AutoCausalLM.from_pretrained()`." ) parameter_names = [n for n, _ in model.named_parameters()] ref_model = deepcopy(model) # if no layers are shared, return copy of model if num_shared_layers is None: for param_name in parameter_names: param = ref_model.get_parameter(param_name) param.requires_grad = False return ref_model.eval() # identify layer name pattern if pattern is not None: pattern = pattern.format(layer=num_shared_layers) else: for pattern_candidate in LAYER_PATTERNS: pattern_candidate = pattern_candidate.format(layer=num_shared_layers) if any(pattern_candidate in name for name in parameter_names): pattern = pattern_candidate break if pattern is None: raise ValueError("Layer pattern could not be matched.") # divide parameters in shared and unshared parameter lists shared_param_list = [] unshared_param_list = [] shared_parameter = True for name, _param in model.named_parameters(): if pattern in name: shared_parameter = False if shared_parameter: shared_param_list.append(name) else: unshared_param_list.append(name) # create reference of the original parameter if they are shared for param_name in shared_param_list: param = model.get_parameter(param_name) param.requires_grad = False _ref_param = ref_model.get_parameter(param_name) # for all other parameters just make sure they don't use gradients for param_name in unshared_param_list: param = ref_model.get_parameter(param_name) param.requires_grad = False if pattern is not None and len(unshared_param_list) == 0: logging.warning("Pattern passed or found, but no layers matched in the model. Check for a typo.") return ref_model.eval()
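# Usage sketch (illustrative, with assumed model names, not part of the library code above):
# `create_reference_model` is typically used to build a frozen reference policy for PPO-style
# training, optionally sharing the first layers with the trained model:
#
#   from trl import AutoModelForCausalLMWithValueHead, create_reference_model
#   model = AutoModelForCausalLMWithValueHead.from_pretrained("gpt2")
#   ref_model = create_reference_model(model, num_shared_layers=6)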
trl/trl/models/modeling_base.py/0
{ "file_path": "trl/trl/models/modeling_base.py", "repo_id": "trl", "token_count": 13203 }
494
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from enum import Enum from typing import Dict, Literal, Optional from transformers import TrainingArguments class FDivergenceType(Enum): REVERSE_KL = "reverse_kl" JS_DIVERGENCE = "js_divergence" ALPHA_DIVERGENCE = "alpha_divergence" class FDivergenceConstants: ALPHA_DIVERGENCE_COEF_KEY = "alpha_divergence_coef" ALPHA_DIVERGENCE_COEF_DEFAULT = 1.0 @dataclass class DPOConfig(TrainingArguments): r""" Initialize DPOConfig. Args: beta (`float`, *optional*, defaults to `0.1`): The beta factor in DPO loss. Higher beta means less divergence from the initial policy. For the IPO loss, beta is the regularization parameter denoted by tau in the paper. label_smoothing (`float`, *optional*, defaults to `0.0`): The robust DPO label smoothing parameter from the [cDPO](https://ericmitchell.ai/cdpo.pdf) report and [Robust DPO](https://huggingface.co/papers/2403.00409) paper that should be between 0 and 0.5. loss_type (`str`, *optional*, defaults to `"sigmoid"`): The type of DPO loss to use. Possible values are: - `"sigmoid"`: sigmoid loss from the original [DPO](https://huggingface.co/papers/2305.18290) paper. - `"hinge"`: hinge loss on the normalized likelihood from the [SLiC](https://huggingface.co/papers/2305.10425) paper. - `"ipo"`: IPO loss from the [IPO](https://huggingface.co/papers/2310.12036) paper. - `"exo_pair"`: pairwise EXO loss from the [EXO](https://huggingface.co/papers/2402.00856) paper. - `"nca_pair"`: pairwise NCA loss from the [NCA](https://huggingface.co/papers/2402.05369) paper. - `"robust"`: unbiased estimate of the DPO loss that is robust to preference noise from the [Robust DPO](https://huggingface.co/papers/2403.00409) paper. - `"bco_pair"`: pairwise BCO loss from the [BCO](https://huggingface.co/papers/2404.04656) paper. - `"sppo_hard"`: SPPO loss with hard label from the [SPPO](https://huggingface.co/papers/2405.00675) paper. - `"aot"`: AOT loss for paired datasets from the [AOT](https://huggingface.co/papers/2406.05882) paper. - `"aot_pair"`: AOT loss for unpaired datasets from the [AOT](https://huggingface.co/papers/2406.05882) paper. - `"apo_zero"`: APO-zero loss from the [APO](https://huggingface.co/papers/2408.06266) paper. - `"apo_down"`: APO-down loss from the [APO](https://huggingface.co/papers/2408.06266) paper. label_pad_token_id (`int`, *optional*, defaults to `-100`): The label pad token id. This argument is required if you want to use the default data collator. padding_value (`Optional[int]`, *optional*, defaults to `None`): The padding value if it is different to the tokenizer's pad_token_id. truncation_mode (`str`, *optional*, defaults to `"keep_end"`): The truncation mode to use, either `keep_end` or `keep_start`. This argument is required if you want to use the default data collator. max_length (`Optional[int]`, *optional*, defaults to `None`): The maximum length of the sequences in the batch. 
This argument is required if you want to use the default data collator. max_prompt_length (`Optional[int]`, *optional*, defaults to `None`): The maximum length of the prompt. This argument is required if you want to use the default data collator. max_target_length (`Optional[int]`, *optional*, defaults to `None`): The maximum length of the target. This argument is required if you want to use the default data collator and your model is an encoder-decoder. is_encoder_decoder(`Optional[int]`, *optional*, defaults to `None`): If no model is provided, we need to know if the model_init returns an encoder-decoder. disable_dropout (`bool`, *optional*, defaults to `True`): Whether or not to disable dropouts in `model` and `ref_model`. generate_during_eval (`bool`, *optional*, defaults to `False`): Whether to sample and log generations during evaluation step. precompute_ref_log_probs (`bool`, *optional*, defaults to `False`): Flag to precompute reference model log probabilities for training and evaluation datasets. This is useful if you want to train without the reference model and reduce the total GPU memory needed. dataset_num_proc (`Optional[int]`, *optional*, defaults to `None`): The number of workers to use to tokenize the data. Defaults to None. model_init_kwargs (`Optional[Dict]`, *optional*, defaults to `None`): Dict of Optional kwargs to pass when instantiating the model from a string ref_model_init_kwargs (`Optional[Dict]`, *optional*, defaults to `None`): Dict of Optional kwargs to pass when instantiating the ref model from a string model_adapter_name (`Optional[str]`, *optional*, defaults to `None`): Name of the train target PEFT adapter, when using LoRA with multiple adapters. ref_adapter_name (`Optional[str]`, *optional*, defaults to `None`): Name of the reference PEFT adapter, when using LoRA with multiple adapters. reference_free (`bool`, *optional*, defaults to `False`): If True, we ignore the _provided_ reference model and implicitly use a reference model that assigns equal probability to all responses. force_use_ref_model (`bool`, *optional*, defaults to `False`): In case one passes a PEFT model for the active model and you want to use a different model for the ref_model, set this flag to `True`. f_divergence_type (`FDivergenceType`, *optional*, defaults to `FDivergenceType.REVERSE_KL`): The type of f-divergence regularization function to compute divergence between policy and reference model. This argument is optional, defaults to `FDivergenceType.REVERSE_KL`. f_alpha_divergence_coef (`float`, *optional*, defaults to `1.0`): The alpha coef in alpha-divergence(u^-alpha) regularization function for DPO loss. sync_ref_model ('bool', *optional*, defaults to `False`): The flag for syncing reference model during training from the [TR-DPO](https://huggingface.co/papers/2404.09656) paper. ref_model_mixup_alpha ('float', *optional*, defaults to `1.0`): The alpha parameter from the [TR-DPO](https://huggingface.co/papers/2404.09656) paper. ref_model_sync_steps ('int', *optional*, defaults to `2`): The tau parameter from the [TR-DPO](https://huggingface.co/papers/2404.09656) paper. rpo_alpha ('float', *optional*, defaults to `None`): The alpha parameter from the [RPO](https://huggingface.co/papers/2404.19733) paper V3. If None, no weighting is applied and the loss is the same as the DPO loss. The paper recommends `rpo_alpha=1.0`. 
""" beta: float = 0.1 label_smoothing: float = 0 loss_type: Literal[ "sigmoid", "hinge", "ipo", "exo_pair", "nca_pair", "robust", "bco_pair", "sppo_hard", "aot", "aot_pair", "apo_zero", "apo_down", ] = "sigmoid" label_pad_token_id: int = -100 padding_value: Optional[int] = None truncation_mode: str = "keep_end" max_length: Optional[int] = None max_prompt_length: Optional[int] = None max_target_length: Optional[int] = None is_encoder_decoder: Optional[bool] = None disable_dropout: bool = True generate_during_eval: bool = False precompute_ref_log_probs: bool = False dataset_num_proc: Optional[int] = None model_init_kwargs: Optional[Dict] = None ref_model_init_kwargs: Optional[Dict] = None model_adapter_name: Optional[str] = None ref_adapter_name: Optional[str] = None reference_free: bool = False force_use_ref_model: bool = False f_divergence_type: FDivergenceType = FDivergenceType.REVERSE_KL f_alpha_divergence_coef: float = 1.0 sync_ref_model: bool = False ref_model_mixup_alpha: float = 0.9 ref_model_sync_steps: int = 64 rpo_alpha: Optional[float] = None def __post_init__(self): if self.loss_type == "kto_pair": raise ValueError("Support for kto_pair has been removed in DPOTrainer. Please use KTOTrainer.") return super().__post_init__()
trl/trl/trainer/dpo_config.py/0
{ "file_path": "trl/trl/trainer/dpo_config.py", "repo_id": "trl", "token_count": 3487 }
495
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import warnings from collections import defaultdict from dataclasses import FrozenInstanceError, replace from functools import wraps from typing import Any, Callable, Dict, List, Optional, Tuple, Union import pandas as pd import torch import torch.nn as nn from accelerate.utils import gather_object from datasets import Dataset from transformers import DataCollator, PreTrainedModel, PreTrainedTokenizerBase, Trainer, TrainingArguments from transformers.trainer_callback import TrainerCallback from transformers.trainer_pt_utils import nested_detach from transformers.trainer_utils import EvalPrediction from ..import_utils import is_peft_available from .reward_config import RewardConfig from .utils import RewardDataCollatorWithPadding, compute_accuracy, print_rich_table, trl_sanitze_kwargs_for_tagging if is_peft_available(): from peft import PeftModel, get_peft_model, prepare_model_for_kbit_training class RewardTrainer(Trainer): r""" The RewardTrainer can be used to train your custom Reward Model. It is a subclass of the `transformers.Trainer` class and inherits all of its attributes and methods. It is recommended to use an `AutoModelForSequenceClassification` as the reward model. The reward model should be trained on a dataset of paired examples, where each example is a tuple of two sequences. The reward model should be trained to predict which example in the pair is more relevant to the task at hand. The reward trainer expects a very specific format for the dataset. The dataset should contain two 4 entries at least if you don't use the default `RewardDataCollatorWithPadding` data collator. The entries should be named - `input_ids_chosen` - `attention_mask_chosen` - `input_ids_rejected` - `attention_mask_rejected` Optionally, you can also pass a `margin` entry to the dataset. This entry should contain the margin used to modulate the loss of the reward model as outlined in https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/. If you don't pass a margin, no margin will be used. """ _tag_names = ["trl", "reward-trainer"] def __init__( self, model: Optional[Union[PreTrainedModel, nn.Module]] = None, args: Optional[RewardConfig] = None, data_collator: Optional[DataCollator] = None, train_dataset: Optional[Dataset] = None, eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None, tokenizer: Optional[PreTrainedTokenizerBase] = None, model_init: Optional[Callable[[], PreTrainedModel]] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, callbacks: Optional[List[TrainerCallback]] = None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = ( None, None, ), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, max_length: Optional[int] = None, peft_config: Optional[Dict] = None, ): """ Initialize RewardTrainer. 
Args: model (`transformers.PreTrainedModel`): The model to train, preferably an `AutoModelForSequenceClassification`. args (`RewardConfig`): The arguments to use for training. data_collator (`transformers.DataCollator`): The data collator to use for training. If None is specified, the default data collator (`RewardDataCollatorWithPadding`) will be used which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences. train_dataset (`datasets.Dataset`): The dataset to use for training. eval_dataset (`datasets.Dataset`): The dataset to use for evaluation. tokenizer (`transformers.PreTrainedTokenizerBase`): The tokenizer to use for training. This argument is required if you want to use the default data collator. model_init (`Callable[[], transformers.PreTrainedModel]`): The model initializer to use for training. If None is specified, the default model initializer will be used. compute_metrics (`Callable[[transformers.EvalPrediction], Dict]`, *optional* defaults to `compute_accuracy`): The metrics to use for evaluation. If no metrics are specified, the default metric (`compute_accuracy`) will be used. callbacks (`List[transformers.TrainerCallback]`): The callbacks to use for training. optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`): The optimizer and scheduler to use for training. preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`): The function to use to preprocess the logits before computing the metrics. max_length (`int`, defaults to `None`): The maximum length of the sequences in the batch. This argument is required if you want to use the default data collator. peft_config (`Dict`, defaults to `None`): The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model. """ if type(args) == TrainingArguments: warnings.warn( "Using `transformers.TrainingArguments` for `args` is deprecated and will be removed in a future version. Please use `RewardConfig` instead.", FutureWarning, ) if max_length is not None: warnings.warn( "The `max_length` argument is deprecated and will be removed in a future version. Please use the `RewardConfig` to set `max_length` instead.", FutureWarning, ) else: if max_length is not None and args.max_length is not None: raise ValueError( "You cannot specify both `max_length` and `args.max_length`. Please use the `RewardConfig` to set `max_length` once." ) if max_length is not None and args.max_length is None: warnings.warn( "The `max_length` argument is deprecated and will be removed in a future version. Please use the `RewardConfig` to set `max_length` instead.", FutureWarning, ) if not is_peft_available() and peft_config is not None: raise ValueError( "PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models" ) elif is_peft_available() and peft_config is not None: if not isinstance(model, PeftModel): if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_quantized", False): _supports_gc_kwargs = "gradient_checkpointing_kwargs" in list( inspect.signature(prepare_model_for_kbit_training).parameters ) prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing} if not _supports_gc_kwargs and args.gradient_checkpointing_kwargs is not None: warnings.warn( "You passed `gradient_checkpointing_kwargs` in the trainer's kwargs, but your peft version does not support it. 
" "please update to the latest version of peft to use `gradient_checkpointing_kwargs`." ) elif _supports_gc_kwargs and args.gradient_checkpointing_kwargs is not None: prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs model = prepare_model_for_kbit_training(model, **prepare_model_kwargs) model = get_peft_model(model, peft_config) if compute_metrics is None: compute_metrics = compute_accuracy if data_collator is None: if tokenizer is None: raise ValueError( "max_length or a tokenizer must be specified when using the default RewardDataCollatorWithPadding" ) if type(args) == TrainingArguments: if max_length is None: warnings.warn( "When using RewardDataCollatorWithPadding, you should set `max_length` in RewardConfig." " It will be set to `512` by default, but you should do it yourself in the future.", UserWarning, ) max_length = 512 else: if max_length is None and args.max_length is None: warnings.warn( "When using RewardDataCollatorWithPadding, you should set `max_length` in RewardConfig." " It will be set to `512` by default, but you should do it yourself in the future.", UserWarning, ) max_length = 512 if max_length is None and args.max_length is not None: max_length = args.max_length data_collator = RewardDataCollatorWithPadding(tokenizer, max_length=max_length) if args.remove_unused_columns: try: # for bc before https://github.com/huggingface/transformers/pull/25435 args.remove_unused_columns = False except FrozenInstanceError: args = replace(args, remove_unused_columns=False) # warn users warnings.warn( "When using RewardDataCollatorWithPadding, you should set `remove_unused_columns=False` in your RewardConfig" " we have set it for you, but you should do it yourself in the future.", UserWarning, ) self.use_reward_data_collator = True else: self.use_reward_data_collator = False super().__init__( model=model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, model_init=model_init, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics, ) # Add tags for models that have been loaded with the correct transformers version if hasattr(self.model, "add_model_tags"): self.model.add_model_tags(self._tag_names) def compute_loss( self, model: Union[PreTrainedModel, nn.Module], inputs: Dict[str, Union[torch.Tensor, Any]], return_outputs=False, ) -> Union[torch.Tensor, Tuple[torch.Tensor, Dict[str, torch.Tensor]]]: if not self.use_reward_data_collator: warnings.warn( "The current compute_loss is implemented for RewardDataCollatorWithPadding," " if you are using a custom data collator make sure you know what you are doing or" " implement your own compute_loss method." 
) rewards_chosen = model( input_ids=inputs["input_ids_chosen"], attention_mask=inputs["attention_mask_chosen"], return_dict=True, )["logits"] rewards_rejected = model( input_ids=inputs["input_ids_rejected"], attention_mask=inputs["attention_mask_rejected"], return_dict=True, )["logits"] # calculate loss, optionally modulate with margin if "margin" in inputs: loss = -nn.functional.logsigmoid(rewards_chosen - rewards_rejected - inputs["margin"]).mean() else: loss = -nn.functional.logsigmoid(rewards_chosen - rewards_rejected).mean() if self.args.center_rewards_coefficient is not None: loss += self.args.center_rewards_coefficient * torch.mean((rewards_chosen + rewards_rejected) ** 2) if return_outputs: return loss, { "rewards_chosen": rewards_chosen, "rewards_rejected": rewards_rejected, } return loss def prediction_step( self, model: Union[PreTrainedModel, nn.Module], inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, ) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: inputs = self._prepare_inputs(inputs) if ignore_keys is None: if hasattr(self.model, "config"): ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", []) else: ignore_keys = [] with torch.no_grad(): loss, logits_dict = self.compute_loss(model, inputs, return_outputs=True) if prediction_loss_only: return (loss, None, None) loss = loss.detach() logits = tuple(v for k, v in logits_dict.items() if k not in ignore_keys) logits = nested_detach(logits) # Stack accepted against rejected, mean over logits # and softmax to get preferences between accepted and rejected to sum to 1 logits = torch.stack(logits).mean(dim=2).softmax(dim=0).T labels = torch.zeros(logits.shape[0]) labels = self._prepare_inputs(labels) return loss, logits, labels def evaluate(self, *args, **kwargs): num_print_samples = kwargs.pop("num_print_samples", 4) self.visualize_samples(num_print_samples) return super().evaluate(*args, **kwargs) def visualize_samples(self, num_print_samples: int): """ Visualize the reward model logits prediction Args: num_print_samples (`int`, defaults to `4`): The number of samples to print. Set to `-1` to print all samples. """ eval_dataloader = self.get_eval_dataloader() table = defaultdict(list) for _, inputs in enumerate(eval_dataloader): _, logits, _ = self.prediction_step(self.model, inputs, prediction_loss_only=False) chosen_text = self.tokenizer.batch_decode(inputs["input_ids_chosen"], skip_special_tokens=True) rejected_text = self.tokenizer.batch_decode(inputs["input_ids_rejected"], skip_special_tokens=True) table["chosen_text"].extend(gather_object(chosen_text)) table["rejected_text"].extend(gather_object(rejected_text)) table["logits"].extend( gather_object([[round(inner_item, 4) for inner_item in item] for item in logits.tolist()]) ) if num_print_samples >= 0 and len(table["chosen_text"]) >= num_print_samples: break df = pd.DataFrame(table) if self.accelerator.process_index == 0: print_rich_table(df[:num_print_samples]) if "wandb" in self.args.report_to: import wandb if wandb.run is not None: wandb.log({"completions": wandb.Table(dataframe=df)}) @wraps(Trainer.push_to_hub) def push_to_hub( self, commit_message: Optional[str] = "End of training", blocking: bool = True, **kwargs, ) -> str: """ Overwrite the `push_to_hub` method in order to force-add the tag "reward-trainer" when pushing the model on the Hub. Please refer to `~transformers.Trainer.push_to_hub` for more details. 
Unlike the parent class, we don't use the `token` argument to mitigate security risks. """ kwargs = trl_sanitze_kwargs_for_tagging(model=self.model, tag_names=self._tag_names, kwargs=kwargs) return super().push_to_hub(commit_message=commit_message, blocking=blocking, **kwargs)
trl/trl/trainer/reward_trainer.py/0
{ "file_path": "trl/trl/trainer/reward_trainer.py", "repo_id": "trl", "token_count": 7195 }
496
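The core of the reward trainer row above is the pairwise ranking objective in `compute_loss`. The following is a minimal, hedged sketch of that `-logsigmoid(chosen - rejected)` loss on dummy tensors; the reward values, margins, and the `0.01` centering coefficient are made-up placeholders, not values from the trainer.

```python
# Illustrative sketch of the RewardTrainer pairwise objective on dummy logits.
# All numbers below, including the 0.01 centering coefficient, are arbitrary examples.
import torch
import torch.nn.functional as F

rewards_chosen = torch.tensor([[1.3], [0.2], [2.1]])    # reward-model logits for preferred completions
rewards_rejected = torch.tensor([[0.4], [0.9], [1.0]])  # logits for the rejected completions
margin = torch.tensor([[0.5], [0.0], [1.0]])            # optional per-pair margin, as in the "margin" input key

# Pairwise Bradley-Terry style loss, with the optional margin subtracted.
loss = -F.logsigmoid(rewards_chosen - rewards_rejected - margin).mean()

# Optional auxiliary term that pushes rewards toward zero mean, mirroring
# what happens when `center_rewards_coefficient` is set in the config.
center_rewards_coefficient = 0.01
loss = loss + center_rewards_coefficient * torch.mean((rewards_chosen + rewards_rejected) ** 2)
print(loss.item())
```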
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script tests to ensure that `accelerate` performs at the same level as raw `TransformersEngine`. This particular script verifies this for DDP training. """ import evaluate import torch import transformer_engine.common.recipe as te_recipe import transformer_engine.pytorch as te from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities from torch.nn.parallel import DistributedDataParallel as DDP from transformer_engine.common.recipe import DelayedScaling from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.utils import FP8RecipeKwargs, set_seed from accelerate.utils.transformer_engine import convert_model MODEL_NAME = "bert-base-cased" METRIC = evaluate.load("glue", "mrpc") def train_baseline(): set_seed(42) model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME) accelerator = Accelerator() device = accelerator.device model.to(device) # Convert the model to TE old_named_params = get_named_parameters(model) with torch.no_grad(): convert_model(model) FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"} fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS) new_named_params = get_named_parameters(model) # Convert the model to DDP device_ids, output_device = [accelerator.local_process_index], accelerator.local_process_index model = DDP(model, device_ids=device_ids, output_device=output_device) mapping = {p: new_named_params[n] for n, p in old_named_params.items()} for param_group in optimizer.param_groups: param_group["params"] = [mapping[p] for p in param_group["params"]] base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for _ in range(2): for batch in train_dataloader: with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe): with torch.autocast(device_type="cuda", dtype=torch.bfloat16): batch = batch.to(device) outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) assert ( trained_model_results["accuracy"] > base_model_results["accuracy"] ), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}' assert ( trained_model_results["f1"] > base_model_results["f1"] ), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}' return base_model_results, trained_model_results def train_integration(): FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"} kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)] AcceleratorState()._reset_state(True) accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers) set_seed(42) 
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities( MODEL_NAME, accelerator=accelerator ) model, optimizer = accelerator.prepare(model, optimizer) base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) model.train() for _ in range(2): for batch in train_dataloader: outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() optimizer.zero_grad() lr_scheduler.step() trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator) assert ( trained_model_results["accuracy"] > base_model_results["accuracy"] ), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}' assert ( trained_model_results["f1"] > base_model_results["f1"] ), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}' return base_model_results, trained_model_results if __name__ == "__main__": baseline_not_trained, baseline_trained = train_baseline() accelerator_not_trained, accelerator_trained = train_integration() assert ( baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"] ), f'Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}' assert ( baseline_not_trained["f1"] == accelerator_not_trained["f1"] ), f'F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}' assert ( baseline_trained["accuracy"] == accelerator_trained["accuracy"] ), f'Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}' assert ( baseline_trained["f1"] == accelerator_trained["f1"] ), f'F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}' torch.distributed.destroy_process_group()
accelerate/benchmarks/fp8/ddp.py/0
{ "file_path": "accelerate/benchmarks/fp8/ddp.py", "repo_id": "accelerate", "token_count": 2219 }
0
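The benchmark above exercises FP8 training twice: once with raw TransformerEngine calls and once through Accelerate. As a condensed sketch of the Accelerate-side path from `train_integration`, the snippet below shows only how the FP8 recipe is handed to `Accelerator`; it assumes `model`, `optimizer`, and `train_dataloader` are already defined, and that a GPU with TransformerEngine support is available.

```python
# Condensed sketch of the Accelerate FP8 path used in train_integration above.
# `model`, `optimizer`, and `train_dataloader` are assumed to exist already.
from accelerate import Accelerator
from accelerate.utils import FP8RecipeKwargs

kwargs_handlers = [FP8RecipeKwargs(backend="TE", fp8_format="HYBRID", amax_history_len=32, amax_compute_algo="max")]
accelerator = Accelerator(mixed_precision="fp8", kwargs_handlers=kwargs_handlers)
model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)

for batch in train_dataloader:
    loss = model(**batch).loss
    accelerator.backward(loss)   # Accelerate wraps the forward/backward in the FP8 autocast for you
    optimizer.step()
    optimizer.zero_grad()
```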
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Launching Multi-GPU Training from a Jupyter Environment

This tutorial teaches you how to fine-tune a computer vision model with 🤗 Accelerate from a Jupyter Notebook on a distributed system. You will also learn how to set up a few requirements needed to ensure your environment is configured properly, your data has been prepared properly, and finally how to launch training.

<Tip>

This tutorial is also available as a Jupyter Notebook [here](https://github.com/huggingface/notebooks/blob/main/examples/accelerate_examples/simple_cv_example.ipynb)

</Tip>

## Configuring the Environment

Before any training can be performed, a 🤗 Accelerate config file must exist in the system. Usually this can be done by running the following in a terminal and answering the prompts:

```bash
accelerate config
```

However, if general defaults are fine and you are *not* running on a TPU, 🤗 Accelerate has a utility to quickly write your GPU configuration into a config file via [`utils.write_basic_config`].

The following code will restart Jupyter after writing the configuration, as CUDA code was called to perform this.

<Tip warning={true}>

CUDA can't be initialized more than once on a multi-GPU system. It's fine to debug in the notebook and have calls to CUDA, but in order to finally train, a full cleanup and restart will need to be performed.

</Tip>

```python
import os
from accelerate.utils import write_basic_config

write_basic_config()  # Write a config file
os._exit(00)  # Restart the notebook
```

## Preparing the Dataset and Model

Next you should prepare your dataset. As mentioned earlier, great care should be taken when preparing the `DataLoaders` and model to make sure that **nothing** is put on *any* GPU.

If you do, it is recommended to put that specific code into a function and call that from within the notebook launcher interface, which will be shown later.

Make sure the dataset is downloaded based on the directions [here](https://github.com/huggingface/accelerate/tree/main/examples#simple-vision-example)

```python
import os, re, torch, PIL
import numpy as np

from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor

from accelerate import Accelerator
from accelerate.utils import set_seed
from timm import create_model
```

First you need to create a function to extract the class name based on a filename:

```python
import os

data_dir = "../../images"
fnames = os.listdir(data_dir)
fname = fnames[0]
print(fname)
```

```python out
beagle_32.jpg
```

In the case here, the label is `beagle`.
Using regex, you can extract the label from the filename:

```python
import re


def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
```

```python
extract_label(fname)
```

And you can see it properly returned the right name for our file:

```python out
"beagle"
```

Next a `Dataset` class should be made to handle grabbing the image and the label:

```python
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
```

Now to build the dataset. Outside the training function, you can find and declare all the filenames and labels and use them as references inside the launched function:

```python
fnames = [os.path.join("../../images", fname) for fname in fnames if fname.endswith(".jpg")]
```

Next gather all the labels:

```python
all_labels = [extract_label(fname) for fname in fnames]
id_to_label = list(set(all_labels))
id_to_label.sort()
label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
```

Next, you should make a `get_dataloaders` function that will return your built dataloaders for you. As mentioned earlier, if data is automatically sent to the GPU or a TPU device when building your `DataLoaders`, they must be built using this method.

```python
def get_dataloaders(batch_size: int = 64):
    "Builds a set of dataloaders with a batch_size"
    random_perm = np.random.permutation(len(fnames))
    cut = int(0.8 * len(fnames))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training a simple RandomResizedCrop will be used
    train_tfm = Compose([RandomResizedCrop((224, 224), scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset([fnames[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)

    # For evaluation a deterministic Resize will be used
    eval_tfm = Compose([Resize((224, 224)), ToTensor()])
    eval_dataset = PetsDataset([fnames[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size * 2, num_workers=4)

    return train_dataloader, eval_dataloader
```

Finally, you should import the scheduler to be used later (the training loop below uses `OneCycleLR`):

```python
from torch.optim.lr_scheduler import OneCycleLR
```

## Writing the Training Function

Now you can build the training loop. [`notebook_launcher`] works by passing in a function to call that will be run across the distributed system.

Here is a basic training loop for the animal classification problem:

<Tip>

The code has been split up to allow for explanations on each section. A full version that can be copied and pasted will be available at the end

</Tip>

```python
def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64):
    set_seed(seed)
    accelerator = Accelerator(mixed_precision=mixed_precision)
```

First you should set the seed and create an [`Accelerator`] object as early in the training loop as possible.
<Tip warning={true}> If training on the TPU, your training loop should take in the model as a parameter and it should be instantiated outside of the training loop function. See the [TPU best practices](../concept_guides/training_tpu) to learn why </Tip> Next you should build your dataloaders and create your model: ```python train_dataloader, eval_dataloader = get_dataloaders(batch_size) model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) ``` <Tip> You build the model here so that the seed also controls the new weight initialization </Tip> As you are performing transfer learning in this example, the encoder of the model starts out frozen so the head of the model can be trained only initially: ```python for param in model.parameters(): param.requires_grad = False for param in model.get_classifier().parameters(): param.requires_grad = True ``` Normalizing the batches of images will make training a little faster: ```python mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None] std = torch.tensor(model.default_cfg["std"])[None, :, None, None] ``` To make these constants available on the active device, you should set it to the Accelerator's device: ```python mean = mean.to(accelerator.device) std = std.to(accelerator.device) ``` Next instantiate the rest of the PyTorch classes used for training: ```python optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25) lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=3e-2, epochs=5, steps_per_epoch=len(train_dataloader)) ``` Before passing everything to [`~Accelerator.prepare`]. <Tip> There is no specific order to remember, you just need to unpack the objects in the same order you gave them to the prepare method. </Tip> ```python model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) ``` Now train the model: ```python for epoch in range(5): model.train() for batch in train_dataloader: inputs = (batch["image"] - mean) / std outputs = model(inputs) loss = torch.nn.functional.cross_entropy(outputs, batch["label"]) accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() ``` The evaluation loop will look slightly different compared to the training loop. The number of elements passed as well as the overall total accuracy of each batch will be added to two constants: ```python model.eval() accurate = 0 num_elems = 0 ``` Next you have the rest of your standard PyTorch loop: ```python for batch in eval_dataloader: inputs = (batch["image"] - mean) / std with torch.no_grad(): outputs = model(inputs) predictions = outputs.argmax(dim=-1) ``` Before finally the last major difference. 
When performing distributed evaluation, the predictions and labels need to be passed through [`~Accelerator.gather`] so that all of the data is available on the current device and a properly calculated metric can be achieved: ```python accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch["label"]) num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() ``` Now you just need to calculate the actual metric for this problem, and you can print it on the main process using [`~Accelerator.print`]: ```python eval_metric = accurate.item() / num_elems accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}") ``` A full version of this training loop is available below: ```python def training_loop(mixed_precision="fp16", seed: int = 42, batch_size: int = 64): set_seed(seed) # Initialize accelerator accelerator = Accelerator(mixed_precision=mixed_precision) # Build dataloaders train_dataloader, eval_dataloader = get_dataloaders(batch_size) # Instantiate the model (you build the model here so that the seed also controls new weight initaliziations) model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id)) # Freeze the base model for param in model.parameters(): param.requires_grad = False for param in model.get_classifier().parameters(): param.requires_grad = True # You can normalize the batches of images to be a bit faster mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None] std = torch.tensor(model.default_cfg["std"])[None, :, None, None] # To make these constants available on the active device, set it to the accelerator device mean = mean.to(accelerator.device) std = std.to(accelerator.device) # Instantiate the optimizer optimizer = torch.optim.Adam(params=model.parameters(), lr=3e-2 / 25) # Instantiate the learning rate scheduler lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=3e-2, epochs=5, steps_per_epoch=len(train_dataloader)) # Prepare everything # There is no specific order to remember, you just need to unpack the objects in the same order you gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # Now you train the model for epoch in range(5): model.train() for batch in train_dataloader: inputs = (batch["image"] - mean) / std outputs = model(inputs) loss = torch.nn.functional.cross_entropy(outputs, batch["label"]) accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() accurate = 0 num_elems = 0 for batch in eval_dataloader: inputs = (batch["image"] - mean) / std with torch.no_grad(): outputs = model(inputs) predictions = outputs.argmax(dim=-1) accurate_preds = accelerator.gather(predictions) == accelerator.gather(batch["label"]) num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() eval_metric = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}") ``` ## Using the notebook_launcher All that's left is to use the [`notebook_launcher`]. You pass in the function, the arguments (as a tuple), and the number of processes to train on. 
(See the [documentation](../package_reference/launchers) for more information)

```python
from accelerate import notebook_launcher
```

```python
args = ("fp16", 42, 64)
notebook_launcher(training_loop, args, num_processes=2)
```

In the case of running on multiple nodes, you need to set up a Jupyter session at each node and run the launching cell at the same time.

For an environment containing 2 nodes (computers) with 8 GPUs each and the main computer with an IP address of "172.31.43.8", it would look like so:

```python
notebook_launcher(training_loop, args, master_addr="172.31.43.8", node_rank=0, num_nodes=2, num_processes=8)
```

And in the second Jupyter session on the other machine:

<Tip>

Notice how the `node_rank` has changed

</Tip>

```python
notebook_launcher(training_loop, args, master_addr="172.31.43.8", node_rank=1, num_nodes=2, num_processes=8)
```

In the case of running on the TPU, it would look like so:

```python
model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

args = (model, "fp16", 42, 64)
notebook_launcher(training_loop, args, num_processes=8)
```

To launch the training process with elasticity, enabling fault tolerance, you can use the `elastic_launch` feature provided by PyTorch. This requires setting additional parameters such as `rdzv_backend` and `max_restarts`. Here is an example of how to use `notebook_launcher` with elastic capabilities:

```python
notebook_launcher(
    training_loop,
    args,
    num_processes=2,
    max_restarts=3
)
```

As it's running, it will print the progress as well as state how many devices you ran on. This tutorial was run with two GPUs:

```python out
Launching training on 2 GPUs.
epoch 0: 88.12
epoch 1: 91.73
epoch 2: 92.58
epoch 3: 93.90
epoch 4: 94.71
```

And that's it!

Please note that [`notebook_launcher`] ignores the 🤗 Accelerate config file; to launch based on the config, use:

```bash
accelerate launch
```

## Debugging

A common issue when running the `notebook_launcher` is receiving a "CUDA has already been initialized" error. This usually stems from an import or prior code in the notebook that makes a call to the PyTorch `torch.cuda` sublibrary. To help narrow down what went wrong, you can launch the `notebook_launcher` with `ACCELERATE_DEBUG_MODE=yes` in your environment, and an additional check will be made when spawning to verify that a regular process can be created and utilize CUDA without issue. (Your CUDA code can still be run afterwards).

## Conclusion

This notebook showed how to perform distributed training from inside of a Jupyter Notebook. Some key notes to remember:

- Make sure to save any code that uses CUDA (or CUDA imports) for the function passed to [`notebook_launcher`]
- Set the `num_processes` to be the number of devices used for training (such as the number of GPUs, CPUs, TPUs, etc.)
- If using the TPU, declare your model outside the training loop function
accelerate/docs/source/basic_tutorials/notebook.md/0
{ "file_path": "accelerate/docs/source/basic_tutorials/notebook.md", "repo_id": "accelerate", "token_count": 5708 }
1
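To make the debugging tip from the notebook tutorial above concrete, the environment variable can also be set from inside the notebook before calling the launcher. This is an illustrative sketch only; `training_loop` and `args` are the names defined earlier in that tutorial, not new API.

```python
# Illustrative only: turn on Accelerate's spawn-time CUDA sanity check before
# launching from a notebook. `training_loop` and `args` come from the tutorial above.
import os

from accelerate import notebook_launcher

os.environ["ACCELERATE_DEBUG_MODE"] = "yes"
notebook_launcher(training_loop, args, num_processes=2)
```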
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Accelerator

The [`Accelerator`] is the main class for enabling distributed training on any type of training setup. Read the [Add Accelerator to your code](../basic_tutorials/migration) tutorial to learn more about how to add the [`Accelerator`] to your script.

## Accelerator[[api]]

[[autodoc]] Accelerator

## Utilities

[[autodoc]] accelerate.utils.gather_object
accelerate/docs/source/package_reference/accelerator.md/0
{ "file_path": "accelerate/docs/source/package_reference/accelerator.md", "repo_id": "accelerate", "token_count": 289 }
2
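Since the reference page above only lists `gather_object` through autodoc, here is a short hedged example of what a typical call looks like in a multi-process script; the `predictions` list is a made-up placeholder for whatever per-process Python objects you want to collect.

```python
# Sketch: gather arbitrary picklable Python objects from all processes.
# `predictions` is a placeholder for per-process outputs (strings, dicts, ...).
from accelerate import Accelerator
from accelerate.utils import gather_object

accelerator = Accelerator()
predictions = [f"sample from process {accelerator.process_index}"]

all_predictions = gather_object(predictions)  # combined list with entries from every process
if accelerator.is_main_process:
    print(all_predictions)
```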
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Handling big models for inference

One of the biggest advancements 🤗 Accelerate provides is the concept of [large model inference](../concept_guides/big_model_inference) wherein you can perform *inference* on models that cannot fully fit on your graphics card.

This tutorial will be broken down into two parts showcasing how to use both 🤗 Accelerate and 🤗 Transformers (a higher-level API) to make use of this idea.

## Using 🤗 Accelerate

For these tutorials, we'll assume a typical workflow for loading your model that looks like this:

```py
import torch

my_model = ModelClass(...)
state_dict = torch.load(checkpoint_file)
my_model.load_state_dict(state_dict)
```

Note that here we assume that `ModelClass` is a model that takes up more video memory than can fit on your device (be it `mps` or `cuda`).

The first step is to initialize an empty skeleton of the model, which won't take up any RAM, using the [`init_empty_weights`] context manager:

```py
from accelerate import init_empty_weights

with init_empty_weights():
    my_model = ModelClass(...)
```

With this, `my_model` is currently "parameterless", leaving a much smaller footprint than loading it onto the CPU directly.

Next we need to load in the weights to our model so we can perform inference.

For this we will use [`load_checkpoint_and_dispatch`], which as the name implies will load a checkpoint inside your empty model and dispatch the weights for each layer across all the devices you have available (GPU/MPS and CPU RAM).

To determine how this `dispatch` can be performed, generally specifying `device_map="auto"` will be good enough as 🤗 Accelerate will attempt to fill all the space in your GPU(s) first, then load to the CPU, and finally, if there is not enough RAM, load to disk (the absolute slowest option).

<Tip>

For more details on designing your own device map, see this section of the [concept guide](../concept_guides/big_model_inference#designing-a-device-map)

</Tip>

See an example below:

```py
from accelerate import load_checkpoint_and_dispatch

model = load_checkpoint_and_dispatch(
    model, checkpoint=checkpoint_file, device_map="auto"
)
```

<Tip>

If there are certain "chunks" of layers that shouldn't be split, you can pass them in as `no_split_module_classes`. Read more about it [here](../concept_guides/big_model_inference#loading-weights)

</Tip>

<Tip>

Also to save on memory (such as if the `state_dict` will not fit in RAM), a model's weights can be divided and split into multiple checkpoint files.
Read more about it [here](../concept_guides/big_model_inference#sharded-checkpoints) </Tip> Now that the model is dispatched fully, you can perform inference as normal with the model: ```py input = torch.randn(2,3) input = input.to("cuda") output = model(input) ``` What will happen now is each time the input gets passed through a layer, it will be sent from the CPU to the GPU (or disk to CPU to GPU), the output is calculated, and then the layer is pulled back off the GPU going back down the line. While this adds some overhead to the inference being performed, through this method it is possible to run **any size model** on your system, as long as the largest layer is capable of fitting on your GPU. <Tip> Multiple GPUs can be utilized, however this is considered "model parallelism" and as a result only one GPU will be active at a given moment, waiting for the prior one to send it the output. You should launch your script normally with `python` and not need `torchrun`, `accelerate launch`, etc. </Tip> For a visual representation of this, check out the animation below: <Youtube id="MWCSGj9jEAo" /> ### Complete Example Below is the full example showcasing what we performed above: ```py import torch from accelerate import init_empty_weights, load_checkpoint_and_dispatch with init_empty_weights(): model = MyModel(...) model = load_checkpoint_and_dispatch( model, checkpoint=checkpoint_file, device_map="auto" ) input = torch.randn(2,3) input = input.to("cuda") output = model(input) ``` ## Using 🤗 Transformers, 🤗 Diffusers, and other 🤗 Open Source Libraries Libraries that support 🤗 Accelerate big model inference include all of the earlier logic in their `from_pretrained` constructors. These operate by specifying a string representing the model to download from the [🤗 Hub](https://hf.co/models) and then denoting `device_map="auto"` along with a few extra parameters. As a brief example, we will look at using `transformers` and loading in Big Science's T0pp model. ```py from transformers import AutoModelForSeq2SeqLM model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto") ``` After loading the model in, the initial steps from before to prepare a model have all been done and the model is fully ready to make use of all the resources in your machine. Through these constructors, you can also save *more* memory by specifying the precision the model is loaded into as well, through the `torch_dtype` parameter, such as: ```py from transformers import AutoModelForSeq2SeqLM model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto", torch_dtype=torch.float16) ``` To learn more about this, check out the 🤗 Transformers documentation available [here](https://huggingface.co/docs/transformers/main/en/main_classes/model#large-model-loading). ## Where to go from here For a much more detailed look at big model inference, be sure to check out the [Conceptual Guide on it](../concept_guides/big_model_inference)
accelerate/docs/source/usage_guides/big_modeling.md/0
{ "file_path": "accelerate/docs/source/usage_guides/big_modeling.md", "repo_id": "accelerate", "token_count": 1766 }
3
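To make the `device_map` and `no_split_module_classes` discussion from the guide above a bit more concrete, here is a hedged sketch with an explicit per-device memory budget; `MyModel`, the `"Block"` class name, and the checkpoint path are placeholders for your own model and files, not real API objects.

```python
# Sketch of big-model loading with an explicit memory budget.
# `MyModel`, "Block", and the checkpoint path are placeholders.
from accelerate import init_empty_weights, load_checkpoint_and_dispatch

with init_empty_weights():
    model = MyModel(...)  # skeleton only, no weights allocated

model = load_checkpoint_and_dispatch(
    model,
    checkpoint="path/to/checkpoint",          # file or folder containing (possibly sharded) weights
    device_map="auto",
    max_memory={0: "20GiB", "cpu": "60GiB"},  # cap per-device usage before spilling further down
    no_split_module_classes=["Block"],        # keep residual blocks on a single device
)
```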
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Amazon SageMaker

Hugging Face and Amazon introduced new [Hugging Face Deep Learning Containers (DLCs)](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers) to make it easier than ever to train Hugging Face Transformer models in [Amazon SageMaker](https://aws.amazon.com/sagemaker/).

## Getting Started

### Setup & Installation

Before you can run your 🤗 Accelerate scripts on Amazon SageMaker, you need to sign up for an AWS account. If you do not have an AWS account yet, learn more [here](https://docs.aws.amazon.com/sagemaker/latest/dg/gs-set-up.html).

After you have your AWS account, you need to install the `sagemaker` SDK for 🤗 Accelerate with:

```bash
pip install "accelerate[sagemaker]" --upgrade
```

🤗 Accelerate currently uses the 🤗 DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. 🤗 Accelerate is not in the DLC yet (it will soon be added!), so to use it within Amazon SageMaker you need to create a `requirements.txt` in the same directory where your training script is located and add it as a dependency:

```
accelerate
```

You should also add any other dependencies you have to this `requirements.txt`.

### Configure 🤗 Accelerate

You can configure the launch configuration for Amazon SageMaker the same as you do for non-SageMaker training jobs with the 🤗 Accelerate CLI:

```bash
accelerate config
# In which compute environment are you running? ([0] This machine, [1] AWS (Amazon SageMaker)): 1
```

🤗 Accelerate will go through a questionnaire about your Amazon SageMaker setup and create a config file you can edit.

<Tip>

🤗 Accelerate does not save any of your credentials.

</Tip>

### Prepare a 🤗 Accelerate fine-tuning script

The training script is very similar to a training script you might run outside of SageMaker, but to save your model after training you need to specify either `/opt/ml/model` or use `os.environ["SM_MODEL_DIR"]` as your save directory. After training, artifacts in this directory are uploaded to S3:

```diff
- torch.save('/opt/ml/model')
+ accelerator.save('/opt/ml/model')
```

<Tip warning={true}>

SageMaker doesn’t support argparse actions. If you want to use, for example, boolean hyperparameters, you need to specify type as bool in your script and provide an explicit True or False value for this hyperparameter. [[REF]](https://sagemaker.readthedocs.io/en/stable/frameworks/pytorch/using_pytorch.html#prepare-a-pytorch-training-script).

</Tip>

### Launch Training

You can launch your training with 🤗 Accelerate CLI with:

```
accelerate launch path_to_script.py --args_to_the_script
```

This will launch your training script using your configuration. The only thing you have to do is provide all the arguments needed by your training script as named arguments.
**Examples** <Tip> If you run one of the example scripts, don't forget to add `accelerator.save('/opt/ml/model')` to it. </Tip> ```bash accelerate launch ./examples/sagemaker_example.py ``` Outputs: ``` Configuring Amazon SageMaker environment Converting Arguments to Hyperparameters Creating Estimator 2021-04-08 11:56:50 Starting - Starting the training job... 2021-04-08 11:57:13 Starting - Launching requested ML instancesProfilerReport-1617883008: InProgress ......... 2021-04-08 11:58:54 Starting - Preparing the instances for training......... 2021-04-08 12:00:24 Downloading - Downloading input data 2021-04-08 12:00:24 Training - Downloading the training image.................. 2021-04-08 12:03:39 Training - Training image download completed. Training in progress.. ........ epoch 0: {'accuracy': 0.7598039215686274, 'f1': 0.8178438661710037} epoch 1: {'accuracy': 0.8357843137254902, 'f1': 0.882249560632689} epoch 2: {'accuracy': 0.8406862745098039, 'f1': 0.8869565217391304} ........ 2021-04-08 12:05:40 Uploading - Uploading generated training model 2021-04-08 12:05:40 Completed - Training job completed Training seconds: 331 Billable seconds: 331 You can find your model data at: s3://your-bucket/accelerate-sagemaker-1-2021-04-08-11-56-47-108/output/model.tar.gz ``` ## Advanced Features ### Distributed Training: Data Parallelism Set up the accelerate config by running `accelerate config` and answer the SageMaker questions and set it up. To use SageMaker DDP, select it when asked `What is the distributed mode? ([0] No distributed training, [1] data parallelism):`. Example config below: ```yaml base_job_name: accelerate-sagemaker-1 compute_environment: AMAZON_SAGEMAKER distributed_type: DATA_PARALLEL ec2_instance_type: ml.p3.16xlarge iam_role_name: xxxxx image_uri: null mixed_precision: fp16 num_machines: 1 profile: xxxxx py_version: py38 pytorch_version: 1.10.2 region: us-east-1 transformers_version: 4.17.0 use_cpu: false ``` ### Distributed Training: Model Parallelism *currently in development, will be supported soon.* ### Python packages and dependencies 🤗 Accelerate currently uses the 🤗 DLCs, with `transformers`, `datasets` and `tokenizers` pre-installed. If you want to use different/other Python packages you can do this by adding them to the `requirements.txt`. These packages will be installed before your training script is started. ### Local Training: SageMaker Local mode The local mode in the SageMaker SDK allows you to run your training script locally inside the HuggingFace DLC (Deep Learning container) or using your custom container image. This is useful for debugging and testing your training script inside the final container environment. Local mode uses Docker compose (*Note: Docker Compose V2 is not supported yet*). The SDK will handle the authentication against ECR to pull the DLC to your local environment. You can emulate CPU (single and multi-instance) and GPU (single instance) SageMaker training jobs. To use local mode, you need to set your `ec2_instance_type` to `local`. ```yaml ec2_instance_type: local ``` ### Advanced configuration The configuration allows you to override parameters for the [Estimator](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html). These settings have to be applied in the config file and are not part of `accelerate config`. You can control many additional aspects of the training job, e.g. use Spot instances, enable network isolation and many more. 
```yaml additional_args: # enable network isolation to restrict internet access for containers enable_network_isolation: True ``` You can find all available configuration [here](https://sagemaker.readthedocs.io/en/stable/api/training/estimators.html). ### Use Spot Instances You can use Spot Instances e.g. using (see [Advanced configuration](#advanced-configuration)): ```yaml additional_args: use_spot_instances: True max_wait: 86400 ``` *Note: Spot Instances are subject to be terminated and training to be continued from a checkpoint. This is not handled in 🤗 Accelerate out of the box. Contact us if you would like this feature.* ### Remote scripts: Use scripts located on Github *undecided if feature is needed. Contact us if you would like this feature.*
accelerate/docs/source/usage_guides/sagemaker.md/0
{ "file_path": "accelerate/docs/source/usage_guides/sagemaker.md", "repo_id": "accelerate", "token_count": 2261 }
4
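As a small illustration of the model-saving note in the SageMaker guide above, a training script can resolve the output directory defensively so the same code runs inside and outside SageMaker. This is a hedged sketch: `accelerator` and `model` are assumed to come from your training script, and the `"./output"` fallback and file name are only examples.

```python
# Sketch: save the trained model where SageMaker expects it (SM_MODEL_DIR),
# falling back to a local folder when running outside SageMaker.
import os

model_dir = os.environ.get("SM_MODEL_DIR", "./output")  # "/opt/ml/model" inside SageMaker
os.makedirs(model_dir, exist_ok=True)

# `accelerator` and `model` are assumed to be defined in the training script.
unwrapped = accelerator.unwrap_model(model)
accelerator.save(unwrapped.state_dict(), os.path.join(model_dir, "pytorch_model.bin"))
```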
distributed_type: FSDP fsdp_config: fsdp_activation_checkpointing: false fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP fsdp_backward_prefetch: BACKWARD_PRE fsdp_cpu_ram_efficient_loading: true fsdp_forward_prefetch: false fsdp_offload_params: false fsdp_sharding_strategy: FULL_SHARD fsdp_state_dict_type: SHARDED_STATE_DICT fsdp_sync_module_states: true fsdp_use_orig_params: true
accelerate/examples/slurm/fsdp_config.yaml/0
{ "file_path": "accelerate/examples/slurm/fsdp_config.yaml", "repo_id": "accelerate", "token_count": 167 }
5
#!/usr/bin/env python # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_boto3_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_boto3_available(): import boto3 # noqa: F401 def _create_iam_role_for_sagemaker(role_name): iam_client = boto3.client("iam") sagemaker_trust_policy = { "Version": "2012-10-17", "Statement": [ {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2) ) policy_document = { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "sagemaker:*", "ecr:GetDownloadUrlForLayer", "ecr:BatchGetImage", "ecr:BatchCheckLayerAvailability", "ecr:GetAuthorizationToken", "cloudwatch:PutMetricData", "cloudwatch:GetMetricData", "cloudwatch:GetMetricStatistics", "cloudwatch:ListMetrics", "logs:CreateLogGroup", "logs:CreateLogStream", "logs:DescribeLogStreams", "logs:PutLogEvents", "logs:GetLogEvents", "s3:CreateBucket", "s3:ListBucket", "s3:GetBucketLocation", "s3:GetObject", "s3:PutObject", ], "Resource": "*", } ], } # attach policy to role iam_client.put_role_policy( RoleName=role_name, PolicyName=f"{role_name}_policy_permission", PolicyDocument=json.dumps(policy_document, indent=2), ) except iam_client.exceptions.EntityAlreadyExistsException: print(f"role {role_name} already exists. 
Using existing one") def _get_iam_role_arn(role_name): iam_client = boto3.client("iam") return iam_client.get_role(RoleName=role_name)["Role"]["Arn"] def get_sagemaker_input(): credentials_configuration = _ask_options( "How do you want to authorize?", ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "], int, ) aws_profile = None if credentials_configuration == 0: aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default") os.environ["AWS_PROFILE"] = aws_profile else: print( "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with," "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" ) aws_access_key_id = _ask_field("AWS Access Key ID: ") os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id aws_secret_access_key = _ask_field("AWS Secret Access Key: ") os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1") os.environ["AWS_DEFAULT_REGION"] = aws_region role_management = _ask_options( "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?", ["Provide IAM Role name", "Create new IAM role using credentials"], int, ) if role_management == 0: iam_role_name = _ask_field("Enter your IAM role name: ") else: iam_role_name = "accelerate_sagemaker_execution_role" print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials') _create_iam_role_for_sagemaker(iam_role_name) is_custom_docker_image = _ask_field( "Do you want to use custom Docker image? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) docker_image = None if is_custom_docker_image: docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower()) is_sagemaker_inputs_enabled = _ask_field( "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) sagemaker_inputs_file = None if is_sagemaker_inputs_enabled: sagemaker_inputs_file = _ask_field( "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ", lambda x: str(x).lower(), ) is_sagemaker_metrics_enabled = _ask_field( "Do you want to enable SageMaker metrics? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) sagemaker_metrics_file = None if is_sagemaker_metrics_enabled: sagemaker_metrics_file = _ask_field( "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ", lambda x: str(x).lower(), ) distributed_type = _ask_options( "What is the distributed mode?", ["No distributed training", "Data parallelism"], _convert_sagemaker_distributed_mode, ) dynamo_config = {} use_dynamo = _ask_field( "Do you wish to optimize your script with torch dynamo?[yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_dynamo: prefix = "dynamo_" dynamo_config[prefix + "backend"] = _ask_options( "Which dynamo backend would you like to use?", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) use_custom_options = _ask_field( "Do you want to customize the defaults sent to torch.compile? 
[yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) if use_custom_options: dynamo_config[prefix + "mode"] = _ask_options( "Which mode do you want to use?", TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default="default", ) dynamo_config[prefix + "use_fullgraph"] = _ask_field( "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) dynamo_config[prefix + "use_dynamic"] = _ask_field( "Do you want to enable dynamic shape tracing? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) ec2_instance_query = "Which EC2 instance type you want to use for your training?" if distributed_type != SageMakerDistributedType.NO: ec2_instance_type = _ask_options( ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)] ) else: ec2_instance_query += "? [ml.p3.2xlarge]:" ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge") debug = False if distributed_type != SageMakerDistributedType.NO: debug = _ask_field( "Should distributed operations be checked while running for errors? This can avoid timeout issues but will be slower. [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", ) num_machines = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): num_machines = _ask_field( "How many machines do you want use? [1]: ", int, default=1, ) mixed_precision = _ask_options( "Do you wish to use FP16 or BF16 (mixed precision)?", ["no", "fp16", "bf16", "fp8"], _convert_mixed_precision, ) if use_dynamo and mixed_precision == "no": print( "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." ) return SageMakerConfig( image_uri=docker_image, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=distributed_type, use_cpu=False, dynamo_config=dynamo_config, ec2_instance_type=ec2_instance_type, profile=aws_profile, region=aws_region, iam_role_name=iam_role_name, mixed_precision=mixed_precision, num_machines=num_machines, sagemaker_inputs_file=sagemaker_inputs_file, sagemaker_metrics_file=sagemaker_metrics_file, debug=debug, )
accelerate/src/accelerate/commands/config/sagemaker.py/0
{ "file_path": "accelerate/src/accelerate/commands/config/sagemaker.py", "repo_id": "accelerate", "token_count": 4784 }
6
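For orientation, the questionnaire defined in the file above is normally reached through `accelerate config`, but it can also be invoked directly. The sketch below is illustrative only; it runs the interactive prompts and prints a few of the attributes that `get_sagemaker_input` populates on the returned `SageMakerConfig`.

```python
# Sketch: run the SageMaker questionnaire programmatically and inspect the result.
# Note this walks through the same interactive prompts shown above.
from accelerate.commands.config.sagemaker import get_sagemaker_input

config = get_sagemaker_input()
print(config.ec2_instance_type, config.iam_role_name, config.mixed_precision, config.num_machines)
```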
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools from typing import Dict, List, Mapping, Optional, Union import torch import torch.nn as nn from .state import PartialState from .utils import ( PrefixedDataset, find_device, named_module_tensors, send_to_device, set_module_tensor_to_device, ) from .utils.memory import clear_device_cache from .utils.modeling import get_non_persistent_buffers from .utils.other import recursive_getattr _accelerate_added_attributes = ["to", "cuda", "npu", "xpu", "mlu", "musa"] class ModelHook: """ A hook that contains callbacks to be executed just before and after the forward method of a model. The difference with PyTorch existing hooks is that they get passed along the kwargs. Class attribute: - **no_grad** (`bool`, *optional*, defaults to `False`) -- Whether or not to execute the actual forward pass under the `torch.no_grad()` context manager. """ no_grad = False def init_hook(self, module): """ To be executed when the hook is attached to the module. Args: module (`torch.nn.Module`): The module attached to this hook. """ return module def pre_forward(self, module, *args, **kwargs): """ To be executed just before the forward method of the model. Args: module (`torch.nn.Module`): The module whose forward pass will be executed just after this event. args (`Tuple[Any]`): The positional arguments passed to the module. kwargs (`Dict[Str, Any]`): The keyword arguments passed to the module. Returns: `Tuple[Tuple[Any], Dict[Str, Any]]`: A tuple with the treated `args` and `kwargs`. """ return args, kwargs def post_forward(self, module, output): """ To be executed just after the forward method of the model. Args: module (`torch.nn.Module`): The module whose forward pass been executed just before this event. output (`Any`): The output of the module. Returns: `Any`: The processed `output`. """ return output def detach_hook(self, module): """ To be executed when the hook is detached from a module. Args: module (`torch.nn.Module`): The module detached from this hook. """ return module class SequentialHook(ModelHook): """ A hook that can contain several hooks and iterates through them at each event. """ def __init__(self, *hooks): self.hooks = hooks def init_hook(self, module): for hook in self.hooks: module = hook.init_hook(module) return module def pre_forward(self, module, *args, **kwargs): for hook in self.hooks: args, kwargs = hook.pre_forward(module, *args, **kwargs) return args, kwargs def post_forward(self, module, output): for hook in self.hooks: output = hook.post_forward(module, output) return output def detach_hook(self, module): for hook in self.hooks: module = hook.detach_hook(module) return module def add_hook_to_module(module: nn.Module, hook: ModelHook, append: bool = False): """ Adds a hook to a given module. This will rewrite the `forward` method of the module to include the hook, to remove this behavior and restore the original `forward` method, use `remove_hook_from_module`. 
<Tip warning={true}> If the module already contains a hook, this will replace it with the new hook passed by default. To chain two hooks together, pass `append=True`, so it chains the current and new hook into an instance of the `SequentialHook` class. </Tip> Args: module (`torch.nn.Module`): The module to attach a hook to. hook (`ModelHook`): The hook to attach. append (`bool`, *optional*, defaults to `False`): Whether the hook should be chained with an existing one (if module already contains a hook) or not. Returns: `torch.nn.Module`: The same module, with the hook attached (the module is modified in place, so the result can be discarded). """ if append and (getattr(module, "_hf_hook", None) is not None): old_hook = module._hf_hook remove_hook_from_module(module) hook = SequentialHook(old_hook, hook) if hasattr(module, "_hf_hook") and hasattr(module, "_old_forward"): # If we already put some hook on this module, we replace it with the new one. old_forward = module._old_forward else: old_forward = module.forward module._old_forward = old_forward module = hook.init_hook(module) module._hf_hook = hook def new_forward(module, *args, **kwargs): args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs) if module._hf_hook.no_grad: with torch.no_grad(): output = module._old_forward(*args, **kwargs) else: output = module._old_forward(*args, **kwargs) return module._hf_hook.post_forward(module, output) # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail. # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409 if "GraphModuleImpl" in str(type(module)): module.__class__.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward) else: module.forward = functools.update_wrapper(functools.partial(new_forward, module), old_forward) return module def remove_hook_from_module(module: nn.Module, recurse=False): """ Removes any hook attached to a module via `add_hook_to_module`. Args: module (`torch.nn.Module`): The module to attach a hook to. recurse (`bool`, **optional**): Whether to remove the hooks recursively Returns: `torch.nn.Module`: The same module, with the hook detached (the module is modified in place, so the result can be discarded). """ if hasattr(module, "_hf_hook"): module._hf_hook.detach_hook(module) delattr(module, "_hf_hook") if hasattr(module, "_old_forward"): # Overriding a GraphModuleImpl forward freezes the forward call and later modifications on the graph will fail. # Reference: https://pytorch.slack.com/archives/C3PDTEV8E/p1705929610405409 if "GraphModuleImpl" in str(type(module)): module.__class__.forward = module._old_forward else: module.forward = module._old_forward delattr(module, "_old_forward") # Remove accelerate added warning hooks from dispatch_model for attr in _accelerate_added_attributes: module.__dict__.pop(attr, None) if recurse: for child in module.children(): remove_hook_from_module(child, recurse) return module class AlignDevicesHook(ModelHook): """ A generic `ModelHook` that ensures inputs and model weights are on the same device for the forward pass of the associated module, potentially offloading the weights after the forward pass. Args: execution_device (`torch.device`, *optional*): The device on which inputs and model weights should be placed before the forward pass. offload (`bool`, *optional*, defaults to `False`): Whether or not the weights should be offloaded after the forward pass. 
io_same_device (`bool`, *optional*, defaults to `False`): Whether or not the output should be placed on the same device as the input was. weights_map (`Mapping[str, torch.Tensor]`, *optional*): When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to include the associated module's buffers when offloading. place_submodules (`bool`, *optional*, defaults to `False`): Whether to place the submodules on `execution_device` during the `init_hook` event. """ def __init__( self, execution_device: Optional[Union[int, str, torch.device]] = None, offload: bool = False, io_same_device: bool = False, weights_map: Optional[Mapping] = None, offload_buffers: bool = False, place_submodules: bool = False, skip_keys: Optional[Union[str, List[str]]] = None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, ): self.execution_device = execution_device self.offload = offload self.io_same_device = io_same_device self.weights_map = weights_map self.offload_buffers = offload_buffers self.place_submodules = place_submodules self.skip_keys = skip_keys # Will contain the input device when `io_same_device=True`. self.input_device = None self.param_original_devices = {} self.buffer_original_devices = {} self.tied_params_names = set() # The hook pre_forward/post_forward need to have knowledge of this dictionary, as with offloading we want to avoid duplicating memory # for tied weights already loaded on the target execution device. self.tied_params_map = tied_params_map def __repr__(self): return ( f"AlignDevicesHook(execution_device={self.execution_device}, offload={self.offload}, " f"io_same_device={self.io_same_device}, offload_buffers={self.offload_buffers}, " f"place_submodules={self.place_submodules}, skip_keys={repr(self.skip_keys)})" ) def init_hook(self, module): # In case the AlignDevicesHook is on meta device, ignore tied weights as data_ptr() is then always zero. if self.execution_device == "meta" or self.execution_device == torch.device("meta"): self.tied_params_map = None if not self.offload and self.execution_device is not None: for name, _ in named_module_tensors(module, recurse=self.place_submodules): set_module_tensor_to_device(module, name, self.execution_device, tied_params_map=self.tied_params_map) elif self.offload: self.original_devices = { name: param.device for name, param in named_module_tensors(module, recurse=self.place_submodules) } if self.weights_map is None: self.weights_map = { name: param.to("cpu") for name, param in named_module_tensors( module, include_buffers=self.offload_buffers, recurse=self.place_submodules ) } for name, _ in named_module_tensors( module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True ): # When using disk offloading, we can not rely on `weights_map[name].data_ptr()` as the reference pointer, # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer. # As we have no reliable way to track the shared data pointer of tied weights in this case, we use tied_params_names: List[str] # to add on the fly pointers to `tied_params_map` in the pre_forward call. 
if ( self.tied_params_map is not None and recursive_getattr(module, name).data_ptr() in self.tied_params_map ): self.tied_params_names.add(name) set_module_tensor_to_device(module, name, "meta") if not self.offload_buffers and self.execution_device is not None: for name, _ in module.named_buffers(recurse=self.place_submodules): set_module_tensor_to_device( module, name, self.execution_device, tied_params_map=self.tied_params_map ) elif self.offload_buffers and self.execution_device is not None: for name in get_non_persistent_buffers(module, recurse=self.place_submodules): set_module_tensor_to_device( module, name, self.execution_device, tied_params_map=self.tied_params_map ) return module def pre_forward(self, module, *args, **kwargs): if self.io_same_device: self.input_device = find_device([args, kwargs]) if self.offload: self.tied_pointers_to_remove = set() for name, _ in named_module_tensors( module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True, ): fp16_statistics = None value = self.weights_map[name] if "weight" in name and name.replace("weight", "SCB") in self.weights_map.keys(): if value.dtype == torch.int8: fp16_statistics = self.weights_map[name.replace("weight", "SCB")] # In case we are using offloading with tied weights, we need to keep track of the offloaded weights # that are loaded on device at this point, as we will need to remove them as well from the dictionary # self.tied_params_map in order to allow to free memory. if name in self.tied_params_names and value.data_ptr() not in self.tied_params_map: self.tied_params_map[value.data_ptr()] = {} if ( value is not None and self.tied_params_map is not None and value.data_ptr() in self.tied_params_map and self.execution_device not in self.tied_params_map[value.data_ptr()] ): self.tied_pointers_to_remove.add((value.data_ptr(), self.execution_device)) set_module_tensor_to_device( module, name, self.execution_device, value=value, fp16_statistics=fp16_statistics, tied_params_map=self.tied_params_map, ) return send_to_device(args, self.execution_device), send_to_device( kwargs, self.execution_device, skip_keys=self.skip_keys ) def post_forward(self, module, output): if self.offload: for name, _ in named_module_tensors( module, include_buffers=self.offload_buffers, recurse=self.place_submodules, remove_non_persistent=True, ): set_module_tensor_to_device(module, name, "meta") if type(module).__name__ == "Linear8bitLt": module.state.SCB = None module.state.CxB = None # We may have loaded tied weights into self.tied_params_map (avoiding to load them several times in e.g. submodules): remove them from # this dictionary to allow the garbage collector to do its job. 
for value_pointer, device in self.tied_pointers_to_remove: del self.tied_params_map[value_pointer][device] self.tied_pointers_to_remove = set() if self.io_same_device and self.input_device is not None: output = send_to_device(output, self.input_device, skip_keys=self.skip_keys) return output def detach_hook(self, module): if self.offload: for name, device in self.original_devices.items(): if device != torch.device("meta"): set_module_tensor_to_device(module, name, device, value=self.weights_map.get(name, None)) return module def attach_execution_device_hook( module: torch.nn.Module, execution_device: Union[int, str, torch.device], skip_keys: Optional[Union[str, List[str]]] = None, preload_module_classes: Optional[List[str]] = None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, ): """ Recursively attaches `AlignDevicesHook` to all submodules of a given model to make sure they have the right execution device Args: module (`torch.nn.Module`): The module where we want to attach the hooks. execution_device (`int`, `str` or `torch.device`): The device on which inputs and model weights should be placed before the forward pass. skip_keys (`str` or `List[str]`, *optional*): A list of keys to ignore when moving inputs or outputs between devices. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`): A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution device, this parameter is useful to reuse the first available pointer of a shared weight for all others, instead of duplicating memory. """ if not hasattr(module, "_hf_hook") and len(module.state_dict()) > 0: add_hook_to_module( module, AlignDevicesHook(execution_device, skip_keys=skip_keys, tied_params_map=tied_params_map), ) # Break the recursion if we get to a preload module. if preload_module_classes is not None and module.__class__.__name__ in preload_module_classes: return for child in module.children(): attach_execution_device_hook(child, execution_device, tied_params_map=tied_params_map) def attach_align_device_hook( module: torch.nn.Module, execution_device: Optional[torch.device] = None, offload: bool = False, weights_map: Optional[Mapping] = None, offload_buffers: bool = False, module_name: str = "", skip_keys: Optional[Union[str, List[str]]] = None, preload_module_classes: Optional[List[str]] = None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, ): """ Recursively attaches `AlignDevicesHook` to all submodules of a given model that have direct parameters and/or buffers. Args: module (`torch.nn.Module`): The module where we want to attach the hooks. execution_device (`torch.device`, *optional*): The device on which inputs and model weights should be placed before the forward pass. offload (`bool`, *optional*, defaults to `False`): Whether or not the weights should be offloaded after the forward pass. 
weights_map (`Mapping[str, torch.Tensor]`, *optional*): When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to include the associated module's buffers when offloading. module_name (`str`, *optional*, defaults to `""`): The name of the module. skip_keys (`str` or `List[str]`, *optional*): A list of keys to ignore when moving inputs or outputs between devices. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`): A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution device, this parameter is useful to reuse the first available pointer of a shared weight for all others, instead of duplicating memory. """ # Attach the hook on this module if it has any direct tensor. directs = named_module_tensors(module) full_offload = ( offload and preload_module_classes is not None and module.__class__.__name__ in preload_module_classes ) if len(list(directs)) > 0 or full_offload: if weights_map is not None: prefix = f"{module_name}." if len(module_name) > 0 else "" prefixed_weights_map = PrefixedDataset(weights_map, prefix) else: prefixed_weights_map = None hook = AlignDevicesHook( execution_device=execution_device, offload=offload, weights_map=prefixed_weights_map, offload_buffers=offload_buffers, place_submodules=full_offload, skip_keys=skip_keys, tied_params_map=tied_params_map, ) add_hook_to_module(module, hook, append=True) # We stop the recursion in case we hit the full offload. if full_offload: return # Recurse on all children of the module. for child_name, child in module.named_children(): child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name attach_align_device_hook( child, execution_device=execution_device, offload=offload, weights_map=weights_map, offload_buffers=offload_buffers, module_name=child_name, preload_module_classes=preload_module_classes, skip_keys=skip_keys, tied_params_map=tied_params_map, ) def remove_hook_from_submodules(module: nn.Module): """ Recursively removes all hooks attached on the submodules of a given model. Args: module (`torch.nn.Module`): The module on which to remove all hooks. """ remove_hook_from_module(module) for child in module.children(): remove_hook_from_submodules(child) def attach_align_device_hook_on_blocks( module: nn.Module, execution_device: Optional[Union[torch.device, Dict[str, torch.device]]] = None, offload: Union[bool, Dict[str, bool]] = False, weights_map: Mapping = None, offload_buffers: bool = False, module_name: str = "", skip_keys: Optional[Union[str, List[str]]] = None, preload_module_classes: Optional[List[str]] = None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, ): """ Attaches `AlignDevicesHook` to all blocks of a given model as needed. Args: module (`torch.nn.Module`): The module where we want to attach the hooks. 
execution_device (`torch.device` or `Dict[str, torch.device]`, *optional*): The device on which inputs and model weights should be placed before the forward pass. It can be one device for the whole module, or a dictionary mapping module name to device. offload (`bool`, *optional*, defaults to `False`): Whether or not the weights should be offloaded after the forward pass. It can be one boolean for the whole module, or a dictionary mapping module name to boolean. weights_map (`Mapping[str, torch.Tensor]`, *optional*): When the model weights are offloaded, a (potentially lazy) map from param names to the tensor values. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to include the associated module's buffers when offloading. module_name (`str`, *optional*, defaults to `""`): The name of the module. skip_keys (`str` or `List[str]`, *optional*): A list of keys to ignore when moving inputs or outputs between devices. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. tied_params_map (Optional[Dict[int, Dict[torch.device, torch.Tensor]]], *optional*, defaults to `None`): A map of data pointers to dictionaries of devices to already dispatched tied weights. For a given execution device, this parameter is useful to reuse the first available pointer of a shared weight for all others, instead of duplicating memory. """ # If one device and one offload, we've got one hook. 
if not isinstance(execution_device, Mapping) and not isinstance(offload, dict): if not offload: hook = AlignDevicesHook( execution_device=execution_device, io_same_device=True, skip_keys=skip_keys, place_submodules=True, tied_params_map=tied_params_map, ) add_hook_to_module(module, hook) else: attach_align_device_hook( module, execution_device=execution_device, offload=True, weights_map=weights_map, offload_buffers=offload_buffers, module_name=module_name, skip_keys=skip_keys, tied_params_map=tied_params_map, ) return if not isinstance(execution_device, Mapping): execution_device = {key: execution_device for key in offload.keys()} if not isinstance(offload, Mapping): offload = {key: offload for key in execution_device.keys()} if module_name in execution_device and module_name in offload and not offload[module_name]: hook = AlignDevicesHook( execution_device=execution_device[module_name], offload_buffers=offload_buffers, io_same_device=(module_name == ""), place_submodules=True, skip_keys=skip_keys, tied_params_map=tied_params_map, ) add_hook_to_module(module, hook) attach_execution_device_hook(module, execution_device[module_name], tied_params_map=tied_params_map) elif module_name in execution_device and module_name in offload: attach_align_device_hook( module, execution_device=execution_device[module_name], offload=True, weights_map=weights_map, offload_buffers=offload_buffers, module_name=module_name, skip_keys=skip_keys, preload_module_classes=preload_module_classes, tied_params_map=tied_params_map, ) if not hasattr(module, "_hf_hook"): hook = AlignDevicesHook( execution_device=execution_device[module_name], io_same_device=(module_name == ""), skip_keys=skip_keys, tied_params_map=tied_params_map, ) add_hook_to_module(module, hook) attach_execution_device_hook( module, execution_device[module_name], preload_module_classes=preload_module_classes, skip_keys=skip_keys, tied_params_map=tied_params_map, ) elif module_name == "": hook = AlignDevicesHook( execution_device=execution_device.get(""), io_same_device=True, skip_keys=skip_keys, tied_params_map=tied_params_map, ) add_hook_to_module(module, hook) for child_name, child in module.named_children(): child_name = f"{module_name}.{child_name}" if len(module_name) > 0 else child_name attach_align_device_hook_on_blocks( child, execution_device=execution_device, offload=offload, weights_map=weights_map, offload_buffers=offload_buffers, module_name=child_name, preload_module_classes=preload_module_classes, skip_keys=skip_keys, tied_params_map=tied_params_map, ) class CpuOffload(ModelHook): """ Offloads a model on the CPU until its forward pass is called. The model will not be offloaded back to the CPU after the forward, the user needs to call the `init_hook` method again for this. Args: execution_device(`str`, `int` or `torch.device`, *optional*): The device on which the model should be executed. Will default to the MPS device if it's available, then GPU 0 if there is a GPU, and finally to the CPU. prev_module_hook (`UserCpuOffloadHook`, *optional*): The hook sent back by [`cpu_offload_with_hook`] for a previous model in the pipeline you are running. If passed, its offload method will be called just before the forward of the model to which this hook is attached. 
""" def __init__( self, execution_device: Optional[Union[str, int, torch.device]] = None, prev_module_hook: Optional["UserCpuOffloadHook"] = None, ): self.prev_module_hook = prev_module_hook self.execution_device = execution_device if execution_device is not None else PartialState().default_device def init_hook(self, module): return module.to("cpu") def pre_forward(self, module, *args, **kwargs): if self.prev_module_hook is not None: self.prev_module_hook.offload() clear_device_cache() module.to(self.execution_device) return send_to_device(args, self.execution_device), send_to_device(kwargs, self.execution_device) class UserCpuOffloadHook: """ A simple hook grouping a model and a `ModelHook`, which provides easy APIs for to call the init method of the hook or remove it entirely. """ def __init__(self, model, hook): self.model = model self.hook = hook def offload(self): self.hook.init_hook(self.model) def remove(self): remove_hook_from_module(self.model)
accelerate/src/accelerate/hooks.py/0
{ "file_path": "accelerate/src/accelerate/hooks.py", "repo_id": "accelerate", "token_count": 13037 }
7
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"): """ Creates a set of `DataLoader`s for the `glue` dataset. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. model_name (`str`, *optional*): """ tokenizer = AutoTokenizer.from_pretrained(model_name) datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.XLA: return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") return tokenizer.pad(examples, padding="longest", return_tensors="pt") # Instantiate dataloaders. 
train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader def training_function(config, args): # Initialize accelerator accelerator = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) model_name = args.model_name_or_path set_seed(seed) train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name) # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True) # Instantiate optimizer optimizer_cls = ( AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) optimizer = optimizer_cls(params=model.parameters(), lr=lr) max_training_steps = len(train_dataloader) * num_epochs # Instantiate scheduler linear_decay_scheduler = False if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, ) linear_decay_scheduler = True else: lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # We also need to keep track of the stating epoch so files are named properly starting_epoch = 0 # Now we train the model metric = evaluate.load("glue", "mrpc") best_performance = 0 performance_metric = {} expected_lr_after_first_optim_step = lr * ( 1 - 1 / (max_training_steps / accelerator.num_processes / accelerator.gradient_accumulation_steps) ) lr_scheduler_check_completed = False for epoch in range(starting_epoch, num_epochs): model.train() for step, batch in enumerate(train_dataloader): with accelerator.accumulate(model): outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # assert the learning rate after first optimizer step if ( accelerator.sync_gradients and not lr_scheduler_check_completed and linear_decay_scheduler and accelerator.state.mixed_precision == "no" ): assert ( lr_scheduler.get_last_lr()[0] == expected_lr_after_first_optim_step ), f"Wrong lr found at second step, expected {expected_lr_after_first_optim_step}, got {lr_scheduler.get_last_lr()[0]}" lr_scheduler_check_completed = True model.eval() samples_seen = 0 for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) # It is slightly faster to call this once, than multiple times predictions, references = accelerator.gather( (predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(eval_dataloader) - 1: predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] references = references[: len(eval_dataloader.dataset) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric) performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"] if best_performance < eval_metric["accuracy"]: best_performance = eval_metric["accuracy"] # check that the LR is 0 if linear_decay_scheduler and accelerator.state.mixed_precision == "no": assert ( lr_scheduler.get_last_lr()[0] == 0 ), f"Wrong lr found at last step, expected 0, got {lr_scheduler.get_last_lr()[0]}" if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: json.dump(performance_metric, f) accelerator.end_training() def main(): parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.") parser.add_argument( "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, ) parser.add_argument( "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--performance_lower_bound", type=float, default=None, help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.", ) parser.add_argument( "--num_epochs", type=int, default=3, help="Number of train epochs.", ) args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main()
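# ---------------------------------------------------------------------------
# Illustrative sketch (not exercised by this test): the manual trimming of
# duplicated samples in the last evaluation batch above can typically be
# replaced by `Accelerator.gather_for_metrics`, which removes the padding
# added by distributed samplers. The helper name below is hypothetical and it
# assumes the same model/dataloader/metric/accelerator objects as
# `training_function`.
# ---------------------------------------------------------------------------
def _evaluate_with_gather_for_metrics(accelerator, model, eval_dataloader, metric):
    model.eval()
    for batch in eval_dataloader:
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # gather_for_metrics drops the duplicated samples of the last batch automatically
        predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
        metric.add_batch(predictions=predictions, references=references)
    return metric.compute()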
accelerate/src/accelerate/test_utils/scripts/external_deps/test_performance.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/external_deps/test_performance.py", "repo_id": "accelerate", "token_count": 3917 }
8
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import operator as op SCALER_NAME = "scaler.pt" MODEL_NAME = "pytorch_model" SAFE_MODEL_NAME = "model" RNG_STATE_NAME = "random_states" OPTIMIZER_NAME = "optimizer" SCHEDULER_NAME = "scheduler" SAMPLER_NAME = "sampler" PROFILE_PATTERN_NAME = "profile_{suffix}.json" WEIGHTS_NAME = f"{MODEL_NAME}.bin" WEIGHTS_PATTERN_NAME = "pytorch_model{suffix}.bin" WEIGHTS_INDEX_NAME = f"{WEIGHTS_NAME}.index.json" SAFE_WEIGHTS_NAME = f"{SAFE_MODEL_NAME}.safetensors" SAFE_WEIGHTS_PATTERN_NAME = "model{suffix}.safetensors" SAFE_WEIGHTS_INDEX_NAME = f"{SAFE_WEIGHTS_NAME}.index.json" SAGEMAKER_PYTORCH_VERSION = "1.10.2" SAGEMAKER_PYTHON_VERSION = "py38" SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0" SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"] FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"] FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"] FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"] FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"] FSDP_PYTORCH_VERSION = ( "2.1.0.a0+32f93b1" # Technically should be 2.1.0, but MS-AMP uses this specific prerelease in their Docker image. ) FSDP_MODEL_NAME = "pytorch_model_fsdp" DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich", "mpich"] TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"] ELASTIC_LOG_LINE_PREFIX_TEMPLATE_PYTORCH_VERSION = "2.2.0" STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 TORCH_LAUNCH_PARAMS = [ "nnodes", "nproc_per_node", "rdzv_backend", "rdzv_endpoint", "rdzv_id", "rdzv_conf", "standalone", "max_restarts", "monitor_interval", "start_method", "role", "module", "m", "no_python", "run_path", "log_dir", "r", "redirects", "t", "tee", "node_rank", "master_addr", "master_port", ] CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"] TORCH_DISTRIBUTED_OPERATION_TYPES = CUDA_DISTRIBUTED_TYPES + [ "MULTI_NPU", "MULTI_MLU", "MULTI_MUSA", "MULTI_XPU", "MULTI_CPU", ]
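# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): how `STR_OPERATION_TO_FUNC`
# can turn a textual requirement such as ">=" plus "2.2.0" into an actual
# comparison. The helper name `_version_satisfies` is hypothetical.
# ---------------------------------------------------------------------------
def _version_satisfies(current: str, operation: str, reference: str) -> bool:
    from packaging import version

    if operation not in STR_OPERATION_TO_FUNC:
        raise ValueError(f"Unknown operation {operation!r}; expected one of {list(STR_OPERATION_TO_FUNC)}")
    return STR_OPERATION_TO_FUNC[operation](version.parse(current), version.parse(reference))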
accelerate/src/accelerate/utils/constants.py/0
{ "file_path": "accelerate/src/accelerate/utils/constants.py", "repo_id": "accelerate", "token_count": 1295 }
9
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from .imports import is_tqdm_available if is_tqdm_available(): from tqdm.auto import tqdm as _tqdm from ..state import PartialState def tqdm(*args, main_process_only: bool = True, **kwargs): """ Wrapper around `tqdm.tqdm` that optionally displays only on the main process. Args: main_process_only (`bool`, *optional*): Whether to display the progress bar only on the main process """ if not is_tqdm_available(): raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.") if len(args) > 0 and isinstance(args[0], bool): warnings.warn( f"Passing `{args[0]}` as the first argument to Accelerate's `tqdm` wrapper is deprecated " "and will be removed in v0.33.0. Please use the `main_process_only` keyword argument instead.", FutureWarning, ) main_process_only = args[0] args = args[1:] disable = kwargs.pop("disable", False) if main_process_only and not disable: disable = PartialState().local_process_index != 0 return _tqdm(*args, **kwargs, disable=disable)
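# ---------------------------------------------------------------------------
# Illustrative usage sketch (assumes `tqdm` is installed): in a script run
# with `accelerate launch`, only the local main process renders a bar by
# default, so multi-process runs do not print one bar per rank. The helper
# name `_demo_progress_bar` is hypothetical.
# ---------------------------------------------------------------------------
def _demo_progress_bar():
    for _ in tqdm(range(10), desc="main process only"):
        pass
    for _ in tqdm(range(10), desc="all processes", main_process_only=False):
        pass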
accelerate/src/accelerate/utils/tqdm.py/0
{ "file_path": "accelerate/src/accelerate/utils/tqdm.py", "repo_id": "accelerate", "token_count": 616 }
10
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import unittest import torch import torch.nn as nn from accelerate import Accelerator, init_empty_weights from accelerate.test_utils import ( require_bnb, require_cuda, require_huggingface_suite, require_multi_gpu, require_non_torch_xla, slow, ) from accelerate.utils.bnb import load_and_quantize_model from accelerate.utils.dataclasses import BnbQuantizationConfig class BitsAndBytesConfigIntegration(unittest.TestCase): def test_BnbQuantizationConfig(self): with self.assertRaises(ValueError): BnbQuantizationConfig(load_in_8bit=True, load_in_4bit=True) @require_non_torch_xla @slow @require_cuda @require_bnb @require_huggingface_suite class MixedInt8EmptyModelTest(unittest.TestCase): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module model_name = "marcsun13/bloom-1b7_with_lm_head" # Constant values # This was obtained on a Quadro RTX 8000 so the number might slightly change EXPECTED_RELATIVE_DIFFERENCE = 1.540025 input_text = "Hello my name is" EXPECTED_OUTPUT = "Hello my name is John.\nI am a friend of the family.\n" MAX_NEW_TOKENS = 10 def setUp(self): """ Setup quantized model from empty model """ from huggingface_hub import hf_hub_download from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer # Models and tokenizer self.model_fp16 = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.float16, device_map="auto" ) # create model on meta device with init_empty_weights(): self.model_8bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) self.model_8bit.tie_weights() self.weights_location = hf_hub_download(self.model_name, "pytorch_model.bin") self.bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True) self.model_8bit = load_and_quantize_model( self.model_8bit, self.bnb_quantization_config, weights_location=self.weights_location, device_map={"": 0}, no_split_module_classes=["BloomBlock"], ) self.tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7") self.accelerate = Accelerator() def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. 
Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.model_fp16 del self.model_8bit gc.collect() torch.cuda.empty_cache() def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from bitsandbytes.nn import Int8Params mem_fp16 = self.model_fp16.get_memory_footprint() mem_8bit = self.model_8bit.get_memory_footprint() assert round((mem_fp16 / mem_8bit) - self.EXPECTED_RELATIVE_DIFFERENCE, 7) >= 0 assert self.model_8bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params def test_linear_are_8bit(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ self.model_fp16.get_memory_footprint() self.model_8bit.get_memory_footprint() for name, module in self.model_8bit.named_modules(): if isinstance(module, torch.nn.Linear): modules_not_converted = ( self.bnb_quantization_config.keep_in_fp32_modules + self.bnb_quantization_config.skip_modules ) if name not in modules_not_converted: assert module.weight.dtype == torch.int8 def test_llm_skip(self): r""" A simple test to check if `llm_int8_skip_modules` works as expected """ import bitsandbytes as bnb from transformers import AutoConfig, AutoModelForCausalLM bnb_quantization_config = BnbQuantizationConfig( load_in_8bit=True, skip_modules=["lm_head", "transformer.word_embeddings"] ) with init_empty_weights(): model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model.tie_weights() model = load_and_quantize_model( model, bnb_quantization_config, weights_location=self.weights_location, device_map="auto", no_split_module_classes=["BloomBlock"], ) assert model.transformer.h[1].mlp.dense_4h_to_h.weight.dtype == torch.int8 assert isinstance(model.transformer.h[1].mlp.dense_4h_to_h, bnb.nn.Linear8bitLt) assert isinstance(model.lm_head, nn.Linear) assert model.lm_head.weight.dtype != torch.int8 def check_inference_correctness(self, model): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_parallel = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation output_text = self.tokenizer.decode(output_parallel[0], skip_special_tokens=True) assert output_text == self.EXPECTED_OUTPUT def test_generate_quality(self): self.check_inference_correctness(self.model_8bit) def test_fp32_8bit_conversion(self): r""" Test whether it is possible to mix both `8bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. 
""" from transformers import AutoConfig, AutoModelForCausalLM bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True, keep_in_fp32_modules=["lm_head"]) with init_empty_weights(): model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model.tie_weights() model = load_and_quantize_model( model, bnb_quantization_config, weights_location=self.weights_location, device_map="auto", no_split_module_classes=["BloomBlock"], ) assert model.lm_head.weight.dtype == torch.float32 @require_multi_gpu def test_cpu_gpu_loading_custom_device_map(self): from bitsandbytes.nn import Int8Params from transformers import AutoConfig, AutoModelForCausalLM r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. """ device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": 0, "lm_head": "cpu", "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": "cpu", "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 1, "transformer.h.10": 0, "transformer.h.11": 1, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 1, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 1, "transformer.h.18": 1, "transformer.h.19": 0, "transformer.h.20": 1, "transformer.h.21": 1, "transformer.h.22": 0, "transformer.h.23": 0, "transformer.ln_f": 1, } bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True) with init_empty_weights(): model_8bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model_8bit.tie_weights() model_8bit = load_and_quantize_model( model_8bit, bnb_quantization_config, weights_location=self.weights_location, device_map=device_map, no_split_module_classes=["BloomBlock"], ) assert model_8bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params assert model_8bit.transformer.h[1].mlp.dense_4h_to_h.weight.__class__ == Int8Params self.check_inference_correctness(model_8bit) @require_multi_gpu def test_cpu_gpu_loading_custom_device_map_offload_state_dict(self): from bitsandbytes.nn import Int8Params from transformers import AutoConfig, AutoModelForCausalLM r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map` and offload_state_dict=True. 
""" device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": 0, "lm_head": "cpu", "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": "cpu", "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 1, "transformer.h.10": 0, "transformer.h.11": 1, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 1, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 1, "transformer.h.18": 1, "transformer.h.19": 0, "transformer.h.20": 1, "transformer.h.21": 1, "transformer.h.22": 0, "transformer.h.23": 0, "transformer.ln_f": 1, } bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True) with init_empty_weights(): model_8bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model_8bit.tie_weights() model_8bit = load_and_quantize_model( model_8bit, bnb_quantization_config, weights_location=self.weights_location, device_map=device_map, no_split_module_classes=["BloomBlock"], offload_state_dict=True, ) assert model_8bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params assert model_8bit.transformer.h[1].mlp.dense_4h_to_h.weight.__class__ == Int8Params self.check_inference_correctness(model_8bit) @require_multi_gpu def test_cpu_gpu_disk_loading_custom_device_map_kwargs(self): from bitsandbytes.nn import Int8Params from transformers import AutoConfig, AutoModelForCausalLM r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. This time we also add `disk` on the device_map - using the kwargs directly instead of the quantization config """ device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": 0, "lm_head": "cpu", "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": "cpu", "transformer.h.3": "disk", "transformer.h.4": "disk", "transformer.h.5": "disk", "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 1, "transformer.h.10": 0, "transformer.h.11": 1, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 1, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 1, "transformer.h.18": 1, "transformer.h.19": 0, "transformer.h.20": 1, "transformer.h.21": 1, "transformer.h.22": 0, "transformer.h.23": 0, "transformer.ln_f": 1, } bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True) with init_empty_weights(): model_8bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model_8bit.tie_weights() with tempfile.TemporaryDirectory() as tmpdirname: model_8bit = load_and_quantize_model( model_8bit, bnb_quantization_config, weights_location=self.weights_location, device_map=device_map, no_split_module_classes=["BloomBlock"], offload_folder=tmpdirname, offload_state_dict=True, ) assert model_8bit.transformer.h[4].mlp.dense_4h_to_h.weight.__class__ == Int8Params assert model_8bit.transformer.h[5].mlp.dense_4h_to_h.weight.__class__ == Int8Params self.check_inference_correctness(model_8bit) def test_int8_serialization(self): r""" Test whether it is possible to serialize a model in 8-bit. 
""" from bitsandbytes.nn import Int8Params from transformers import AutoConfig, AutoModelForCausalLM with tempfile.TemporaryDirectory() as tmpdirname: # saving state dict for now but will save config and other in the future self.accelerate.save_model(self.model_8bit, tmpdirname) with init_empty_weights(): # let's suppose that we can get the right config model_8bit_from_saved = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model_8bit_from_saved.tie_weights() bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True) model_8bit_from_saved = load_and_quantize_model( model_8bit_from_saved, bnb_quantization_config, weights_location=tmpdirname, device_map="auto", no_split_module_classes=["BloomBlock"], ) assert model_8bit_from_saved.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params assert hasattr(model_8bit_from_saved.transformer.h[0].mlp.dense_4h_to_h.weight, "SCB") assert hasattr(model_8bit_from_saved.transformer.h[0].mlp.dense_4h_to_h.weight, "CB") self.check_inference_correctness(model_8bit_from_saved) @require_multi_gpu def test_int8_serialization_offload(self): r""" Test whether it is possible to serialize a model in 8-bit and offload weights to cpu/disk """ from bitsandbytes.nn import Int8Params from transformers import AutoConfig, AutoModelForCausalLM with tempfile.TemporaryDirectory() as tmpdirname: # saving state dict for now but will save config and other in the future self.accelerate.save_model(self.model_8bit, tmpdirname) with init_empty_weights(): # let's suppose that we can get the right config model_8bit_from_saved = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model_8bit_from_saved.tie_weights() bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True) device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": 0, "lm_head": "cpu", "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": "cpu", "transformer.h.3": "disk", "transformer.h.4": "disk", "transformer.h.5": "disk", "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 1, "transformer.h.10": 0, "transformer.h.11": 1, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 1, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 1, "transformer.h.18": 1, "transformer.h.19": 0, "transformer.h.20": 1, "transformer.h.21": 1, "transformer.h.22": 0, "transformer.h.23": 0, "transformer.ln_f": 1, } model_8bit_from_saved = load_and_quantize_model( model_8bit_from_saved, bnb_quantization_config, weights_location=tmpdirname, device_map=device_map, no_split_module_classes=["BloomBlock"], offload_folder=tmpdirname + "/tmp", offload_state_dict=True, ) assert model_8bit_from_saved.transformer.h[4].mlp.dense_4h_to_h.weight.__class__ == Int8Params assert model_8bit_from_saved.transformer.h[5].mlp.dense_4h_to_h.weight.__class__ == Int8Params self.check_inference_correctness(model_8bit_from_saved) def test_int8_serialization_shard(self): r""" Test whether it is possible to serialize a model in 8-bit. 
""" from bitsandbytes.nn import Int8Params from transformers import AutoConfig, AutoModelForCausalLM with tempfile.TemporaryDirectory() as tmpdirname: # saving state dict for now but will save config and other in the future self.accelerate.save_model(self.model_8bit, tmpdirname, max_shard_size="1GB") with init_empty_weights(): # let's suppose that we can get the right config model_8bit_from_saved = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model_8bit_from_saved.tie_weights() bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True) model_8bit_from_saved = load_and_quantize_model( model_8bit_from_saved, bnb_quantization_config, weights_location=tmpdirname, device_map="auto", no_split_module_classes=["BloomBlock"], ) assert model_8bit_from_saved.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params assert hasattr(model_8bit_from_saved.transformer.h[0].mlp.dense_4h_to_h.weight, "SCB") assert hasattr(model_8bit_from_saved.transformer.h[0].mlp.dense_4h_to_h.weight, "CB") self.check_inference_correctness(model_8bit_from_saved) @require_non_torch_xla @slow @require_cuda @require_bnb @require_huggingface_suite class MixedInt8LoaddedModelTest(unittest.TestCase): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module model_name = "marcsun13/bloom-1b7_with_lm_head" # Constant values # This was obtained on a Quadro RTX 8000 so the number might slightly change EXPECTED_RELATIVE_DIFFERENCE = 1.540025 input_text = "Hello my name is" EXPECTED_OUTPUT = "Hello my name is John.\nI am a friend of the family.\n" MAX_NEW_TOKENS = 10 def setUp(self): """ Setup quantized model from loaded model """ from transformers import AutoModelForCausalLM, AutoTokenizer # Models and tokenizer self.model_fp16 = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.float16, device_map="auto" ) self.bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True) self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype=torch.float16) self.model_8bit = load_and_quantize_model(self.model_8bit, self.bnb_quantization_config) self.tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7") def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. 
Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.model_fp16 del self.model_8bit gc.collect() torch.cuda.empty_cache() def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from bitsandbytes.nn import Int8Params mem_fp16 = self.model_fp16.get_memory_footprint() mem_8bit = self.model_8bit.get_memory_footprint() assert round((mem_fp16 / mem_8bit) - self.EXPECTED_RELATIVE_DIFFERENCE, 7) >= 0 assert self.model_8bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params def test_linear_are_8bit(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ self.model_fp16.get_memory_footprint() self.model_8bit.get_memory_footprint() for name, module in self.model_8bit.named_modules(): if isinstance(module, torch.nn.Linear): modules_not_converted = ( self.bnb_quantization_config.keep_in_fp32_modules + self.bnb_quantization_config.skip_modules ) if name not in modules_not_converted: assert module.weight.dtype == torch.int8 def test_generate_quality(self): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = self.model_8bit.generate( input_ids=encoded_input["input_ids"].to(self.model_8bit.device), max_new_tokens=10 ) assert self.tokenizer.decode(output_sequences[0], skip_special_tokens=True) == self.EXPECTED_OUTPUT def test_fp32_8bit_conversion(self): r""" Test whether it is possible to mix both `8bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. """ from transformers import AutoModelForCausalLM bnb_quantization_config = BnbQuantizationConfig(load_in_8bit=True, keep_in_fp32_modules=["lm_head"]) model = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype=torch.float16) model = load_and_quantize_model(model, bnb_quantization_config) assert model.lm_head.weight.dtype == torch.float32 @require_non_torch_xla @slow @require_cuda @require_bnb @require_huggingface_suite class Bnb4BitEmptyModelTest(unittest.TestCase): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module model_name = "marcsun13/bloom-1b7_with_lm_head" # Constant values # This was obtained on a RTX Titan so the number might slightly change EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574 input_text = "Hello my name is" EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. 
I") EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n") MAX_NEW_TOKENS = 10 def setUp(self): from huggingface_hub import hf_hub_download from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer super().setUp() # Models and tokenizer self.model_fp16 = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.float16, device_map="auto" ) # create model on meta device with init_empty_weights(): self.model_4bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) self.model_4bit.tie_weights() self.weights_location = hf_hub_download(self.model_name, "pytorch_model.bin") self.bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True) self.model_4bit = load_and_quantize_model( self.model_4bit, self.bnb_quantization_config, weights_location=self.weights_location, device_map={"": 0}, no_split_module_classes=["BloomBlock"], ) self.tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7") def tearDown(self): """ TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ super().tearDown() del self.model_fp16 del self.model_4bit gc.collect() torch.cuda.empty_cache() def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from bitsandbytes.nn import Params4bit mem_fp16 = self.model_fp16.get_memory_footprint() mem_4bit = self.model_4bit.get_memory_footprint() assert round((mem_fp16 / mem_4bit) - self.EXPECTED_RELATIVE_DIFFERENCE, 7) >= 0 assert self.model_4bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Params4bit def check_inference_correctness(self, model): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) assert self.tokenizer.decode(output_sequences[0], skip_special_tokens=True) in self.EXPECTED_OUTPUTS def test_generate_quality(self): self.check_inference_correctness(self.model_4bit) def test_linear_are_4bit(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ self.model_fp16.get_memory_footprint() self.model_4bit.get_memory_footprint() for name, module in self.model_4bit.named_modules(): if isinstance(module, torch.nn.Linear): if ( name not in self.bnb_quantization_config.keep_in_fp32_modules + self.bnb_quantization_config.skip_modules ): # 4-bit parameters are packed in uint8 variables assert module.weight.dtype == torch.uint8 def test_fp32_4bit_conversion(self): r""" Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. 
""" from transformers import AutoConfig, AutoModelForCausalLM bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True, keep_in_fp32_modules=["lm_head"]) with init_empty_weights(): model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model.tie_weights() model = load_and_quantize_model( model, bnb_quantization_config, weights_location=self.weights_location, device_map="auto", no_split_module_classes=["BloomBlock"], ) assert model.lm_head.weight.dtype == torch.float32 @require_multi_gpu def test_cpu_gpu_loading_random_device_map(self): from transformers import AutoConfig, AutoModelForCausalLM r""" A test to check is dispatching a model on cpu & gpu works correctly using a random `device_map`. """ device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": 0, "lm_head": "cpu", "transformer.h.0": 0, "transformer.h.1": 0, "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 1, "transformer.h.10": 0, "transformer.h.11": 1, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 1, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 1, "transformer.h.18": 1, "transformer.h.19": 0, "transformer.h.20": 1, "transformer.h.21": 1, "transformer.h.22": 0, "transformer.h.23": 0, "transformer.ln_f": 1, } bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True) with init_empty_weights(): model_4bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model_4bit.tie_weights() model_4bit = load_and_quantize_model( model_4bit, bnb_quantization_config, weights_location=self.weights_location, device_map=device_map, no_split_module_classes=["BloomBlock"], ) self.check_inference_correctness(model_4bit) @require_multi_gpu def test_cpu_gpu_loading_custom_device_map(self): from transformers import AutoConfig, AutoModelForCausalLM r""" A test to check is dispatching a model on cpu & gpu works correctly using a random `device_map`. """ device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": "cpu", "lm_head": "cpu", "transformer.h": 0, "transformer.ln_f": 1, } bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True) with init_empty_weights(): model_4bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model_4bit.tie_weights() model_4bit = load_and_quantize_model( model_4bit, bnb_quantization_config, weights_location=self.weights_location, device_map=device_map, no_split_module_classes=["BloomBlock"], ) self.check_inference_correctness(model_4bit) @require_multi_gpu def test_cpu_gpu_disk_loading_custom_device_map_kwargs(self): from transformers import AutoConfig, AutoModelForCausalLM r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. 
This time we also add `disk` on the device_map - using the kwargs directly instead of the quantization config """ device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": "disk", "lm_head": 0, "transformer.h": 1, "transformer.ln_f": "cpu", } bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True) with init_empty_weights(): model_4bit = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(self.model_name)) model_4bit.tie_weights() with tempfile.TemporaryDirectory() as tmpdirname: model_4bit = load_and_quantize_model( model_4bit, bnb_quantization_config, weights_location=self.weights_location, device_map=device_map, no_split_module_classes=["BloomBlock"], offload_folder=tmpdirname, offload_state_dict=True, ) self.check_inference_correctness(model_4bit) @require_non_torch_xla @slow @require_cuda @require_bnb @require_huggingface_suite class Bnb4BitTestLoadedModel(unittest.TestCase): # We keep the constants inside the init function and model loading inside setUp function # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) # Therefore here we use only bloom-1b3 to test our module model_name = "marcsun13/bloom-1b7_with_lm_head" # Constant values # This was obtained on a RTX Titan so the number might slightly change EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574 input_text = "Hello my name is" EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I") EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n") MAX_NEW_TOKENS = 10 def setUp(self): """ Setup quantized model from loaded model """ from transformers import AutoModelForCausalLM, AutoTokenizer super().setUp() # Models and tokenizer self.model_fp16 = AutoModelForCausalLM.from_pretrained( self.model_name, torch_dtype=torch.float16, device_map="auto" ) self.bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True) self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype=torch.float16) self.model_4bit = load_and_quantize_model(self.model_4bit, self.bnb_quantization_config) self.tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7") def tearDown(self): """ TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. 
Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ super().tearDown() del self.model_fp16 del self.model_4bit gc.collect() torch.cuda.empty_cache() def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ from bitsandbytes.nn import Params4bit mem_fp16 = self.model_fp16.get_memory_footprint() mem_4bit = self.model_4bit.get_memory_footprint() assert round((mem_fp16 / mem_4bit) - self.EXPECTED_RELATIVE_DIFFERENCE, 7) >= 0 assert self.model_4bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Params4bit def test_linear_are_4bit(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model and the class type of the linear layers of the converted models """ self.model_fp16.get_memory_footprint() self.model_4bit.get_memory_footprint() for name, module in self.model_4bit.named_modules(): if isinstance(module, torch.nn.Linear): if ( name not in self.bnb_quantization_config.keep_in_fp32_modules + self.bnb_quantization_config.skip_modules ): # 4-bit parameters are packed in uint8 variables assert module.weight.dtype == torch.uint8 def test_generate_quality(self): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = self.model_4bit.generate( input_ids=encoded_input["input_ids"].to(self.model_4bit.device), max_new_tokens=10 ) assert self.tokenizer.decode(output_sequences[0], skip_special_tokens=True) in self.EXPECTED_OUTPUTS def test_fp32_4bit_conversion(self): r""" Test whether it is possible to mix both `4bit` and `fp32` weights when using `keep_in_fp32_modules` correctly. """ from transformers import AutoModelForCausalLM bnb_quantization_config = BnbQuantizationConfig(load_in_4bit=True, keep_in_fp32_modules=["lm_head"]) model = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype=torch.float16) model = load_and_quantize_model(model, bnb_quantization_config) assert model.lm_head.weight.dtype == torch.float32
accelerate/tests/test_quantization.py/0
{ "file_path": "accelerate/tests/test_quantization.py", "repo_id": "accelerate", "token_count": 17682 }
11
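The tests above build a skeleton model on the meta device and then call `load_and_quantize_model` with a `BnbQuantizationConfig` to fill it with 4-bit weights. The sketch below condenses that flow into a standalone snippet; it is an illustrative sketch only, and it assumes the public import paths of current `accelerate`/`transformers` releases (plus a CUDA GPU and `bitsandbytes`) rather than anything stated in the test file itself.

```python
# Hedged sketch: quantize a meta-initialized model to 4-bit with bitsandbytes via accelerate.
# Assumes `accelerate`, `bitsandbytes` and `transformers` are installed and a CUDA GPU is available.
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from huggingface_hub import hf_hub_download
from transformers import AutoConfig, AutoModelForCausalLM

model_name = "marcsun13/bloom-1b7_with_lm_head"  # same checkpoint the tests use

# Build the model skeleton on the meta device so no full-precision weights are materialized.
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained(model_name))
model.tie_weights()

# Download the full-precision weights and quantize them into the skeleton.
weights_location = hf_hub_download(model_name, "pytorch_model.bin")
bnb_config = BnbQuantizationConfig(load_in_4bit=True)
model = load_and_quantize_model(
    model,
    bnb_config,
    weights_location=weights_location,
    device_map="auto",
    no_split_module_classes=["BloomBlock"],
)
```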
<p align="center">
  <img src="https://raw.githubusercontent.com/huggingface/alignment-handbook/main/assets/handbook.png">
</p>

<p align="center">
    🤗 <a href="https://huggingface.co/collections/alignment-handbook/handbook-v01-models-and-datasets-654e424d22e6880da5ebc015" target="_blank">Models & Datasets</a> | 📃 <a href="https://arxiv.org/abs/2310.16944" target="_blank">Technical Report</a>
</p>

# The Alignment Handbook

Robust recipes to continue pretraining and to align language models with human and AI preferences.

## What is this?

Just one year ago, chatbots were out of fashion and most people hadn't heard about techniques like Reinforcement Learning from Human Feedback (RLHF) to align language models with human preferences. Then, OpenAI broke the internet with ChatGPT and Meta followed suit by releasing the Llama series of language models, which enabled the ML community to build their very own capable chatbots. This has led to a rich ecosystem of datasets and models that have mostly focused on teaching language models to follow instructions through supervised fine-tuning (SFT).

However, we know from the [InstructGPT](https://huggingface.co/papers/2203.02155) and [Llama2](https://huggingface.co/papers/2307.09288) papers that significant gains in helpfulness and safety can be had by augmenting SFT with human (or AI) preferences. At the same time, aligning language models to a set of preferences is a fairly novel idea and there are few public resources available on how to train these models, what data to collect, and what metrics to measure for best downstream performance. The Alignment Handbook aims to fill that gap by providing the community with a series of robust training recipes that span the whole pipeline.

## News 🗞️
* **August 18, 2024**: We release SmolLM-Instruct v0.2, along with the [recipe](recipes/smollm/README.md) to fine-tune small LLMs 💻
* **April 12, 2024**: We release Zephyr 141B (A35B), in collaboration with Argilla and Kaist AI, along with the recipe to fine-tune Mixtral 8x22B with ORPO 🪁
* **March 12, 2024:** We release StarChat2 15B, along with the recipe to train capable coding assistants 🌟
* **March 1, 2024:** We release Zephyr 7B Gemma, which is a new recipe to align Gemma 7B with RLAIF 🔥
* **February 1, 2024:** We release a recipe to align open LLMs with Constitutional AI 📜! See the [recipe](https://github.com/huggingface/alignment-handbook/tree/main/recipes/constitutional-ai) and the [blog post](https://huggingface.co/blog/constitutional_ai) for details.
* **January 18, 2024:** We release a suite of evaluations of DPO vs KTO vs IPO, see the [recipe](recipes/pref_align_scan/README.md) and the [blog post](https://huggingface.co/blog/pref-tuning) for details.
* **November 10, 2023:** We release all the training code to replicate Zephyr-7b-β 🪁! We also release [No Robots](https://huggingface.co/datasets/HuggingFaceH4/no_robots), a brand new dataset of 10,000 instructions and demonstrations written entirely by skilled human annotators.

## Links 🔗
* [Zephyr 7B models, datasets, and demos](https://huggingface.co/collections/HuggingFaceH4/zephyr-7b-6538c6d6d5ddd1cbb1744a66)

## How to navigate this project 🧭

This project is simple by design and mostly consists of:

* [`scripts`](./scripts/) to train and evaluate models. Four steps are included: continued pretraining, supervised fine-tuning (SFT) for chat, preference alignment with DPO, and supervised fine-tuning combined with preference alignment via ORPO. Each script supports distributed training of the full model weights with DeepSpeed ZeRO-3, or LoRA/QLoRA for parameter-efficient fine-tuning.
* [`recipes`](./recipes/) to reproduce models like Zephyr 7B. Each recipe takes the form of a YAML file which contains all the parameters associated with a single training run. A `gpt2-nl` recipe is also given to illustrate how this handbook can be used for language or domain adaptation, e.g. by continuing to pretrain on a different language, and then SFT and DPO tuning the result.

We are also working on a series of guides to explain how methods like direct preference optimization (DPO) work, along with lessons learned from gathering human preferences in practice. To get started, we recommend the following:

1. Follow the [installation instructions](#installation-instructions) to set up your environment etc.
2. Replicate Zephyr-7b-β by following the [recipe instructions](./recipes/zephyr-7b-beta/README.md).

If you would like to train chat models on your own datasets, we recommend following the dataset formatting instructions [here](./scripts/README.md#fine-tuning-on-your-datasets).

## Contents

The initial release of the handbook will focus on the following techniques:

* **Continued pretraining:** adapt language models to a new language or domain, or simply improve them by continued pretraining (causal language modeling) on a new dataset.
* **Supervised fine-tuning:** teach language models to follow instructions, with tips on how to collect and curate your training dataset.
* **Reward modeling:** teach language models to distinguish model responses according to human or AI preferences.
* **Rejection sampling:** a simple, but powerful technique to boost the performance of your SFT model.
* **Direct preference optimisation (DPO):** a powerful and promising alternative to PPO.
* **Odds Ratio Preference Optimisation (ORPO)**: a technique to fine-tune language models with human preferences, combining SFT and DPO in a single stage.

## Installation instructions

To run the code in this project, first create a Python virtual environment using e.g. Conda:

```shell
conda create -n handbook python=3.10 && conda activate handbook
```

Next, install PyTorch `v2.1.2` - the precise version is important for reproducibility! Since this is hardware-dependent, we direct you to the [PyTorch Installation Page](https://pytorch.org/get-started/locally/).

You can then install the remaining package dependencies as follows:

```shell
git clone https://github.com/huggingface/alignment-handbook.git
cd ./alignment-handbook/
python -m pip install .
```

You will also need Flash Attention 2 installed, which can be done by running:

```shell
python -m pip install flash-attn --no-build-isolation
```

> **Note**
> If your machine has less than 96GB of RAM and many CPU cores, reduce the `MAX_JOBS` argument, e.g. `MAX_JOBS=4 pip install flash-attn --no-build-isolation`

Next, log into your Hugging Face account as follows:

```shell
huggingface-cli login
```

Finally, install Git LFS so that you can push models to the Hugging Face Hub:

```shell
sudo apt-get install git-lfs
```

You can now check out the `scripts` and `recipes` directories for instructions on how to train some models 🪁!
## Project structure ``` ├── LICENSE ├── Makefile <- Makefile with commands like `make style` ├── README.md <- The top-level README for developers using this project ├── chapters <- Educational content to render on hf.co/learn ├── recipes <- Recipe configs, accelerate configs, slurm scripts ├── scripts <- Scripts to train and evaluate chat models ├── setup.cfg <- Installation config (mostly used for configuring code quality & tests) ├── setup.py <- Makes project pip installable (pip install -e .) so `alignment` can be imported ├── src <- Source code for use in this project └── tests <- Unit tests ``` ## Citation If you find the content of this repo useful in your work, please cite it as follows via `\usepackage{biblatex}`: ```bibtex @software{Tunstall_The_Alignment_Handbook, author = {Tunstall, Lewis and Beeching, Edward and Lambert, Nathan and Rajani, Nazneen and Huang, Shengyi and Rasul, Kashif and Bartolome, Alvaro and M. Rush, Alexander and Wolf, Thomas}, license = {Apache-2.0}, title = {{The Alignment Handbook}}, url = {https://github.com/huggingface/alignment-handbook}, version = {0.3.0.dev0} } ```
alignment-handbook/README.md/0
{ "file_path": "alignment-handbook/README.md", "repo_id": "alignment-handbook", "token_count": 2416 }
12
#!/bin/bash #SBATCH --ntasks-per-node=1 #SBATCH --exclusive #SBATCH --gres=gpu:8 #SBATCH --partition=hopper-prod # Adjust this for your cluster #SBATCH --output=/fsx/h4/logs/%x-%j.out # Adjust this for your cluster #SBATCH --err=/fsx/h4/logs/%x-%j.err # Adjust this for your cluster set -x -e source ~/.bashrc conda activate handbook echo "START TIME: $(date)" MODEL=$1 TASK=$2 PRECISION=$3 ACCELERATOR=$4 OPTIONAL_ARGS=$5 # Training setup NUM_NODES=$SLURM_NNODES GPUS_PER_NODE=8 WORLD_SIZE=$(($NUM_NODES*$GPUS_PER_NODE)) # Due to conflicts between Accelerate's DeepSpeed configs and Transformers' TrainingArguments, we need to parse the gradient accumulation steps from the config file to ensure they match CONFIG_FILE=recipes/$MODEL/$TASK/config_$PRECISION.yaml GRAD_ACC_STEPS=$(grep 'gradient_accumulation_steps' $CONFIG_FILE | awk '{print $2}') # Split the string into individual arguments IFS=' ' read -ra ARGS <<< "$OPTIONAL_ARGS" # Loop through the arguments and find the one with "--gradient_accumulation_steps" for arg in "${ARGS[@]}"; do if [[ "$arg" == "--gradient_accumulation_steps="* ]]; then # Extract the value after the equals sign GRAD_ACC_STEPS="${arg#*=}" break # Exit the loop once we find the desired argument fi done echo "Gradient accumulation steps: $GRAD_ACC_STEPS" # so processes know who to talk to MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) MASTER_PORT=6000 export CMD=" \ scripts/run_$TASK.py $CONFIG_FILE $OPTIONAL_ARGS " export LAUNCHER="HF_HUB_ENABLE_HF_TRANSFER=1 ACCELERATE_LOG_LEVEL=info TRANSFORMERS_VERBOSITY=info accelerate launch \ --config_file recipes/accelerate_configs/$ACCELERATOR.yaml \ --gradient_accumulation_steps $GRAD_ACC_STEPS \ --num_machines $NUM_NODES \ --num_processes $WORLD_SIZE \ --main_process_ip $MASTER_ADDR \ --main_process_port $MASTER_PORT \ --machine_rank \$SLURM_PROCID \ --rdzv_conf "rdzv_backend=c10d,rdzv_endpoint=$MASTER_ADDR:$MASTER_PORT" \ --max_restarts 1 \ --role \$(hostname -s): \ --tee 3 \ " # force crashing on nccl issues like hanging broadcast export NCCL_ASYNC_ERROR_HANDLING=1 # export NCCL_DEBUG=INFO # export NCCL_DEBUG_SUBSYS=COLL # export NCCL_SOCKET_NTHREADS=1 # export NCCL_NSOCKS_PERTHREAD=1 # export CUDA_LAUNCH_BLOCKING=1 # Specific configuration optimized for the Hugging Face Compute Cluster # Be ye warned this may not work on other clusters! module load cuda/12.1 # srun error handling: # --wait=60: wait 60 sec after the first task terminates before terminating all remaining tasks # --kill-on-bad-exit=1: terminate a step if any task exits with a non-zero exit code SRUN_ARGS=" \ --wait=60 \ --kill-on-bad-exit=1 \ " clear; srun $SRUN_ARGS --jobid $SLURM_JOB_ID bash -c "$LAUNCHER --role \$SLURMD_NODENAME: $CMD" 2>&1 echo "END TIME: $(date)"
alignment-handbook/recipes/launch.slurm/0
{ "file_path": "alignment-handbook/recipes/launch.slurm", "repo_id": "alignment-handbook", "token_count": 1135 }
13
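The launcher above recovers `gradient_accumulation_steps` from the recipe YAML with `grep`/`awk` so the value passed to `accelerate launch` stays in sync with the training config. As a hedged aside, the same check can be done from Python before submitting a job; the recipe path below is only a placeholder and `pyyaml` is assumed to be available.

```python
# Hedged sketch: read gradient_accumulation_steps from a recipe config,
# mirroring the grep/awk parsing done in the slurm launcher above.
import yaml

config_file = "recipes/zephyr-7b-beta/sft/config_full.yaml"  # placeholder recipe path

with open(config_file) as f:
    config = yaml.safe_load(f)

grad_acc_steps = config.get("gradient_accumulation_steps", 1)
print(f"Gradient accumulation steps: {grad_acc_steps}")
```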
[workspace] members = [ "candle-core", "candle-datasets", "candle-examples", "candle-book", "candle-nn", "candle-pyo3", "candle-transformers", "candle-wasm-examples/*", "candle-wasm-tests", "tensor-tools", ] exclude = [ "candle-flash-attn", "candle-kernels", "candle-metal-kernels", "candle-onnx", ] resolver = "2" [workspace.package] version = "0.6.1" edition = "2021" description = "Minimalist ML framework." repository = "https://github.com/huggingface/candle" keywords = ["blas", "tensor", "machine-learning"] categories = ["science"] license = "MIT OR Apache-2.0" [workspace.dependencies] ab_glyph = "0.2.23" accelerate-src = { version = "0.3.2" } anyhow = { version = "1", features = ["backtrace"] } byteorder = "1.4.3" candle = { path = "./candle-core", package = "candle-core", version = "0.6.1" } candle-datasets = { path = "./candle-datasets", version = "0.6.1" } candle-flash-attn = { path = "./candle-flash-attn", version = "0.6.1" } candle-kernels = { path = "./candle-kernels", version = "0.6.1" } candle-metal-kernels = { path = "./candle-metal-kernels", version = "0.6.1" } candle-nn = { path = "./candle-nn", version = "0.6.1" } candle-onnx = { path = "./candle-onnx", version = "0.6.1" } candle-transformers = { path = "./candle-transformers", version = "0.6.1" } clap = { version = "4.2.4", features = ["derive"] } criterion = { version = "0.5.1", default-features=false } cudarc = { version = "0.12.0", features = ["std", "cublas", "cublaslt", "curand", "driver", "nvrtc", "f16", "cuda-version-from-build-system", "dynamic-linking"], default-features=false } fancy-regex = "0.13.0" gemm = { version = "0.17.0", features = ["wasm-simd128-enable"] } hf-hub = "0.3.0" half = { version = "2.3.1", features = ["num-traits", "use-intrinsics", "rand_distr"] } hound = "3.5.1" image = { version = "0.25.2", default-features = false, features = ["jpeg", "png"] } imageproc = { version = "0.24.0", default-features = false } intel-mkl-src = { version = "0.8.1", features = ["mkl-static-lp64-iomp"] } libc = { version = "0.2.147" } log = "0.4" memmap2 = { version = "0.9.3", features = ["stable_deref_trait"] } num_cpus = "1.15.0" num-traits = "0.2.15" parquet = { version = "51.0.0" } rand = "0.8.5" rand_distr = "0.4.3" rayon = "1.7.0" safetensors = "0.4.1" serde = { version = "1.0.171", features = ["derive"] } serde_plain = "1.0.2" serde_json = "1.0.99" thiserror = "1" tokenizers = { version = "0.19.1", default-features = false } tracing = "0.1.37" tracing-chrome = "0.7.1" tracing-subscriber = "0.3.7" yoke = { version = "0.7.2", features = ["derive"] } zip = { version = "1.1.1", default-features = false } metal = { version = "0.27.0", features = ["mps"]} [profile.release-with-debug] inherits = "release" debug = true
candle/Cargo.toml/0
{ "file_path": "candle/Cargo.toml", "repo_id": "candle", "token_count": 1211 }
14
# Advanced CUDA usage
candle/candle-book/src/cuda/README.md/0
{ "file_path": "candle/candle-book/src/cuda/README.md", "repo_id": "candle", "token_count": 6 }
15
# Serialization
candle/candle-book/src/training/serialization.md/0
{ "file_path": "candle/candle-book/src/training/serialization.md", "repo_id": "candle", "token_count": 4 }
16
#[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use anyhow::Result; use candle_core::{Device, Tensor}; fn main() -> Result<()> { let device = Device::new_cuda(0)?; let x = Tensor::randn(0f32, 1.0, (8 * 4096, 8 * 4096), &device)? .to_dtype(candle_core::DType::BF16)?; candle_core::cuda::set_gemm_reduced_precision_f32(false); candle_core::cuda::set_gemm_reduced_precision_bf16(false); let _x1 = x.matmul(&x)?; drop(_x1); let start_time = std::time::Instant::now(); let _x1 = x.matmul(&x)?; device.synchronize()?; println!("fp32: {:?}", start_time.elapsed()); drop(_x1); candle_core::cuda::set_gemm_reduced_precision_f32(true); candle_core::cuda::set_gemm_reduced_precision_bf16(true); let _x1 = x.matmul(&x)?; drop(_x1); let start_time = std::time::Instant::now(); let _x1 = x.matmul(&x)?; device.synchronize()?; println!("tf32: {:?}", start_time.elapsed()); drop(_x1); Ok(()) }
candle/candle-core/examples/cuda_basics.rs/0
{ "file_path": "candle/candle-core/examples/cuda_basics.rs", "repo_id": "candle", "token_count": 477 }
17
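The candle example above toggles the reduced-precision GEMM settings and times a large matmul with and without them. For comparison, a loosely analogous experiment in PyTorch is sketched below; this is an assumption-laden illustration (CUDA device, recent PyTorch), not part of the candle example, and it uses a smaller matrix so it fits on more GPUs.

```python
# Hedged sketch: time an fp32 matmul with TF32 tensor cores disabled vs enabled,
# loosely mirroring the candle cuda_basics example above.
import time

import torch

x = torch.randn(8192, 8192, device="cuda", dtype=torch.float32)

for allow_tf32 in (False, True):
    torch.backends.cuda.matmul.allow_tf32 = allow_tf32
    _ = x @ x  # warmup so kernel selection is not included in the timing
    torch.cuda.synchronize()
    start = time.time()
    _ = x @ x
    torch.cuda.synchronize()
    label = "tf32" if allow_tf32 else "fp32"
    print(f"{label}: {time.time() - start:.4f}s")
```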
use crate::WithDType; use cudarc; use cudarc::cudnn::safe::{ConvForward, Cudnn}; use cudarc::driver::{CudaSlice, CudaView, DeviceRepr, ValidAsZeroBits}; use std::cell::RefCell; use std::collections::HashMap; use std::sync::Arc; // The cudnn handles are stored per thread here rather than on the CudaDevice as they are neither // send nor sync. thread_local! { static CUDNN: RefCell<HashMap<crate::cuda_backend::DeviceId, Arc<Cudnn>>> = HashMap::new().into(); } impl From<cudarc::cudnn::CudnnError> for crate::Error { fn from(err: cudarc::cudnn::CudnnError) -> Self { crate::Error::wrap(err) } } impl From<cudarc::driver::DriverError> for crate::Error { fn from(err: cudarc::driver::DriverError) -> Self { crate::Error::wrap(err) } } pub(crate) fn launch_conv2d< T: DeviceRepr + WithDType + ValidAsZeroBits + cudarc::cudnn::CudnnDataType, >( src: &CudaView<T>, src_l: &crate::Layout, filter: &CudaView<T>, dst: &mut CudaSlice<T>, params: &crate::conv::ParamsConv2D, dev: &crate::cuda_backend::CudaDevice, ) -> crate::Result<()> { use crate::conv::CudnnFwdAlgo as CandleAlgo; use cudarc::cudnn::sys::cudnnConvolutionFwdAlgo_t as A; let device_id = dev.id(); let cudnn = CUDNN.with(|cudnn| { if let Some(cudnn) = cudnn.borrow().get(&device_id) { return Ok(cudnn.clone()); } let c = Cudnn::new(dev.cuda_device()); if let Ok(c) = &c { cudnn.borrow_mut().insert(device_id, c.clone()); } c })?; let conv = cudnn.create_conv2d::<T>( /* pad */ [params.padding as i32, params.padding as i32], /* stride */ [params.stride as i32, params.stride as i32], /* dilation */ [params.dilation as i32, params.dilation as i32], cudarc::cudnn::sys::cudnnConvolutionMode_t::CUDNN_CROSS_CORRELATION, )?; let x_shape = [ params.b_size as i32, params.c_in as i32, params.i_h as i32, params.i_w as i32, ]; // Note that `src` already starts at the proper offset. let x = if src_l.is_contiguous() { cudnn.create_4d_tensor( cudarc::cudnn::sys::cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, x_shape, )? } else { let s = src_l.stride(); cudnn.create_4d_tensor_ex( x_shape, [s[0] as i32, s[1] as i32, s[2] as i32, s[3] as i32], )? 
}; let w = cudnn.create_4d_filter( cudarc::cudnn::sys::cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, [ params.c_out as i32, params.c_in as i32, params.k_h as i32, params.k_w as i32, ], )?; let (w_out, h_out) = (params.out_w() as i32, params.out_h() as i32); let y = cudnn.create_4d_tensor( cudarc::cudnn::sys::cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, [params.b_size as i32, params.c_out as i32, h_out, w_out], )?; let conv2d = ConvForward { conv: &conv, x: &x, w: &w, y: &y, }; let alg = match params.cudnn_fwd_algo { None => conv2d.pick_algorithm()?, Some(CandleAlgo::ImplicitGemm) => A::CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, Some(CandleAlgo::ImplicitPrecompGemm) => { A::CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM } Some(CandleAlgo::Gemm) => A::CUDNN_CONVOLUTION_FWD_ALGO_GEMM, Some(CandleAlgo::Direct) => A::CUDNN_CONVOLUTION_FWD_ALGO_DIRECT, Some(CandleAlgo::Fft) => A::CUDNN_CONVOLUTION_FWD_ALGO_FFT, Some(CandleAlgo::FftTiling) => A::CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING, Some(CandleAlgo::Winograd) => A::CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD, Some(CandleAlgo::WinogradNonFused) => A::CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, Some(CandleAlgo::Count) => A::CUDNN_CONVOLUTION_FWD_ALGO_COUNT, }; let workspace_size = conv2d.get_workspace_size(alg)?; let mut workspace = dev.cuda_device().alloc_zeros::<u8>(workspace_size)?; unsafe { conv2d.launch::<CudaSlice<u8>, _, _, _>( alg, Some(&mut workspace), (T::one(), T::zero()), src, filter, dst, )?; } Ok(()) }
candle/candle-core/src/cuda_backend/cudnn.rs/0
{ "file_path": "candle/candle-core/src/cuda_backend/cudnn.rs", "repo_id": "candle", "token_count": 2210 }
18
use crate::backend::{BackendDevice, BackendStorage}; use crate::conv::{ParamsConv1D, ParamsConv2D, ParamsConvTranspose1D, ParamsConvTranspose2D}; use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT}; use crate::{CpuStorage, CpuStorageRef, DType, Layout, Result, Shape}; use candle_metal_kernels::{BufferOffset, CallConvTranspose2dCfg, Kernels}; use metal::{Buffer, MTLResourceOptions, NSUInteger}; use std::collections::HashMap; use std::ffi::c_void; use std::sync::{Arc, Mutex, PoisonError, RwLock, TryLockError}; mod device; pub use device::{DeviceId, MetalDevice}; pub fn buffer_o<'a>(buffer: &'a Buffer, l: &Layout, dtype: DType) -> BufferOffset<'a> { BufferOffset { buffer, offset_in_bytes: l.start_offset() * dtype.size_in_bytes(), } } /// Simple way to catch lock error without /// depending on T #[derive(thiserror::Error, Debug)] pub enum LockError { #[error("{0}")] Poisoned(String), #[error("Would block")] WouldBlock, } impl<T> From<TryLockError<T>> for MetalError { fn from(value: TryLockError<T>) -> Self { match value { TryLockError::Poisoned(p) => MetalError::LockError(LockError::Poisoned(p.to_string())), TryLockError::WouldBlock => MetalError::LockError(LockError::WouldBlock), } } } impl<T> From<PoisonError<T>> for MetalError { fn from(p: PoisonError<T>) -> Self { MetalError::LockError(LockError::Poisoned(p.to_string())) } } /// Metal related errors #[derive(thiserror::Error, Debug)] pub enum MetalError { #[error("{0}")] Message(String), #[error(transparent)] KernelError(#[from] candle_metal_kernels::MetalKernelError), #[error("{0:?}")] LockError(LockError), #[error("{msg}, expected: {expected:?}, got: {got:?}")] UnexpectedDType { msg: &'static str, expected: DType, got: DType, }, } impl From<String> for MetalError { fn from(e: String) -> Self { MetalError::Message(e) } } #[derive(Debug, Clone)] pub struct MetalStorage { /// The actual buffer containing the data. buffer: Arc<metal::Buffer>, /// a reference to the device owning this buffer device: MetalDevice, /// The count of allocated elements in the buffer count: usize, /// The dtype is kept since buffers are untyped. 
dtype: DType, } impl BackendStorage for MetalStorage { type Device = MetalDevice; fn try_clone(&self, _: &Layout) -> Result<Self> { Ok(self.clone()) } fn dtype(&self) -> DType { self.dtype } fn device(&self) -> &Self::Device { &self.device } fn to_cpu_storage(&self) -> Result<CpuStorage> { match self.dtype { DType::U8 => Ok(CpuStorage::U8(self.to_cpu()?)), DType::U32 => Ok(CpuStorage::U32(self.to_cpu()?)), DType::I64 => Ok(CpuStorage::I64(self.to_cpu()?)), DType::F16 => Ok(CpuStorage::F16(self.to_cpu()?)), DType::BF16 => Ok(CpuStorage::BF16(self.to_cpu()?)), DType::F32 => Ok(CpuStorage::F32(self.to_cpu()?)), DType::F64 => Ok(CpuStorage::F64(self.to_cpu()?)), } } fn affine(&self, layout: &Layout, mul: f64, add: f64) -> Result<Self> { let device = self.device().clone(); let shape = layout.shape(); let el = shape.elem_count(); let dtype = self.dtype; let buffer = device.new_buffer(el, self.dtype, "affine")?; let command_buffer = self.device.command_buffer()?; let src = buffer_o(&self.buffer, layout, dtype); if layout.is_contiguous() { let name = match self.dtype { DType::F32 => "affine_f32", DType::F16 => "affine_f16", DType::BF16 => "affine_bf16", DType::U8 => "affine_u8", DType::U32 => "affine_u32", dtype => crate::bail!("Metal contiguous affine {dtype:?} not implemented"), }; candle_metal_kernels::call_affine( &device.device, &command_buffer, &device.kernels, name, el, src, &buffer, mul as f32, add as f32, ) .map_err(MetalError::from)?; } else { let name = match self.dtype { DType::F32 => "affine_f32_strided", DType::F16 => "affine_f16_strided", DType::BF16 => "affine_bf16_strided", dtype => crate::bail!("Metal strided affine {dtype:?} not implemented"), }; candle_metal_kernels::call_affine_strided( &device.device, &command_buffer, &device.kernels, name, layout.dims(), src, layout.stride(), &buffer, mul as f32, add as f32, ) .map_err(MetalError::from)?; } Ok(Self::new(buffer, device.clone(), el, dtype)) } fn powf(&self, layout: &Layout, pow: f64) -> Result<Self> { let device = self.device().clone(); let shape = layout.shape(); let el = shape.elem_count(); let dtype = self.dtype; let buffer = device.new_buffer(el, self.dtype, "powf")?; let command_buffer = self.device.command_buffer()?; let src = buffer_o(&self.buffer, layout, dtype); if layout.is_contiguous() { let name = match self.dtype { DType::F32 => "powf_f32", DType::F16 => "powf_f16", DType::BF16 => "powf_bf16", dtype => crate::bail!("Metal contiguous powf {dtype:?} not implemented"), }; candle_metal_kernels::call_powf( &device.device, &command_buffer, &device.kernels, name, el, src, &buffer, pow as f32, ) .map_err(MetalError::from)?; } else { let name = match self.dtype { DType::F32 => "powf_f32_strided", DType::F16 => "powf_f16_strided", DType::BF16 => "powf_bf16_strided", dtype => crate::bail!("Metal strided powf {dtype:?} not implemented"), }; candle_metal_kernels::call_powf_strided( &device.device, &command_buffer, &device.kernels, name, layout.dims(), src, layout.stride(), &buffer, pow as f32, ) .map_err(MetalError::from)?; } Ok(Self::new(buffer, device.clone(), el, dtype)) } fn elu(&self, layout: &Layout, alpha: f64) -> Result<Self> { let device = self.device().clone(); let shape = layout.shape(); let el = shape.elem_count(); let dtype = self.dtype; let buffer = device.new_buffer(el, self.dtype, "elu")?; let command_buffer = self.device.command_buffer()?; let src = buffer_o(&self.buffer, layout, self.dtype); if layout.is_contiguous() { let name = match self.dtype { DType::F32 => "elu_f32", DType::F16 => "elu_f16", DType::BF16 
=> "elu_bf16", dtype => crate::bail!("Metal contiguous elu {dtype:?} not implemented"), }; candle_metal_kernels::call_elu( &device.device, &command_buffer, &device.kernels, name, el, src, &buffer, alpha as f32, ) .map_err(MetalError::from)?; } else { let name = match self.dtype { DType::F32 => "elu_f32_strided", DType::F16 => "elu_f16_strided", DType::BF16 => "elu_bf16_strided", dtype => crate::bail!("Metal strided elu {dtype:?} not implemented"), }; candle_metal_kernels::call_elu_strided( &device.device, &command_buffer, &device.kernels, name, layout.dims(), src, layout.stride(), &buffer, alpha as f32, ) .map_err(MetalError::from)?; } Ok(Self::new(buffer, device.clone(), el, dtype)) } fn reduce_op(&self, op: ReduceOp, layout: &Layout, sum_dims: &[usize]) -> Result<Self> { let device = self.device.clone(); let src_stride = layout.stride(); let src_dims = layout.shape().dims(); // Source dims and strides with the sum dims at the end. let mut dims = vec![]; let mut stride = vec![]; let mut dst_el: usize = 1; for (dim_idx, &d) in src_dims.iter().enumerate() { if !sum_dims.contains(&dim_idx) { dst_el *= d; dims.push(d); stride.push(src_stride[dim_idx]); } } for &dim_idx in sum_dims.iter() { dims.push(src_dims[dim_idx]); stride.push(src_stride[dim_idx]); } // The reduction loop requires the shared array to be properly initialized and for // this we want the number of threads to be a power of two. let (name, check_empty, return_index) = match (op, self.dtype) { (ReduceOp::Sum, DType::F32) => ("fast_sum_f32_strided", false, false), (ReduceOp::Min, DType::F32) => ("fast_min_f32_strided", true, false), (ReduceOp::Max, DType::F32) => ("fast_max_f32_strided", true, false), (ReduceOp::ArgMin, DType::F32) => ("fast_argmin_f32_strided", true, true), (ReduceOp::ArgMax, DType::F32) => ("fast_argmax_f32_strided", true, true), (ReduceOp::Sum, DType::U32) => ("fast_sum_u32_strided", false, false), (ReduceOp::Min, DType::U32) => ("fast_min_u32_strided", true, false), (ReduceOp::Max, DType::U32) => ("fast_max_u32_strided", true, false), (ReduceOp::ArgMin, DType::U32) => ("fast_argmin_u32_strided", true, true), (ReduceOp::ArgMax, DType::U32) => ("fast_argmax_u32_strided", true, true), (ReduceOp::Sum, DType::F16) => ("fast_sum_f16_strided", false, false), (ReduceOp::Min, DType::F16) => ("fast_min_f16_strided", true, false), (ReduceOp::Max, DType::F16) => ("fast_max_f16_strided", true, false), (ReduceOp::ArgMin, DType::F16) => ("fast_argmin_f16_strided", true, true), (ReduceOp::ArgMax, DType::F16) => ("fast_argmax_f16_strided", true, true), (ReduceOp::Sum, DType::BF16) => ("fast_sum_bf16_strided", false, false), (ReduceOp::Min, DType::BF16) => ("fast_min_bf16_strided", true, false), (ReduceOp::Max, DType::BF16) => ("fast_max_bf16_strided", true, false), (ReduceOp::ArgMin, DType::BF16) => ("fast_argmin_bf16_strided", true, true), (ReduceOp::ArgMax, DType::BF16) => ("fast_argmax_bf16_strided", true, true), (ReduceOp::Sum, DType::I64) => ("fast_sum_i64_strided", false, false), (ReduceOp::Min, DType::I64) => ("fast_min_i64_strided", true, false), (ReduceOp::Max, DType::I64) => ("fast_max_i64_strided", true, false), (ReduceOp::ArgMin, DType::I64) => ("fast_argmin_i64_strided", true, true), (ReduceOp::ArgMax, DType::I64) => ("fast_argmax_i64_strided", true, true), (ReduceOp::Sum, DType::U8) => ("fast_sum_u8_strided", false, false), (ReduceOp::Min, DType::U8) => ("fast_min_u8_strided", true, false), (ReduceOp::Max, DType::U8) => ("fast_max_u8_strided", true, false), (ReduceOp::ArgMin, DType::U8) => 
("fast_argmin_u8_strided", true, true), (ReduceOp::ArgMax, DType::U8) => ("fast_argmax_u8_strided", true, true), (k, dtype) => crate::bail!("Metal reduce op {k:?} {dtype:?} not implemented"), }; if check_empty && layout.shape().elem_count() == 0 { Err(crate::Error::EmptyTensor { op: "reduce" }.bt())? } let dtype = if return_index { DType::U32 } else { self.dtype }; let buffer = device.new_buffer(dst_el, dtype, "reduce")?; let command_buffer = self.device.command_buffer()?; let src = buffer_o(&self.buffer, layout, self.dtype); candle_metal_kernels::call_reduce_strided( &device.device, &command_buffer, &device.kernels, name, &dims, &stride, dst_el, src, &buffer, ) .map_err(MetalError::from)?; Ok(Self::new(buffer, device, dst_el, dtype)) } fn cmp(&self, op: CmpOp, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout) -> Result<Self> { let name = match op { CmpOp::Eq => "eq", CmpOp::Ne => "ne", CmpOp::Le => "le", CmpOp::Ge => "ge", CmpOp::Lt => "lt", CmpOp::Gt => "gt", }; self.binary(name, rhs, lhs_l, rhs_l) } fn to_dtype(&self, layout: &Layout, dtype: DType) -> Result<Self> { let device = self.device(); let shape = layout.shape(); let el_count = shape.elem_count(); let buffer = device.new_buffer(el_count, dtype, "todtype")?; let command_buffer = device.command_buffer()?; let src = buffer_o(&self.buffer, layout, self.dtype); if layout.is_contiguous() { let kernel_name = match (self.dtype, dtype) { (DType::U32, DType::BF16) => "cast_u32_bf16", (DType::U32, DType::F16) => "cast_u32_f16", (DType::U32, DType::F32) => "cast_u32_f32", (DType::U32, DType::I64) => "cast_u32_i64", (DType::U32, DType::U8) => "cast_u32_u8", (DType::U8, DType::BF16) => "cast_u8_bf16", (DType::U8, DType::F16) => "cast_u8_f16", (DType::U8, DType::F32) => "cast_u8_f32", (DType::U8, DType::I64) => "cast_u8_i64", (DType::U8, DType::U32) => "cast_u8_u32", (DType::F32, DType::BF16) => "cast_f32_bf16", (DType::F32, DType::F16) => "cast_f32_f16", (DType::F32, DType::I64) => "cast_f32_i64", (DType::F32, DType::U32) => "cast_f32_u32", (DType::F32, DType::U8) => "cast_f32_u8", (DType::I64, DType::BF16) => "cast_i64_bf16", (DType::I64, DType::F16) => "cast_i64_f16", (DType::I64, DType::F32) => "cast_i64_f32", (DType::I64, DType::U32) => "cast_i64_u32", (DType::I64, DType::U8) => "cast_i64_u8", (DType::F16, DType::BF16) => "cast_f16_bf16", (DType::F16, DType::F32) => "cast_f16_f32", (DType::F16, DType::I64) => "cast_f16_i64", (DType::F16, DType::U32) => "cast_f16_u32", (DType::F16, DType::U8) => "cast_f16_u8", (DType::BF16, DType::F16) => "cast_bf16_f16", (DType::BF16, DType::F32) => "cast_bf16_f32", (DType::BF16, DType::I64) => "cast_bf16_i64", (DType::BF16, DType::U32) => "cast_bf16_u32", (DType::BF16, DType::U8) => "cast_bf16_u8", (left, right) => { crate::bail!("Metal contiguous to_dtype {left:?} {right:?} not implemented") } }; candle_metal_kernels::call_cast_contiguous( &device.device, &command_buffer, &device.kernels, kernel_name, el_count, src, &buffer, ) .map_err(MetalError::from)?; } else { let kernel_name = match (self.dtype, dtype) { (DType::U32, DType::F32) => "cast_u32_f32_strided", (DType::U32, DType::U8) => "cast_u32_u8_strided", (DType::U32, DType::I64) => "cast_u32_i64_strided", (DType::U8, DType::U32) => "cast_u8_u32_strided", (DType::U8, DType::F32) => "cast_u8_f32_strided", (DType::U8, DType::I64) => "cast_u8_i64_strided", (DType::F32, DType::F16) => "cast_f32_f16_strided", (DType::F16, DType::F32) => "cast_f16_f32_strided", (DType::I64, DType::F32) => "cast_i64_f32_strided", (DType::F32, DType::BF16) => 
"cast_f32_bf16_strided", (DType::BF16, DType::F32) => "cast_bf16_f32_strided", (left, right) => { crate::bail!("Metal strided to_dtype {left:?} {right:?} not implemented") } }; candle_metal_kernels::call_cast_strided( &device.device, &command_buffer, &device.kernels, kernel_name, layout.dims(), src, layout.stride(), &buffer, ) .map_err(MetalError::from)?; } command_buffer.set_label("to_dtype"); Ok(Self::new(buffer, device.clone(), el_count, dtype)) } fn unary_impl<B: UnaryOpT>(&self, layout: &Layout) -> Result<Self> { let device = self.device(); let dtype = self.dtype; let shape = layout.shape(); let el_count = shape.elem_count(); let buffer = device.new_buffer(el_count, dtype, B::KERNEL)?; let command_buffer = device.command_buffer()?; command_buffer.set_label(B::KERNEL); let src = buffer_o(&self.buffer, layout, self.dtype); match (el_count % 2, dtype, layout.is_contiguous()) { (0, DType::BF16 | DType::F16, true) => { use candle_metal_kernels::unary::contiguous_tiled; let kernel_name = match (B::KERNEL, dtype) { ("uabs", DType::F16) => contiguous_tiled::abs::HALF, ("uabs", DType::F32) => contiguous_tiled::abs::FLOAT, ("uabs", DType::BF16) => contiguous_tiled::abs::BFLOAT, ("uceil", DType::F16) => contiguous_tiled::ceil::HALF, ("uceil", DType::F32) => contiguous_tiled::ceil::FLOAT, ("uceil", DType::BF16) => contiguous_tiled::ceil::BFLOAT, ("ucos", DType::F16) => contiguous_tiled::cos::HALF, ("ucos", DType::F32) => contiguous_tiled::cos::FLOAT, ("ucos", DType::BF16) => contiguous_tiled::cos::BFLOAT, ("uerf", DType::F16) => contiguous_tiled::erf::HALF, ("uerf", DType::F32) => contiguous_tiled::erf::FLOAT, ("uerf", DType::BF16) => contiguous_tiled::erf::BFLOAT, ("uexp", DType::F16) => contiguous_tiled::exp::HALF, ("uexp", DType::F32) => contiguous_tiled::exp::FLOAT, ("uexp", DType::BF16) => contiguous_tiled::exp::BFLOAT, ("ufloor", DType::F16) => contiguous_tiled::floor::HALF, ("ufloor", DType::F32) => contiguous_tiled::floor::FLOAT, ("ufloor", DType::BF16) => contiguous_tiled::floor::BFLOAT, ("ugelu_erf", DType::F16) => contiguous_tiled::gelu_erf::HALF, ("ugelu_erf", DType::F32) => contiguous_tiled::gelu_erf::FLOAT, ("ugelu_erf", DType::BF16) => contiguous_tiled::gelu_erf::BFLOAT, ("ugelu", DType::F16) => contiguous_tiled::gelu::HALF, ("ugelu", DType::F32) => contiguous_tiled::gelu::FLOAT, ("ugelu", DType::BF16) => contiguous_tiled::gelu::BFLOAT, ("ulog", DType::F16) => contiguous_tiled::log::HALF, ("ulog", DType::F32) => contiguous_tiled::log::FLOAT, ("ulog", DType::BF16) => contiguous_tiled::log::BFLOAT, ("uneg", DType::F16) => contiguous_tiled::neg::HALF, ("uneg", DType::F32) => contiguous_tiled::neg::FLOAT, ("uneg", DType::BF16) => contiguous_tiled::neg::BFLOAT, ("urecip", DType::F16) => contiguous_tiled::recip::HALF, ("urecip", DType::F32) => contiguous_tiled::recip::FLOAT, ("urecip", DType::BF16) => contiguous_tiled::recip::BFLOAT, ("urelu", DType::F16) => contiguous_tiled::relu::HALF, ("urelu", DType::F32) => contiguous_tiled::relu::FLOAT, ("urelu", DType::BF16) => contiguous_tiled::relu::BFLOAT, ("uround", DType::F16) => contiguous_tiled::round::HALF, ("uround", DType::F32) => contiguous_tiled::round::FLOAT, ("uround", DType::BF16) => contiguous_tiled::round::BFLOAT, ("usilu", DType::F16) => contiguous_tiled::silu::HALF, ("usilu", DType::F32) => contiguous_tiled::silu::FLOAT, ("usilu", DType::BF16) => contiguous_tiled::silu::BFLOAT, ("usin", DType::F16) => contiguous_tiled::sin::HALF, ("usin", DType::F32) => contiguous_tiled::sin::FLOAT, ("usin", DType::BF16) => 
contiguous_tiled::sin::BFLOAT, ("usqr", DType::F16) => contiguous_tiled::sqr::HALF, ("usqr", DType::F32) => contiguous_tiled::sqr::FLOAT, ("usqr", DType::BF16) => contiguous_tiled::sqr::BFLOAT, ("usqrt", DType::F16) => contiguous_tiled::sqrt::HALF, ("usqrt", DType::F32) => contiguous_tiled::sqrt::FLOAT, ("usqrt", DType::BF16) => contiguous_tiled::sqrt::BFLOAT, ("utanh", DType::F16) => contiguous_tiled::tanh::HALF, ("utanh", DType::F32) => contiguous_tiled::tanh::FLOAT, ("utanh", DType::BF16) => contiguous_tiled::tanh::BFLOAT, ("usign", DType::F16) => contiguous_tiled::sign::HALF, ("usign", DType::F32) => contiguous_tiled::sign::FLOAT, ("usign", DType::BF16) => contiguous_tiled::sign::BFLOAT, ("usign", DType::I64) => contiguous_tiled::sign::I64, (name, dtype) => { crate::bail!( "Metal contiguous_tiled unary {name} {dtype:?} not implemented" ) } }; candle_metal_kernels::call_unary_contiguous_tiled( &device.device, &command_buffer, &device.kernels, kernel_name, el_count, src, &buffer, ) .map_err(MetalError::from)?; } (_, _, true) => { use candle_metal_kernels::unary::contiguous; let kernel_name = match (B::KERNEL, dtype) { ("uabs", DType::F16) => contiguous::abs::HALF, ("uabs", DType::F32) => contiguous::abs::FLOAT, ("uabs", DType::BF16) => contiguous::abs::BFLOAT, ("uceil", DType::F16) => contiguous::ceil::HALF, ("uceil", DType::F32) => contiguous::ceil::FLOAT, ("uceil", DType::BF16) => contiguous::ceil::BFLOAT, ("ucos", DType::F16) => contiguous::cos::HALF, ("ucos", DType::F32) => contiguous::cos::FLOAT, ("ucos", DType::BF16) => contiguous::cos::BFLOAT, ("uerf", DType::F16) => contiguous::erf::HALF, ("uerf", DType::F32) => contiguous::erf::FLOAT, ("uerf", DType::BF16) => contiguous::erf::BFLOAT, ("uexp", DType::F16) => contiguous::exp::HALF, ("uexp", DType::F32) => contiguous::exp::FLOAT, ("uexp", DType::BF16) => contiguous::exp::BFLOAT, ("ufloor", DType::F16) => contiguous::floor::HALF, ("ufloor", DType::F32) => contiguous::floor::FLOAT, ("ufloor", DType::BF16) => contiguous::floor::BFLOAT, ("ugelu_erf", DType::F16) => contiguous::gelu_erf::HALF, ("ugelu_erf", DType::F32) => contiguous::gelu_erf::FLOAT, ("ugelu_erf", DType::BF16) => contiguous::gelu_erf::BFLOAT, ("ugelu", DType::F16) => contiguous::gelu::HALF, ("ugelu", DType::F32) => contiguous::gelu::FLOAT, ("ugelu", DType::BF16) => contiguous::gelu::BFLOAT, ("ulog", DType::F16) => contiguous::log::HALF, ("ulog", DType::F32) => contiguous::log::FLOAT, ("ulog", DType::BF16) => contiguous::log::BFLOAT, ("uneg", DType::F16) => contiguous::neg::HALF, ("uneg", DType::F32) => contiguous::neg::FLOAT, ("uneg", DType::BF16) => contiguous::neg::BFLOAT, ("urecip", DType::F16) => contiguous::recip::HALF, ("urecip", DType::F32) => contiguous::recip::FLOAT, ("urecip", DType::BF16) => contiguous::recip::BFLOAT, ("urelu", DType::F16) => contiguous::relu::HALF, ("urelu", DType::F32) => contiguous::relu::FLOAT, ("urelu", DType::BF16) => contiguous::relu::BFLOAT, ("uround", DType::F16) => contiguous::round::HALF, ("uround", DType::F32) => contiguous::round::FLOAT, ("uround", DType::BF16) => contiguous::round::BFLOAT, ("usilu", DType::F16) => contiguous::silu::HALF, ("usilu", DType::F32) => contiguous::silu::FLOAT, ("usilu", DType::BF16) => contiguous::silu::BFLOAT, ("usin", DType::F16) => contiguous::sin::HALF, ("usin", DType::F32) => contiguous::sin::FLOAT, ("usin", DType::BF16) => contiguous::sin::BFLOAT, ("usqr", DType::F16) => contiguous::sqr::HALF, ("usqr", DType::F32) => contiguous::sqr::FLOAT, ("usqr", DType::BF16) => contiguous::sqr::BFLOAT, 
("usqrt", DType::F16) => contiguous::sqrt::HALF, ("usqrt", DType::F32) => contiguous::sqrt::FLOAT, ("usqrt", DType::BF16) => contiguous::sqrt::BFLOAT, ("utanh", DType::F16) => contiguous::tanh::HALF, ("utanh", DType::F32) => contiguous::tanh::FLOAT, ("utanh", DType::BF16) => contiguous::tanh::BFLOAT, ("usign", DType::F16) => contiguous::sign::HALF, ("usign", DType::F32) => contiguous::sign::FLOAT, ("usign", DType::BF16) => contiguous::sign::BFLOAT, ("usign", DType::I64) => contiguous::sign::I64, (name, dtype) => { crate::bail!("Metal contiguous unary {name} {dtype:?} not implemented") } }; candle_metal_kernels::call_unary_contiguous( &device.device, &command_buffer, &device.kernels, kernel_name, el_count, src, &buffer, ) .map_err(MetalError::from)?; } (_, _, false) => { use candle_metal_kernels::unary::strided; let kernel_name = match (B::KERNEL, dtype) { ("ucos", DType::F32) => strided::cos::FLOAT, ("usin", DType::F32) => strided::sin::FLOAT, ("usqr", DType::F32) => strided::sqr::FLOAT, ("usqrt", DType::F32) => strided::sqrt::FLOAT, ("uneg", DType::F32) => strided::neg::FLOAT, ("uexp", DType::F32) => strided::exp::FLOAT, ("ulog", DType::F32) => strided::log::FLOAT, ("ugelu", DType::F32) => strided::gelu::FLOAT, ("ugelu_erf", DType::F32) => strided::gelu_erf::FLOAT, ("uerf", DType::F32) => strided::erf::FLOAT, ("usilu", DType::F32) => strided::silu::FLOAT, ("uabs", DType::F32) => strided::abs::FLOAT, ("uceil", DType::F32) => strided::ceil::FLOAT, ("ufloor", DType::F32) => strided::floor::FLOAT, ("urelu", DType::F32) => strided::relu::FLOAT, ("uround", DType::F32) => strided::round::FLOAT, ("utanh", DType::F32) => strided::tanh::FLOAT, ("ucos", DType::F16) => strided::cos::HALF, ("usin", DType::F16) => strided::sin::HALF, ("usqr", DType::F16) => strided::sqr::HALF, ("usqrt", DType::F16) => strided::sqrt::HALF, ("uneg", DType::F16) => strided::neg::HALF, ("uexp", DType::F16) => strided::exp::HALF, ("ulog", DType::F16) => strided::log::HALF, ("ugelu", DType::F16) => strided::gelu::HALF, ("ugelu_erf", DType::F16) => strided::gelu_erf::HALF, ("uerf", DType::F16) => strided::erf::HALF, ("usilu", DType::F16) => strided::silu::HALF, ("uabs", DType::F16) => strided::abs::HALF, ("uceil", DType::F16) => strided::ceil::HALF, ("ufloor", DType::F16) => strided::floor::HALF, ("urelu", DType::F16) => strided::relu::HALF, ("uround", DType::F16) => strided::round::HALF, ("utanh", DType::F16) => strided::tanh::HALF, ("ucos", DType::BF16) => strided::cos::BFLOAT, ("usin", DType::BF16) => strided::sin::BFLOAT, ("usqr", DType::BF16) => strided::sqr::BFLOAT, ("usqrt", DType::BF16) => strided::sqrt::BFLOAT, ("uneg", DType::BF16) => strided::neg::BFLOAT, ("uexp", DType::BF16) => strided::exp::BFLOAT, ("ulog", DType::BF16) => strided::log::BFLOAT, ("ugelu", DType::BF16) => strided::gelu::BFLOAT, ("ugelu_erf", DType::BF16) => strided::gelu_erf::BFLOAT, ("uerf", DType::BF16) => strided::erf::BFLOAT, ("usilu", DType::BF16) => strided::silu::BFLOAT, ("uabs", DType::BF16) => strided::abs::BFLOAT, ("uceil", DType::BF16) => strided::ceil::BFLOAT, ("ufloor", DType::BF16) => strided::floor::BFLOAT, ("urelu", DType::BF16) => strided::relu::BFLOAT, ("uround", DType::BF16) => strided::round::BFLOAT, ("utanh", DType::BF16) => strided::tanh::BFLOAT, (name, dtype) => { crate::bail!("Metal strided unary {name} {dtype:?} not implemented") } }; let dst = BufferOffset::zero_offset(&buffer); candle_metal_kernels::call_unary_strided( &device.device, &command_buffer, &device.kernels, kernel_name, layout.dims(), src, layout.stride(), 
dst, ) .map_err(MetalError::from)?; } } Ok(Self::new(buffer, device.clone(), el_count, dtype)) } fn binary_impl<B: BinaryOpT>( &self, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout, ) -> Result<Self> { self.binary(B::KERNEL, rhs, lhs_l, rhs_l) } fn where_cond( &self, layout: &Layout, t: &Self, t_l: &Layout, f: &Self, f_l: &Layout, ) -> Result<Self> { let device = self.device.clone(); let shape = t_l.shape(); let dims = shape.dims(); let el = shape.elem_count(); let dtype = t.dtype; let buffer = self.device.new_buffer(el, dtype, "where")?; let command_buffer = self.device.command_buffer()?; if t.dtype() != f.dtype() { crate::bail!( "Invalid where: different dtypes for values {:?} != {:?}", t.dtype(), f.dtype() ); } let name = match (self.dtype, t.dtype()) { (DType::U8, DType::F32) => "where_u8_f32", (DType::U32, DType::F32) => "where_u32_f32", (DType::U8, DType::BF16) => "where_u8_bf16", (DType::U8, DType::F16) => "where_u8_f16", (DType::U8, DType::I64) => "where_u8_i64", (DType::U8, DType::U32) => "where_u8_u32", (DType::U8, DType::U8) => "where_u8_u8", (left, right) => crate::bail!("Metal where_cond {left:?} {right:?} not implemented"), }; let src = buffer_o(&self.buffer, layout, self.dtype); let t = buffer_o(&t.buffer, t_l, t.dtype); let f = buffer_o(&f.buffer, f_l, f.dtype); candle_metal_kernels::call_where_cond_strided( &device.device, &command_buffer, &device.kernels, name, dims, src, layout.stride(), t, t_l.stride(), f, f_l.stride(), &buffer, ) .map_err(MetalError::from)?; Ok(Self::new(buffer, device, el, dtype)) } fn conv1d( &self, layout: &Layout, kernel: &Self, kernel_l: &Layout, params: &ParamsConv1D, ) -> Result<Self> { let device = self.device().clone(); let shape = layout.shape(); let dims = shape.dims(); let strides = layout.stride(); let stride = params.stride; let dilation = params.dilation; let padding = params.padding; let k_size = params.k_size; let l_out = (dims[2] + 2 * padding - dilation * (k_size - 1) - 1) / stride + 1; let dst_el = dims[0] * l_out * dims[1] * k_size; let dst = self .device .new_buffer(dst_el, self.dtype, "conv1d_im2col")?; let command_buffer = self.device.command_buffer()?; let name = match self.dtype { DType::F32 => "im2col1d_f32", dtype => crate::bail!("Metal conv1d {dtype:?} not implemented"), }; let src = buffer_o(&self.buffer, layout, self.dtype); candle_metal_kernels::call_im2col1d_strided( &self.device.device, &command_buffer, &self.device.kernels, name, layout.shape().dims(), strides, (k_size, stride, padding, dilation), src, &dst, ) .map_err(MetalError::from)?; let col = Self { buffer: dst, device, count: dst_el, dtype: self.dtype, }; let l_out = params.l_out(); let b = params.b_size; let n = params.c_out; let k = params.k_size * params.c_in; let m = l_out; let col_l = Layout::contiguous((b, m, k)); let res = if kernel_l.is_contiguous() { let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? } else { // Make the kernel contiguous if not already the case. let mut kernel_c = self.device().zeros_impl(kernel_l.shape(), kernel.dtype())?; kernel.copy_strided_src(&mut kernel_c, 0, kernel_l)?; let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? 
}; let res_l = Layout::contiguous((b, l_out, n)).transpose(1, 2)?; let mut res_t = self.device().zeros_impl(res_l.shape(), res.dtype())?; res.copy_strided_src(&mut res_t, 0, &res_l)?; Ok(res_t) } fn conv_transpose1d( &self, layout: &Layout, k: &Self, k_layout: &Layout, params: &ParamsConvTranspose1D, ) -> Result<Self> { const USE_COL2IM_CONV1D_TR: bool = true; let can_use_col2im = k_layout.is_contiguous() && params.dilation == 1 && params.padding == 0 && params.output_padding == 0; let l_out = params.l_out(); let dst_el = params.c_out * l_out * params.b_size; let buffer = if USE_COL2IM_CONV1D_TR && can_use_col2im { let (b_size, c_in, l_in) = layout.shape().dims3()?; let (c_in2, c_out, k_size) = k_layout.shape().dims3()?; if c_in != c_in2 { crate::bail!( "convtr1d: shape mismatch on c_in {:?} {:?}", layout.shape(), k_layout.shape() ) } let buffer = self .device .new_buffer(dst_el, self.dtype, "conv_transpose1d")?; let name = match self.dtype { DType::F32 => "col2im1d_f32", DType::U32 => "col2im1d_u32", DType::U8 => "col2im1d_u8", dtype => crate::bail!("metal col2im1d {dtype:?} not implemented"), }; let col = { // This merges the last two dimensions of the kernel together. let kernel_l_mm = Layout::new( (b_size, c_in, k_size * c_out).into(), vec![0, k_size * c_out, 1], k_layout.start_offset(), ); self.matmul( k, (b_size, l_in, c_out * k_size, c_in), &layout.transpose(1, 2)?, &kernel_l_mm, )? }; // It is important for the command buffer to be obtained *after* the matmul // kernel has run, otherwise we might use a command-buffer that has been commited // already resulting in the following error. // _status < MTLCommandBufferStatusCommitted > // -[IOGPUMetalCommandBuffer setCurrentCommandEncoder:] let command_buffer = self.device.command_buffer()?; candle_metal_kernels::call_col2im1d( &self.device.device, &command_buffer, &self.device.kernels, name, &[b_size, l_in, c_out, k_size], params.k_size, params.stride, BufferOffset::zero_offset(&col.buffer), &buffer, ) .map_err(MetalError::from)?; buffer } else { let buffer = self .device .new_buffer(dst_el, self.dtype, "conv_transpose1d")?; let command_buffer = self.device.command_buffer()?; let name = match self.dtype { DType::F32 => "conv_transpose1d_f32", DType::F16 => "conv_transpose1d_f16", DType::BF16 => "conv_transpose1d_bf16", DType::U32 => "conv_transpose1d_u32", DType::U8 => "conv_transpose1d_u8", dtype => crate::bail!("Metal conv_transpose1d {dtype:?} not implemented"), }; candle_metal_kernels::call_conv_transpose1d( &self.device.device, &command_buffer, &self.device.kernels, name, params.dilation, params.stride, params.padding, params.output_padding, params.c_out, l_out, params.b_size, layout.dims(), layout.stride(), k_layout.dims(), k_layout.stride(), &self.buffer, layout.start_offset() * self.dtype.size_in_bytes(), &k.buffer, k_layout.start_offset() * k.dtype.size_in_bytes(), &buffer, ) .map_err(MetalError::from)?; buffer }; Ok(Self::new(buffer, self.device.clone(), dst_el, self.dtype)) } fn conv2d( &self, layout: &Layout, kernel: &Self, kernel_l: &Layout, params: &ParamsConv2D, ) -> Result<Self> { let device = self.device().clone(); let shape = layout.shape(); let dims = shape.dims(); let stride = params.stride; let dilation = params.dilation; let padding = params.padding; let h_k = params.k_h; let w_k = params.k_w; let h = dims[2]; let w = dims[3]; let h_out = (h + 2 * padding - dilation * (h_k - 1) - 1) / stride + 1; let w_out = (w + 2 * padding - dilation * (w_k - 1) - 1) / stride + 1; let dst_el = dims[0] * h_out * w_out * dims[1] * 
h_k * w_k; let dst = self .device .new_buffer(dst_el, self.dtype, "conv2d_im2col")?; let command_buffer = self.device.command_buffer()?; let name = match self.dtype { DType::F32 => "im2col_f32", DType::F16 => "im2col_f16", DType::BF16 => "im2col_bf16", DType::U8 => "im2col_u8", DType::U32 => "im2col_u32", dtype => crate::bail!("Metal conv2d {dtype:?} not implemented"), }; let src = buffer_o(&self.buffer, layout, self.dtype); candle_metal_kernels::call_im2col_strided( &self.device.device, &command_buffer, &self.device.kernels, name, layout.shape().dims(), layout.stride(), (h_k, w_k, stride, padding, dilation), src, &dst, ) .map_err(MetalError::from)?; let col = Self { buffer: dst, device, count: dst_el, dtype: self.dtype, }; let h_out = params.out_h(); let w_out = params.out_w(); let b = params.b_size; let n = params.c_out; let k = params.k_h * params.k_w * params.c_in; let m = h_out * w_out; let col_l = Layout::contiguous((b, m, k)); let res = if kernel_l.is_contiguous() { let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? } else { // Make the kernel contiguous if not already the case. let mut kernel_c = self.device().zeros_impl(kernel_l.shape(), kernel.dtype())?; kernel.copy_strided_src(&mut kernel_c, 0, kernel_l)?; let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? }; let res_l = Layout::contiguous((b, h_out, w_out, n)) .transpose(1, 2)? .transpose(1, 3)?; let mut res_t = self.device().zeros_impl(res_l.shape(), res.dtype())?; res.copy_strided_src(&mut res_t, 0, &res_l)?; Ok(res_t) } fn conv_transpose2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &ParamsConvTranspose2D, ) -> Result<Self> { // Kernel shape: (c_in_k, c_out, h_k, w_k) // Input shape: (b_size, c_in, h_in, w_in) let (out_w, out_h) = (params.out_w(), params.out_h()); let dst_el = params.c_out * out_w * out_h * params.b_size; let dims = l.dims(); if dims.len() != 4 { crate::bail!("unexpected input shape for conv_transpose2d {dims:?}, expected 4") } let k_dims = kernel_l.dims(); if k_dims.len() != 4 { crate::bail!("unexpected kernel shape for conv_transpose2d {k_dims:?}, expected 4") } let buffer = self .device .new_buffer(dst_el, self.dtype, "conv_transpose2d")?; let command_buffer = self.device.command_buffer()?; let name = match self.dtype { DType::F32 => "conv_transpose2d_f32", DType::F16 => "conv_transpose2d_f16", DType::BF16 => "conv_transpose2d_bf16", dtype => crate::bail!("Metal conv_transpose2d {dtype:?} not implemented"), }; candle_metal_kernels::call_conv_transpose2d( &self.device.device, &command_buffer, &self.device.kernels, name, CallConvTranspose2dCfg { dilation: params.dilation, stride: params.stride, padding: params.padding, output_padding: params.output_padding, c_out: params.c_out, out_h, out_w, b_size: params.b_size, input_dims: l.dims(), input_stride: l.stride(), kernel_dims: kernel_l.dims(), kernel_stride: kernel_l.stride(), input_offset: l.start_offset() * self.dtype.size_in_bytes(), kernel_offset: kernel_l.start_offset() * kernel.dtype.size_in_bytes(), }, &self.buffer, &kernel.buffer, &buffer, ) .map_err(MetalError::from)?; Ok(Self::new(buffer, self.device.clone(), dst_el, self.dtype)) } fn avg_pool2d( &self, inp_l: &Layout, (w_k, h_k): (usize, usize), (w_stride, h_stride): (usize, usize), ) -> Result<Self> { let shape = inp_l.shape(); 
let (b_size, channels, width, height) = shape.dims4()?; let strides = inp_l.stride(); let name = match self.dtype { DType::F32 => "avg_pool2d_f32", DType::F16 => "avg_pool2d_f16", DType::BF16 => "avg_pool2d_bf16", DType::U8 => "avg_pool2d_u8", DType::U32 => "avg_pool2d_u32", dtype => crate::bail!("Metal avg_pool2d {dtype:?} not implemented"), }; let out_w = (width - w_k) / w_stride + 1; let out_h = (height - h_k) / h_stride + 1; let dst_el = out_w * out_h * b_size * channels; let buffer = self.device.new_buffer(dst_el, self.dtype, "avg_pool2d")?; let command_buffers = self.device.command_buffer()?; candle_metal_kernels::call_pool2d( &self.device.device, &command_buffers, &self.device.kernels, name, inp_l.dims(), strides, out_w, out_h, w_k, h_k, w_stride, h_stride, &self.buffer, &buffer, ) .map_err(MetalError::from)?; Ok(Self::new(buffer, self.device.clone(), dst_el, self.dtype)) } fn max_pool2d( &self, inp_l: &Layout, (w_k, h_k): (usize, usize), (w_stride, h_stride): (usize, usize), ) -> Result<Self> { let shape = inp_l.shape(); let (b_size, channels, width, height) = shape.dims4()?; let strides = inp_l.stride(); let name = match self.dtype { DType::F32 => "max_pool2d_f32", DType::F16 => "max_pool2d_f16", DType::BF16 => "max_pool2d_bf16", DType::U8 => "max_pool2d_u8", DType::U32 => "max_pool2d_u32", dtype => crate::bail!("Metal max_pool2d {dtype:?} not implemented"), }; let out_w = (width - w_k) / w_stride + 1; let out_h = (height - h_k) / h_stride + 1; let dst_el = out_w * out_h * b_size * channels; let buffer = self.device.new_buffer(dst_el, self.dtype, "max_pool2d")?; let command_buffers = self.device.command_buffer()?; candle_metal_kernels::call_pool2d( &self.device.device, &command_buffers, &self.device.kernels, name, inp_l.dims(), strides, out_w, out_h, w_k, h_k, w_stride, h_stride, &self.buffer, &buffer, ) .map_err(MetalError::from)?; Ok(Self::new(buffer, self.device.clone(), dst_el, self.dtype)) } fn upsample_nearest1d(&self, _: &Layout, _: usize) -> Result<Self> { crate::bail!("Metal upsample_nearest1d not implemented") } fn upsample_nearest2d(&self, inp_l: &Layout, out_w: usize, out_h: usize) -> Result<Self> { // let inp = &inp.slice(inp_l.start_offset()..); let shape = inp_l.shape(); let dims = shape.dims(); let strides = inp_l.stride(); if dims.len() != 4 { crate::bail!("unexpected input shape for upsample {dims:?}") } let name = match self.dtype { DType::F32 => "upsample_nearest2d_f32", DType::F16 => "upsample_nearest2d_f16", DType::BF16 => "upsample_nearest2d_bf16", DType::U8 => "upsample_nearest2d_u8", DType::U32 => "upsample_nearest2d_u32", dtype => crate::bail!("Metal upsample_nearest2d {dtype:?} not implemented"), }; let dst_el = out_w * out_h * dims[0] * dims[1]; let buffer = self .device .new_buffer(dst_el, self.dtype, "upsample_nearest2d")?; let command_buffer = self.device.command_buffer()?; let src = buffer_o(&self.buffer, inp_l, self.dtype); candle_metal_kernels::call_upsample_nearest_2d( &self.device.device, &command_buffer, &self.device.kernels, name, dims, strides, out_w, out_h, src, &buffer, ) .map_err(MetalError::from)?; Ok(Self::new(buffer, self.device.clone(), dst_el, self.dtype)) } fn gather(&self, src_l: &Layout, ids: &Self, ids_l: &Layout, dim: usize) -> Result<Self> { if !ids_l.is_contiguous() { return Err(crate::Error::RequiresContiguous { op: "gather" }.bt()); }; let ids_el = ids_l.dims()[dim]; let dst_el = ids_l.shape().elem_count(); let dtype = self.dtype; let device = self.device(); let buffer = device.new_buffer(dst_el, dtype, "index_select")?; let 
name = match (ids.dtype, self.dtype) { (DType::U32, DType::F32) => "gather_u32_f32", (DType::U32, DType::F16) => "gather_u32_f16", (DType::U32, DType::BF16) => "gather_u32_bf16", (left, right) => crate::bail!("Metal gather {left:?} {right:?} not implemented"), }; let command_buffer = self.device.command_buffer()?; let src = buffer_o(&self.buffer, src_l, dtype); let ids = buffer_o(&ids.buffer, ids_l, ids.dtype); candle_metal_kernels::call_gather( &device.device, &command_buffer, &self.device.kernels, name, src_l.dims(), ids_el, dim, src, ids, &buffer, ) .map_err(MetalError::from)?; Ok(Self::new(buffer, device.clone(), dst_el, dtype)) } fn scatter_add( &self, l: &Layout, ids: &Self, ids_l: &Layout, src: &Self, src_l: &Layout, dim: usize, ) -> Result<Self> { let mut acc = self.device.zeros_impl(l.shape(), self.dtype())?; self.copy_strided_src(&mut acc, 0, l)?; if !ids_l.is_contiguous() || !src_l.is_contiguous() { return Err(crate::Error::RequiresContiguous { op: "scatter-add" }.bt()); }; let name = match (ids.dtype, self.dtype) { (DType::U8, DType::F32) => "sa_u8_f32", (DType::U8, DType::F16) => "sa_u8_f16", (DType::U8, DType::BF16) => "sa_u8_bf16", (DType::U32, DType::F32) => "sa_u32_f32", (DType::U32, DType::F16) => "sa_u32_f16", (DType::U32, DType::BF16) => "sa_u32_bf16", (DType::I64, DType::F32) => "sa_i64_f32", (DType::I64, DType::F16) => "sa_i64_f16", (DType::I64, DType::BF16) => "sa_i64_bf16", _ => Err(MetalError::UnexpectedDType { msg: "scatter-add ids should be u8/u32/i64", expected: DType::U32, got: ids.dtype(), })?, }; let command_buffer = self.device.command_buffer()?; let src = buffer_o(&src.buffer, src_l, src.dtype); let ids = buffer_o(&ids.buffer, ids_l, ids.dtype); candle_metal_kernels::call_scatter_add( &self.device.device, &command_buffer, &self.device.kernels, name, src_l.dims(), l.dims(), dim, src, ids, &acc.buffer, ) .map_err(MetalError::from)?; Ok(acc) } fn index_select(&self, ids: &Self, src_l: &Layout, ids_l: &Layout, dim: usize) -> Result<Self> { if !ids_l.is_contiguous() { crate::bail!("Metal index_select requires contiguous ids") } let left_size: usize = src_l.dims()[..dim].iter().product(); let right_size: usize = src_l.dims()[dim + 1..].iter().product(); let ids_el = ids_l.shape().elem_count(); let dst_el = ids_el * left_size * right_size; let dtype = self.dtype; let device = self.device(); let buffer = device.new_buffer(dst_el, dtype, "index_select")?; let name = match (ids.dtype, self.dtype) { (DType::U8, DType::BF16) => "is_u8_bf16", (DType::U8, DType::F32) => "is_u8_f32", (DType::U8, DType::F16) => "is_u8_f16", (DType::U32, DType::F32) => "is_u32_f32", (DType::U32, DType::F16) => "is_u32_f16", (DType::U32, DType::BF16) => "is_u32_bf16", (DType::I64, DType::F32) => "is_i64_f32", (DType::I64, DType::F16) => "is_i64_f16", (DType::I64, DType::BF16) => "is_i64_bf16", (left, right) => { crate::bail!("Metal contiguous index_select {left:?} {right:?} not implemented") } }; let command_buffer = self.device.command_buffer()?; let src = buffer_o(&self.buffer, src_l, dtype); let ids = buffer_o(&ids.buffer, ids_l, ids.dtype); candle_metal_kernels::call_index_select( &device.device, &command_buffer, &self.device.kernels, name, src_l.dims(), ids_el, dim, src_l.is_contiguous(), src_l.dims(), src_l.stride(), src, ids, &buffer, ) .map_err(MetalError::from)?; Ok(Self::new(buffer, device.clone(), dst_el, dtype)) } fn index_add( &self, l: &Layout, ids: &Self, ids_l: &Layout, src: &Self, src_l: &Layout, dim: usize, ) -> Result<Self> { let mut acc = self.device.zeros_impl(l.shape(), 
self.dtype())?; self.copy_strided_src(&mut acc, 0, l)?; if !ids_l.is_contiguous() || !src_l.is_contiguous() { return Err(crate::Error::RequiresContiguous { op: "index-add" }.bt()); }; let name = match (ids.dtype, self.dtype) { (DType::I64, DType::BF16) => "ia_i64_bf16", (DType::I64, DType::F16) => "ia_i64_f16", (DType::I64, DType::F32) => "ia_i64_f32", (DType::I64, DType::I64) => "ia_i64_i64", (DType::I64, DType::U32) => "ia_i64_u32", (DType::I64, DType::U8) => "ia_i64_u8", (DType::U32, DType::BF16) => "ia_u32_bf16", (DType::U32, DType::F16) => "ia_u32_f16", (DType::U32, DType::F32) => "ia_u32_f32", (DType::U32, DType::I64) => "ia_u32_i64", (DType::U32, DType::U32) => "ia_u32_u32", (DType::U32, DType::U8) => "ia_u32_u8", (DType::U8, DType::BF16) => "ia_u8_bf16", (DType::U8, DType::F16) => "ia_u8_f16", (DType::U8, DType::F32) => "ia_u8_f32", (DType::U8, DType::I64) => "ia_u8_i64", (DType::U8, DType::U32) => "ia_u8_u32", (DType::U8, DType::U8) => "ia_u8_u8", _ => Err(MetalError::UnexpectedDType { msg: "index-add ids should be u8/u32/i64", expected: DType::U32, got: ids.dtype(), })?, }; let command_buffer = self.device.command_buffer()?; let src = buffer_o(&src.buffer, src_l, src.dtype); let ids = buffer_o(&ids.buffer, ids_l, ids.dtype); candle_metal_kernels::call_index_add( &self.device.device, &command_buffer, &self.device.kernels, name, src_l.dims(), l.dims(), ids_l.dims(), dim, src, ids, &acc.buffer, ) .map_err(MetalError::from)?; Ok(acc) } fn matmul( &self, rhs: &Self, (b, m, n, k): (usize, usize, usize, usize), lhs_l: &Layout, rhs_l: &Layout, ) -> Result<Self> { let buffer = self.device.new_buffer(b * m * n, self.dtype, "matmul")?; let name = match self.dtype { DType::F32 => "sgemm", DType::F16 => "hgemm", DType::BF16 => "bgemm", dtype => { return Err(MetalError::Message(format!("matmul doesn't support {dtype:?}")).into()) } }; let command_buffer = self.device.command_buffer()?; command_buffer.set_label("matmul"); candle_metal_kernels::call_gemm( &self.device.device, &command_buffer, &self.device.kernels, name, (b, m, n, k), lhs_l.stride(), lhs_l.start_offset() * self.dtype.size_in_bytes(), &self.buffer, rhs_l.stride(), rhs_l.start_offset() * rhs.dtype.size_in_bytes(), &rhs.buffer, &buffer, ) .map_err(MetalError::from)?; Ok(Self::new( buffer, self.device.clone(), b * m * n, self.dtype(), )) } fn copy2d( &self, dst: &mut Self, d1: usize, d2: usize, src_s: usize, dst_s: usize, src_o: usize, dst_o: usize, ) -> Result<()> { if self.dtype() != dst.dtype() { crate::bail!( "copy2d with inconsistent dtypes {:?} {:?}", self.dtype(), dst.dtype() ) } let command_buffer = self.device.command_buffer()?; if src_s == d2 && dst_s == d2 { command_buffer.set_label("copy2d_contiguous"); let blit = command_buffer.new_blit_command_encoder(); blit.set_label("copy2d_contiguous"); let src_offset = (src_o * self.dtype.size_in_bytes()) as NSUInteger; let length = (d1 * d2 * self.dtype.size_in_bytes()) as NSUInteger; let dst_offset = (dst_o * dst.dtype().size_in_bytes()) as NSUInteger; blit.copy_from_buffer(&self.buffer, src_offset, dst.buffer(), dst_offset, length); blit.end_encoding(); } else { let el_count = d1 * d2; if el_count == 0 { return Ok(()); } let kernel_name = match self.dtype { DType::F32 => candle_metal_kernels::copy2d::FLOAT, DType::F16 => candle_metal_kernels::copy2d::HALF, DType::BF16 => candle_metal_kernels::copy2d::BFLOAT, DType::I64 => candle_metal_kernels::copy2d::I64, DType::U32 => candle_metal_kernels::copy2d::U32, DType::U8 => candle_metal_kernels::copy2d::U8, dtype => crate::bail!("Metal 
copy2d {dtype:?} not implemented"), }; candle_metal_kernels::call_copy2d( &self.device.device, &command_buffer, &self.device.kernels, kernel_name, &self.buffer, &dst.buffer, d1, d2, src_s, dst_s, src_o * self.dtype.size_in_bytes(), dst_o * self.dtype.size_in_bytes(), ) .map_err(MetalError::from)?; command_buffer.set_label("copy2d"); } Ok(()) } fn copy_strided_src(&self, dst: &mut Self, dst_offset: usize, src_l: &Layout) -> Result<()> { let command_buffer = self.device.command_buffer()?; if src_l.is_contiguous() && self.dtype == dst.dtype() { command_buffer.set_label("copy_contiguous"); let blit = command_buffer.new_blit_command_encoder(); blit.set_label("copy_contiguous"); let src_offset = (src_l.start_offset() * self.dtype.size_in_bytes()) as NSUInteger; let length = (src_l.shape().elem_count() * self.dtype.size_in_bytes()) as NSUInteger; let dst_offset = (dst_offset * dst.dtype().size_in_bytes()) as NSUInteger; blit.copy_from_buffer(&self.buffer, src_offset, dst.buffer(), dst_offset, length); blit.end_encoding(); } else { let src_shape = src_l.shape(); let el_count = src_shape.elem_count(); if el_count == 0 { return Ok(()); } let kernel_name = match self.dtype { DType::F32 => candle_metal_kernels::unary::strided::copy::FLOAT, DType::F16 => candle_metal_kernels::unary::strided::copy::HALF, DType::BF16 => candle_metal_kernels::unary::strided::copy::BFLOAT, DType::I64 => candle_metal_kernels::unary::strided::copy::I64, DType::U32 => candle_metal_kernels::unary::strided::copy::U32, DType::U8 => candle_metal_kernels::unary::strided::copy::U8, dtype => crate::bail!("Metal copy_strided {dtype:?} not implemented"), }; let src = buffer_o(&self.buffer, src_l, self.dtype); let dst = BufferOffset { buffer: &dst.buffer, offset_in_bytes: dst_offset * dst.dtype.size_in_bytes(), }; candle_metal_kernels::call_unary_strided( &self.device.device, &command_buffer, &self.device.kernels, kernel_name, src_l.dims(), src, src_l.stride(), dst, ) .map_err(MetalError::from)?; command_buffer.set_label("copy_strided"); } Ok(()) } } impl MetalStorage { pub fn new(buffer: Arc<Buffer>, device: MetalDevice, count: usize, dtype: DType) -> Self { Self { buffer, device, count, dtype, } } pub fn buffer(&self) -> &Buffer { &self.buffer } pub fn binary( &self, op: &'static str, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout, ) -> Result<Self> { let device = self.device(); let shape = lhs_l.shape(); let el_count = shape.elem_count(); let command_buffer = device.command_buffer()?; let lhs = buffer_o(&self.buffer, lhs_l, self.dtype); let rhs = buffer_o(&rhs.buffer, rhs_l, rhs.dtype); let (buffer, dtype) = if lhs_l.is_contiguous() && rhs_l.is_contiguous() && &op[..1] != "b" { use candle_metal_kernels::binary::contiguous; let (kernel_name, dtype) = match (op, self.dtype) { ("add", DType::F32) => (contiguous::add::FLOAT, self.dtype), ("sub", DType::F32) => (contiguous::sub::FLOAT, self.dtype), ("mul", DType::F32) => (contiguous::mul::FLOAT, self.dtype), ("div", DType::F32) => (contiguous::div::FLOAT, self.dtype), ("eq", DType::F32) => (contiguous::eq::FLOAT, DType::U8), ("ne", DType::F32) => (contiguous::ne::FLOAT, DType::U8), ("le", DType::F32) => (contiguous::le::FLOAT, DType::U8), ("lt", DType::F32) => (contiguous::lt::FLOAT, DType::U8), ("ge", DType::F32) => (contiguous::ge::FLOAT, DType::U8), ("gt", DType::F32) => (contiguous::gt::FLOAT, DType::U8), ("add", DType::F16) => (contiguous::add::HALF, self.dtype), ("sub", DType::F16) => (contiguous::sub::HALF, self.dtype), ("mul", DType::F16) => (contiguous::mul::HALF, self.dtype), 
("div", DType::F16) => (contiguous::div::HALF, self.dtype), ("eq", DType::F16) => (contiguous::eq::HALF, DType::U8), ("ne", DType::F16) => (contiguous::ne::HALF, DType::U8), ("le", DType::F16) => (contiguous::le::HALF, DType::U8), ("lt", DType::F16) => (contiguous::lt::HALF, DType::U8), ("ge", DType::F16) => (contiguous::ge::HALF, DType::U8), ("gt", DType::F16) => (contiguous::gt::HALF, DType::U8), ("add", DType::BF16) => (contiguous::add::BFLOAT, self.dtype), ("sub", DType::BF16) => (contiguous::sub::BFLOAT, self.dtype), ("mul", DType::BF16) => (contiguous::mul::BFLOAT, self.dtype), ("div", DType::BF16) => (contiguous::div::BFLOAT, self.dtype), ("eq", DType::BF16) => (contiguous::eq::BFLOAT, DType::U8), ("ne", DType::BF16) => (contiguous::ne::BFLOAT, DType::U8), ("le", DType::BF16) => (contiguous::le::BFLOAT, DType::U8), ("lt", DType::BF16) => (contiguous::lt::BFLOAT, DType::U8), ("ge", DType::BF16) => (contiguous::ge::BFLOAT, DType::U8), ("gt", DType::BF16) => (contiguous::gt::BFLOAT, DType::U8), ("add", DType::I64) => (contiguous::add::I64, self.dtype), ("sub", DType::I64) => (contiguous::sub::I64, self.dtype), ("mul", DType::I64) => (contiguous::mul::I64, self.dtype), ("div", DType::I64) => (contiguous::div::I64, self.dtype), ("eq", DType::I64) => (contiguous::eq::I64, DType::U8), ("ne", DType::I64) => (contiguous::ne::I64, DType::U8), ("le", DType::I64) => (contiguous::le::I64, DType::U8), ("lt", DType::I64) => (contiguous::lt::I64, DType::U8), ("ge", DType::I64) => (contiguous::ge::I64, DType::U8), ("gt", DType::I64) => (contiguous::gt::I64, DType::U8), ("add", DType::U32) => (contiguous::add::U32, self.dtype), ("sub", DType::U32) => (contiguous::sub::U32, self.dtype), ("mul", DType::U32) => (contiguous::mul::U32, self.dtype), ("div", DType::U32) => (contiguous::div::U32, self.dtype), ("eq", DType::U32) => (contiguous::eq::U32, DType::U8), ("ne", DType::U32) => (contiguous::ne::U32, DType::U8), ("le", DType::U32) => (contiguous::le::U32, DType::U8), ("lt", DType::U32) => (contiguous::lt::U32, DType::U8), ("ge", DType::U32) => (contiguous::ge::U32, DType::U8), ("gt", DType::U32) => (contiguous::gt::U32, DType::U8), ("add", DType::U8) => (contiguous::add::U8, self.dtype), ("sub", DType::U8) => (contiguous::sub::U8, self.dtype), ("mul", DType::U8) => (contiguous::mul::U8, self.dtype), ("div", DType::U8) => (contiguous::div::U8, self.dtype), ("eq", DType::U8) => (contiguous::eq::U8, DType::U8), ("ne", DType::U8) => (contiguous::ne::U8, DType::U8), ("le", DType::U8) => (contiguous::le::U8, DType::U8), ("lt", DType::U8) => (contiguous::lt::U8, DType::U8), ("ge", DType::U8) => (contiguous::ge::U8, DType::U8), ("gt", DType::U8) => (contiguous::gt::U8, DType::U8), (name, dtype) => { crate::bail!("Metal contiguous binary {name} {dtype:?} not implemented") } }; let buffer = device.new_buffer(el_count, dtype, op)?; candle_metal_kernels::call_binary_contiguous( &device.device, &command_buffer, &device.kernels, kernel_name, el_count, lhs, rhs, &buffer, ) .map_err(MetalError::from)?; (buffer, dtype) } else { use candle_metal_kernels::binary::strided; let (kernel_name, dtype) = match (op, self.dtype) { ("badd", DType::F32) => (strided::add::FLOAT, self.dtype), ("bsub", DType::F32) => (strided::sub::FLOAT, self.dtype), ("bmul", DType::F32) => (strided::mul::FLOAT, self.dtype), ("bdiv", DType::F32) => (strided::div::FLOAT, self.dtype), ("bminimum", DType::F32) => (strided::min::FLOAT, self.dtype), ("bmaximum", DType::F32) => (strided::max::FLOAT, self.dtype), ("eq", DType::F32) => (strided::eq::FLOAT, 
DType::U8), ("ne", DType::F32) => (strided::ne::FLOAT, DType::U8), ("le", DType::F32) => (strided::le::FLOAT, DType::U8), ("lt", DType::F32) => (strided::lt::FLOAT, DType::U8), ("ge", DType::F32) => (strided::ge::FLOAT, DType::U8), ("gt", DType::F32) => (strided::gt::FLOAT, DType::U8), ("badd", DType::F16) => (strided::add::HALF, self.dtype), ("bsub", DType::F16) => (strided::sub::HALF, self.dtype), ("bmul", DType::F16) => (strided::mul::HALF, self.dtype), ("bdiv", DType::F16) => (strided::div::HALF, self.dtype), ("bminimum", DType::F16) => (strided::min::HALF, self.dtype), ("bmaximum", DType::F16) => (strided::max::HALF, self.dtype), ("eq", DType::F16) => (strided::eq::HALF, DType::U8), ("ne", DType::F16) => (strided::ne::HALF, DType::U8), ("le", DType::F16) => (strided::le::HALF, DType::U8), ("lt", DType::F16) => (strided::lt::HALF, DType::U8), ("ge", DType::F16) => (strided::ge::HALF, DType::U8), ("gt", DType::F16) => (strided::gt::HALF, DType::U8), ("badd", DType::BF16) => (strided::add::BFLOAT, self.dtype), ("bsub", DType::BF16) => (strided::sub::BFLOAT, self.dtype), ("bmul", DType::BF16) => (strided::mul::BFLOAT, self.dtype), ("bdiv", DType::BF16) => (strided::div::BFLOAT, self.dtype), ("bminimum", DType::BF16) => (strided::min::BFLOAT, self.dtype), ("bmaximum", DType::BF16) => (strided::max::BFLOAT, self.dtype), ("eq", DType::BF16) => (strided::eq::BFLOAT, DType::U8), ("ne", DType::BF16) => (strided::ne::BFLOAT, DType::U8), ("le", DType::BF16) => (strided::le::BFLOAT, DType::U8), ("lt", DType::BF16) => (strided::lt::BFLOAT, DType::U8), ("ge", DType::BF16) => (strided::ge::BFLOAT, DType::U8), ("gt", DType::BF16) => (strided::gt::BFLOAT, DType::U8), ("badd", DType::I64) => (strided::add::I64, self.dtype), ("bsub", DType::I64) => (strided::sub::I64, self.dtype), ("bmul", DType::I64) => (strided::mul::I64, self.dtype), ("bdiv", DType::I64) => (strided::div::I64, self.dtype), ("bminimum", DType::I64) => (strided::min::I64, self.dtype), ("bmaximum", DType::I64) => (strided::max::I64, self.dtype), ("eq", DType::I64) => (strided::eq::I64, DType::U8), ("ne", DType::I64) => (strided::ne::I64, DType::U8), ("le", DType::I64) => (strided::le::I64, DType::U8), ("lt", DType::I64) => (strided::lt::I64, DType::U8), ("ge", DType::I64) => (strided::ge::I64, DType::U8), ("gt", DType::I64) => (strided::gt::I64, DType::U8), ("badd", DType::U32) => (strided::add::U32, self.dtype), ("bsub", DType::U32) => (strided::sub::U32, self.dtype), ("bmul", DType::U32) => (strided::mul::U32, self.dtype), ("bdiv", DType::U32) => (strided::div::U32, self.dtype), ("bminimum", DType::U32) => (strided::min::U32, self.dtype), ("bmaximum", DType::U32) => (strided::max::U32, self.dtype), ("eq", DType::U32) => (strided::eq::U32, DType::U8), ("ne", DType::U32) => (strided::ne::U32, DType::U8), ("le", DType::U32) => (strided::le::U32, DType::U8), ("lt", DType::U32) => (strided::lt::U32, DType::U8), ("ge", DType::U32) => (strided::ge::U32, DType::U8), ("gt", DType::U32) => (strided::gt::U32, DType::U8), ("badd", DType::U8) => (strided::add::U8, self.dtype), ("bsub", DType::U8) => (strided::sub::U8, self.dtype), ("bmul", DType::U8) => (strided::mul::U8, self.dtype), ("bdiv", DType::U8) => (strided::div::U8, self.dtype), ("bminimum", DType::U8) => (strided::min::U8, self.dtype), ("bmaximum", DType::U8) => (strided::max::U8, self.dtype), ("eq", DType::U8) => (strided::eq::U8, DType::U8), ("ne", DType::U8) => (strided::ne::U8, DType::U8), ("le", DType::U8) => (strided::le::U8, DType::U8), ("lt", DType::U8) => (strided::lt::U8, 
DType::U8), ("ge", DType::U8) => (strided::ge::U8, DType::U8), ("gt", DType::U8) => (strided::gt::U8, DType::U8), (name, dtype) => { crate::bail!("Metal strided binary {name} {dtype:?} not implemented") } }; let buffer = device.new_buffer(el_count, dtype, op)?; candle_metal_kernels::call_binary_strided( &device.device, &command_buffer, &device.kernels, kernel_name, lhs_l.dims(), lhs, lhs_l.stride(), rhs, rhs_l.stride(), &buffer, ) .map_err(MetalError::from)?; (buffer, dtype) }; command_buffer.set_label("binary"); Ok(Self::new(buffer, device.clone(), el_count, dtype)) } pub(crate) fn to_cpu<T: Clone>(&self) -> Result<Vec<T>> { let size = (self.count * self.dtype.size_in_bytes()) as NSUInteger; let buffer = self.device.new_buffer_managed(size)?; { let command_buffer = self.device.command_buffer()?; command_buffer.set_label("to_cpu"); let blit = command_buffer.new_blit_command_encoder(); blit.set_label("blit_to_cpu"); blit.copy_from_buffer(&self.buffer, 0, &buffer, 0, size); blit.end_encoding(); } self.device.wait_until_completed()?; Ok(read_to_vec(&buffer, self.count)) } } impl BackendDevice for MetalDevice { type Storage = MetalStorage; fn new(ordinal: usize) -> Result<Self> { let device = metal::Device::all().swap_remove(ordinal); let command_queue = device.new_command_queue(); let command_buffer = command_queue.new_command_buffer().to_owned(); command_buffer.enqueue(); let command_buffer = Arc::new(RwLock::new(command_buffer)); let command_buffer_index = Arc::new(RwLock::new(0)); let kernels = Arc::new(Kernels::new()); let buffers = Arc::new(RwLock::new(HashMap::new())); let compute_per_buffer = match std::env::var("CANDLE_METAL_COMPUTE_PER_BUFFER") { Ok(val) => val.parse()?, _ => 50, }; let seed = Arc::new(Mutex::new(device.new_buffer_with_data( [299792458].as_ptr() as *const c_void, 4, MTLResourceOptions::StorageModeManaged, ))); Ok(Self { id: DeviceId::new(), device, command_queue, command_buffer, command_buffer_index, compute_per_buffer, buffers, kernels, seed, }) } fn location(&self) -> crate::DeviceLocation { crate::DeviceLocation::Metal { gpu_id: self.registry_id() as usize, } } fn same_device(&self, rhs: &Self) -> bool { self.id == rhs.id } unsafe fn alloc_uninit(&self, shape: &Shape, dtype: DType) -> Result<MetalStorage> { let buffer = self.new_buffer(shape.elem_count(), dtype, "alloc-uninit")?; Ok(MetalStorage::new( buffer, self.clone(), shape.elem_count(), dtype, )) } fn zeros_impl(&self, shape: &Shape, dtype: DType) -> Result<MetalStorage> { let size = shape.elem_count() * dtype.size_in_bytes(); let buffer = self.allocate_zeros(size)?; Ok(MetalStorage::new( buffer, self.clone(), shape.elem_count(), dtype, )) } fn ones_impl(&self, shape: &Shape, dtype: DType) -> Result<Self::Storage> { // TODO Is there a faster way ? 
let cpu_storage = crate::cpu_backend::CpuDevice.ones_impl(shape, dtype)?; self.storage_from_cpu_storage(&cpu_storage) } fn storage_from_slice<T: crate::WithDType>(&self, s: &[T]) -> Result<Self::Storage> { let (count, buffer) = match T::cpu_storage_ref(s) { CpuStorageRef::U8(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorageRef::U32(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorageRef::I64(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorageRef::BF16(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorageRef::F16(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorageRef::F32(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorageRef::F64(storage) => (storage.len(), self.new_buffer_with_data(storage)), }; Ok(Self::Storage::new(buffer?, self.clone(), count, T::DTYPE)) } fn storage_from_cpu_storage(&self, storage: &CpuStorage) -> Result<Self::Storage> { let (count, buffer) = match storage { CpuStorage::U8(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorage::U32(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorage::I64(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorage::BF16(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorage::F16(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorage::F32(storage) => (storage.len(), self.new_buffer_with_data(storage)), CpuStorage::F64(storage) => (storage.len(), self.new_buffer_with_data(storage)), }; Ok(Self::Storage::new( buffer?, self.clone(), count, storage.dtype(), )) } fn storage_from_cpu_storage_owned(&self, storage: CpuStorage) -> Result<Self::Storage> { self.storage_from_cpu_storage(&storage) } fn rand_uniform( &self, shape: &Shape, dtype: DType, min: f64, max: f64, ) -> Result<Self::Storage> { let name = match dtype { DType::F32 => "rand_uniform_f32", DType::F16 => "rand_uniform_f16", DType::BF16 => "rand_uniform_bf16", dtype => crate::bail!("rand_uniform not implemented for {dtype:?}"), }; let buffer = self.new_buffer(shape.elem_count(), dtype, "rand_uniform")?; let command_buffer = self.command_buffer()?; candle_metal_kernels::call_random_uniform( &self.device, &command_buffer, &self.kernels, name, min as f32, max as f32, shape.elem_count(), &self.seed.lock().unwrap(), &buffer, ) .map_err(MetalError::from)?; Ok(Self::Storage::new( buffer, self.clone(), shape.elem_count(), dtype, )) } fn rand_normal( &self, shape: &Shape, dtype: DType, mean: f64, stddev: f64, ) -> Result<Self::Storage> { let name = match dtype { DType::F32 => "rand_normal_f32", DType::F16 => "rand_normal_f16", DType::BF16 => "rand_normal_bf16", dtype => crate::bail!("rand_uniform not implemented for {dtype:?}"), }; let buffer = self.new_buffer(shape.elem_count(), dtype, "rand_normal")?; let command_buffer = self.command_buffer()?; candle_metal_kernels::call_random_normal( &self.device, &command_buffer, &self.kernels, name, mean as f32, stddev as f32, shape.elem_count(), &self.seed.lock().unwrap(), &buffer, ) .map_err(MetalError::from)?; Ok(Self::Storage::new( buffer, self.clone(), shape.elem_count(), dtype, )) } fn set_seed(&self, seed: u64) -> Result<()> { let seed: u32 = seed.try_into().map_err(|_| { MetalError::Message("Metal seed must be less than or equal to u32::MAX".to_string()) })?; let seed_buffer = self.seed.try_lock().map_err(MetalError::from)?; let contents = seed_buffer.contents(); unsafe { std::ptr::copy([seed].as_ptr(), 
contents as *mut u32, 1); } seed_buffer.did_modify_range(metal::NSRange::new(0, 4)); Ok(()) } fn synchronize(&self) -> Result<()> { self.wait_until_completed() } } fn read_to_vec<T: Clone>(buffer: &Buffer, n: usize) -> Vec<T> { let ptr = buffer.contents() as *const T; assert!(!ptr.is_null()); let slice = unsafe { std::slice::from_raw_parts(ptr, n) }; slice.to_vec() }
candle/candle-core/src/metal_backend/mod.rs/0
{ "file_path": "candle/candle-core/src/metal_backend/mod.rs", "repo_id": "candle", "token_count": 47057 }
19
use crate::Result; pub(super) fn nearest_int(v: f32) -> i32 { v.round() as i32 } /// Validates that the input and output are the right size and returns an iterator which maps each /// input region `xs` to its corresponding output block in `ys`. Each output region is guaranteed /// to be `T::BLCK_SIZE` long. pub(super) fn group_for_quantization<'a, 'b, T: super::k_quants::GgmlType>( xs: &'b [f32], ys: &'a mut [T], ) -> Result<Vec<(&'a mut T, &'b [f32])>> { let block_size = T::BLCK_SIZE; let dtype = T::DTYPE; let expected_blocks = xs.len() / block_size; let actual_blocks = ys.len(); // Validate that the input is the right size if expected_blocks != actual_blocks { crate::bail!("quantize {dtype:?}: expected {expected_blocks} blocks but only {actual_blocks} were provided!") } Ok(ys.iter_mut().zip(xs.chunks_exact(block_size)).collect()) } /// Validates that the input and output are the right size and returns an iterator which maps each /// input block `xs` to its corresponding output region in `ys`. Each output region is guaranteed /// to be `T::BLCK_SIZE` long. pub(super) fn group_for_dequantization<'a, 'b, T: super::k_quants::GgmlType>( xs: &'a [T], ys: &'b mut [f32], ) -> Result<Vec<(&'a T, &'b mut [f32])>> { let block_size = T::BLCK_SIZE; let dtype = T::DTYPE; let actual_output_len = ys.len(); let expected_output_len = xs.len() * block_size; // Validate that the output is the right size if expected_output_len != actual_output_len { crate::bail!("dequantize {dtype:?}: ys (len = {actual_output_len}) does not match the expected length of {expected_output_len}!") } // Zip the blocks and outputs together Ok(xs.iter().zip(ys.chunks_exact_mut(block_size)).collect()) } pub(super) fn get_scale_min_k4(j: usize, q: &[u8]) -> (u8, u8) { if j < 4 { let d = q[j] & 63; let m = q[j + 4] & 63; (d, m) } else { let d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4); let m = (q[j + 4] >> 4) | ((q[j] >> 6) << 4); (d, m) } } pub(super) unsafe fn make_qx_quants( n: usize, nmax: i32, x: *const f32, ls: *mut i8, rmse_type: i32, ) -> f32 { let mut max = 0f32; let mut amax = 0f32; for i in 0..n { let x = *x.add(i); let ax = x.abs(); if ax > amax { amax = ax; max = x; } } if amax == 0. 
{ // all zero for i in 0..n { *ls.add(i) = 0; } return 0.; } let mut iscale = -(nmax as f32) / max; if rmse_type == 0 { for i in 0..n { let x = *x.add(i); let l = nearest_int(iscale * x); *ls.add(i) = (nmax + l.clamp(-nmax, nmax - 1)) as i8; } return 1.0 / iscale; } let weight_type = rmse_type % 2; let mut sumlx = 0f32; let mut suml2 = 0f32; for i in 0..n { let x = *x.add(i); let l = nearest_int(iscale * x); let l = l.clamp(-nmax, nmax - 1); *ls.add(i) = (l + nmax) as i8; let w = if weight_type == 1 { x * x } else { 1.0 }; let l = l as f32; sumlx += w * x * l; suml2 += w * l * l; } let mut scale = sumlx / suml2; let mut best = scale * sumlx; for _itry in 0..3 { let iscale = 1.0 / scale; let mut slx = 0f32; let mut sl2 = 0f32; let mut changed = false; for i in 0..n { let x = *x.add(i); let l = nearest_int(iscale * x); let l = l.clamp(-nmax, nmax - 1); if l + nmax != *ls.add(i) as i32 { changed = true; } let w = if weight_type == 1 { x * x } else { 1f32 }; let l = l as f32; slx += w * x * l; sl2 += w * l * l; } if !changed || sl2 == 0.0 || slx * slx <= best * sl2 { break; } for i in 0..n { let x = *x.add(i); let l = nearest_int(iscale * x); *ls.add(i) = (nmax + l.clamp(-nmax, nmax - 1)) as i8; } sumlx = slx; suml2 = sl2; scale = sumlx / suml2; best = scale * sumlx; } for _itry in 0..5 { let mut n_changed = 0; for i in 0..n { let x = *x.add(i); let w = if weight_type == 1 { x * x } else { 1. }; let l = *ls.add(i) as i32 - nmax; let mut slx = sumlx - w * x * l as f32; if slx > 0. { let mut sl2 = suml2 - w * l as f32 * l as f32; let new_l = nearest_int(x * sl2 / slx); let new_l = new_l.clamp(-nmax, nmax - 1); if new_l != l { slx += w * x * new_l as f32; sl2 += w * new_l as f32 * new_l as f32; if sl2 > 0. && slx * slx * suml2 > sumlx * sumlx * sl2 { *ls.add(i) = (nmax + new_l) as i8; sumlx = slx; suml2 = sl2; scale = sumlx / suml2; best = scale * sumlx; n_changed += 1; } } } } if n_changed == 0 { break; } } if rmse_type < 3 { return scale; } for is in -4..4 { if is == 0 { continue; } iscale = -(nmax as f32 + 0.1f32 * is as f32) / max; let mut sumlx = 0.; let mut suml2 = 0.; for i in 0..n { let x = *x.add(i); let l = nearest_int(iscale * x); let l = l.clamp(-nmax, nmax - 1); let w = if weight_type == 1 { x * x } else { 1. }; let l = l as f32; sumlx += w * x * l; suml2 += w * l * l; } if suml2 > 0. 
&& sumlx * sumlx > best * suml2 { for i in 0..n { let x = *x.add(i); let l = nearest_int(iscale * x); *ls.add(i) = (nmax + l.clamp(-nmax, nmax - 1)) as i8; } scale = sumlx / suml2; best = scale * sumlx; } } scale } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L224 pub(super) fn make_qkx1_quants(nmax: i32, ntry: usize, x: &[f32]) -> (f32, f32) { let n = x.len(); let mut l = vec![0; n]; // Get min/max let min = *x .iter() .take(n) .min_by(|a, b| a.total_cmp(b)) .unwrap_or(&x[0]); let max = *x.iter().max_by(|a, b| a.total_cmp(b)).unwrap_or(&x[0]); // If min == max, all values are the same => nothing to do here if max == min { return (0.0, 0.0); } // Ensure min <= 0.0 let mut min = min.min(0.); // Compute scale and inverse scale let mut iscale = nmax as f32 / (max - min); let mut scale = 1.0 / iscale; for _ in 0..ntry { let mut sumlx = 0.0; let mut suml2 = 0; let mut did_change = false; for (i, value) in x.iter().enumerate().take(n) { let li = nearest_int(iscale * (value - min)).clamp(0, nmax); let clamped_li = li as u8; if clamped_li != l[i] { l[i] = clamped_li; did_change = true; } sumlx += (value - min) * li as f32; suml2 += li * li; } scale = sumlx / suml2 as f32; let sum: f32 = x .iter() .take(n) .zip(l.iter().take(n)) .map(|(xi, &li)| xi - scale * li as f32) .sum(); min = sum / n as f32; if min > 0.0 { min = 0.0; } iscale = 1.0 / scale; if !did_change { break; } } (scale, -min) } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L165 pub(super) fn make_q3_quants(x: &[f32], nmax: i32, do_rmse: bool) -> f32 { let n = x.len(); let mut l = vec![0i8; n]; let mut max = 0.0; let mut amax = 0.0; for &xi in x.iter().take(n) { let ax = xi.abs(); if ax > amax { amax = ax; max = xi; } } if amax == 0.0 { return 0.0; } let iscale = -(nmax as f32) / max; if do_rmse { let mut sumlx = 0.0; let mut suml2 = 0.0; for i in 0..n { let li = (iscale * x[i]).round() as i32; let li = li.clamp(-nmax, nmax - 1); l[i] = li as i8; let w = x[i] * x[i]; sumlx += w * x[i] * li as f32; suml2 += w * (li * li) as f32; } for _ in 0..5 { let mut n_changed = 0; for i in 0..n { let w = x[i] * x[i]; let mut slx = sumlx - w * x[i] * l[i] as f32; if slx > 0.0 { let mut sl2 = suml2 - w * (l[i] as i32 * l[i] as i32) as f32; let mut new_l = (x[i] * sl2 / slx).round() as i32; new_l = new_l.clamp(-nmax, nmax - 1); if new_l != l[i] as i32 { slx += w * x[i] * new_l as f32; sl2 += w * (new_l * new_l) as f32; if sl2 > 0.0 && slx * slx * suml2 > sumlx * sumlx * sl2 { l[i] = new_l as i8; sumlx = slx; suml2 = sl2; n_changed += 1; } } } } if n_changed == 0 { break; } } for li in l.iter_mut() { *li += nmax as i8; } return sumlx / suml2; } for i in 0..n { let li = (iscale * x[i]).round() as i32; l[i] = (li.clamp(-nmax, nmax - 1) + nmax) as i8; } 1.0 / iscale }
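// Illustrative sketch appended here (not part of the original file): `nearest_int`
// above relies on `f32::round`, which rounds halfway cases away from zero -- a
// detail worth keeping in mind when reading the clamping logic in the functions above.
#[cfg(test)]
mod rounding_sketch {
    use super::nearest_int;

    #[test]
    fn nearest_int_rounds_half_away_from_zero() {
        assert_eq!(nearest_int(1.4), 1);
        assert_eq!(nearest_int(1.6), 2);
        assert_eq!(nearest_int(2.5), 3);
        assert_eq!(nearest_int(-2.5), -3);
    }
}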
candle/candle-core/src/quantized/utils.rs/0
{ "file_path": "candle/candle-core/src/quantized/utils.rs", "repo_id": "candle", "token_count": 5775 }
20
[package]
name = "candle-datasets"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true
readme = "README.md"

[dependencies]
byteorder = { workspace = true }
candle = { workspace = true }
candle-nn = { workspace = true }
hf-hub = { workspace = true }
intel-mkl-src = { workspace = true, optional = true }
memmap2 = { workspace = true }
tokenizers = { workspace = true, features = ["onig"] }
rand = { workspace = true }
thiserror = { workspace = true }
parquet = { workspace = true }
image = { workspace = true }
candle/candle-datasets/Cargo.toml/0
{ "file_path": "candle/candle-datasets/Cargo.toml", "repo_id": "candle", "token_count": 201 }
21
//! BEiT: BERT Pre-Training of Image Transformers //! https://github.com/microsoft/unilm/tree/master/beit #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use clap::Parser; use candle::{DType, Device, IndexOp, Result, Tensor, D}; use candle_nn::{Module, VarBuilder}; use candle_transformers::models::beit; /// Loads an image from disk using the image crate, this returns a tensor with shape /// (3, 384, 384). Beit special normalization is applied. pub fn load_image384_beit_norm<P: AsRef<std::path::Path>>(p: P) -> Result<Tensor> { let img = image::ImageReader::open(p)? .decode() .map_err(candle::Error::wrap)? .resize_to_fill(384, 384, image::imageops::FilterType::Triangle); let img = img.to_rgb8(); let data = img.into_raw(); let data = Tensor::from_vec(data, (384, 384, 3), &Device::Cpu)?.permute((2, 0, 1))?; let mean = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?; let std = Tensor::new(&[0.5f32, 0.5, 0.5], &Device::Cpu)?.reshape((3, 1, 1))?; (data.to_dtype(candle::DType::F32)? / 255.)? .broadcast_sub(&mean)? .broadcast_div(&std) } #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let image = load_image384_beit_norm(args.image)?.to_device(&device)?; println!("loaded image {image:?}"); let model_file = match args.model { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("vincent-espitalier/candle-beit".into()); api.get("beit_base_patch16_384.in22k_ft_in22k_in1k.safetensors")? } Some(model) => model.into(), }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? }; let model = beit::vit_base(vb)?; println!("model built"); let logits = model.forward(&image.unsqueeze(0)?)?; let prs = candle_nn::ops::softmax(&logits, D::Minus1)? .i(0)? .to_vec1::<f32>()?; let mut prs = prs.iter().enumerate().collect::<Vec<_>>(); prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1)); for &(category_idx, pr) in prs.iter().take(5) { println!( "{:24}: {:.2}%", candle_examples::imagenet::CLASSES[category_idx], 100. * pr ); } Ok(()) }
candle/candle-examples/examples/beit/main.rs/0
{ "file_path": "candle/candle-examples/examples/beit/main.rs", "repo_id": "candle", "token_count": 1178 }
22
#include <stdint.h>
#include "reduction_utils.cuh"

template <typename scalar_t>
__device__ void
rms_norm_kernel(scalar_t *__restrict__ out,         // [num_tokens, hidden_size]
                const scalar_t *__restrict__ input, // [num_tokens, hidden_size]
                const float epsilon, const uint32_t num_tokens,
                const uint32_t hidden_size) {
  __shared__ float s_variance;
  float variance = 0.0f;

  for (int idx = threadIdx.x; idx < hidden_size; idx += blockDim.x) {
    const float x = (float)input[blockIdx.x * hidden_size + idx];
    variance += x * x;
  }
  variance = blockReduceSum<float>(variance);
  if (threadIdx.x == 0) {
    s_variance = rsqrtf(variance / hidden_size + epsilon);
  }
  __syncthreads();

  for (int idx = threadIdx.x; idx < hidden_size; idx += blockDim.x) {
    float x = (float)input[blockIdx.x * hidden_size + idx];
    out[blockIdx.x * hidden_size + idx] = ((scalar_t)(x * s_variance));
  }
}

extern "C" __global__ void
rms_f32(float *__restrict__ out,         // [num_tokens, hidden_size]
        const float *__restrict__ input, // [num_tokens, hidden_size]
        const float epsilon, const uint32_t num_tokens,
        const uint32_t hidden_size) {
  rms_norm_kernel(out, input, epsilon, num_tokens, hidden_size);
}
candle/candle-examples/examples/custom-ops/kernels/layernorm_kernels.cu/0
{ "file_path": "candle/candle-examples/examples/custom-ops/kernels/layernorm_kernels.cu", "repo_id": "candle", "token_count": 561 }
23
#![allow(unused)] use anyhow::{Context, Result}; use std::sync::{Arc, Mutex}; pub const SAMPLE_RATE: usize = 24_000; pub(crate) struct AudioOutputData_ { resampled_data: std::collections::VecDeque<f32>, resampler: rubato::FastFixedIn<f32>, output_buffer: Vec<f32>, input_buffer: Vec<f32>, input_len: usize, } impl AudioOutputData_ { pub(crate) fn new(input_sample_rate: usize, output_sample_rate: usize) -> Result<Self> { use rubato::Resampler; let resampled_data = std::collections::VecDeque::with_capacity(output_sample_rate * 10); let resample_ratio = output_sample_rate as f64 / input_sample_rate as f64; let resampler = rubato::FastFixedIn::new( resample_ratio, f64::max(resample_ratio, 1.0), rubato::PolynomialDegree::Septic, 1024, 1, )?; let input_buffer = resampler.input_buffer_allocate(true).remove(0); let output_buffer = resampler.output_buffer_allocate(true).remove(0); Ok(Self { resampled_data, resampler, input_buffer, output_buffer, input_len: 0, }) } pub fn reset(&mut self) { use rubato::Resampler; self.output_buffer.fill(0.); self.input_buffer.fill(0.); self.resampler.reset(); self.resampled_data.clear(); } pub(crate) fn take_all(&mut self) -> Vec<f32> { let mut data = Vec::with_capacity(self.resampled_data.len()); while let Some(elem) = self.resampled_data.pop_back() { data.push(elem); } data } pub(crate) fn is_empty(&self) -> bool { self.resampled_data.is_empty() } // Assumes that the input buffer is large enough. fn push_input_buffer(&mut self, samples: &[f32]) { self.input_buffer[self.input_len..self.input_len + samples.len()].copy_from_slice(samples); self.input_len += samples.len() } pub(crate) fn push_samples(&mut self, samples: &[f32]) -> Result<()> { use rubato::Resampler; let mut pos_in = 0; loop { let rem = self.input_buffer.len() - self.input_len; let pos_end = usize::min(pos_in + rem, samples.len()); self.push_input_buffer(&samples[pos_in..pos_end]); pos_in = pos_end; if self.input_len < self.input_buffer.len() { break; } let (_, out_len) = self.resampler.process_into_buffer( &[&self.input_buffer], &mut [&mut self.output_buffer], None, )?; for &elem in self.output_buffer[..out_len].iter() { self.resampled_data.push_front(elem) } self.input_len = 0; } Ok(()) } } type AudioOutputData = Arc<Mutex<AudioOutputData_>>; pub(crate) fn setup_output_stream() -> Result<(cpal::Stream, AudioOutputData)> { use cpal::traits::{DeviceTrait, HostTrait, StreamTrait}; println!("Setup audio output stream!"); let host = cpal::default_host(); let device = host .default_output_device() .context("no output device available")?; let mut supported_configs_range = device.supported_output_configs()?; let config_range = match supported_configs_range.find(|c| c.channels() == 1) { // On macOS, it's commonly the case that there are only stereo outputs. None => device .supported_output_configs()? 
.next() .context("no audio output available")?, Some(config_range) => config_range, }; let sample_rate = cpal::SampleRate(SAMPLE_RATE as u32).clamp( config_range.min_sample_rate(), config_range.max_sample_rate(), ); let config: cpal::StreamConfig = config_range.with_sample_rate(sample_rate).into(); let channels = config.channels as usize; println!( "cpal device: {} {} {config:?}", device.name().unwrap_or_else(|_| "unk".to_string()), config.sample_rate.0 ); let audio_data = Arc::new(Mutex::new(AudioOutputData_::new( SAMPLE_RATE, config.sample_rate.0 as usize, )?)); let ad = audio_data.clone(); let stream = device.build_output_stream( &config, move |data: &mut [f32], _: &cpal::OutputCallbackInfo| { data.fill(0.); let mut ad = ad.lock().unwrap(); let mut last_elem = 0f32; for (idx, elem) in data.iter_mut().enumerate() { if idx % channels == 0 { match ad.resampled_data.pop_back() { None => break, Some(v) => { last_elem = v; *elem = v } } } else { *elem = last_elem } } }, move |err| eprintln!("cpal error: {err}"), None, // None=blocking, Some(Duration)=timeout )?; stream.play()?; Ok((stream, audio_data)) } pub(crate) fn setup_input_stream() -> Result<(cpal::Stream, AudioOutputData)> { use cpal::traits::{DeviceTrait, HostTrait, StreamTrait}; println!("Setup audio input stream!"); let host = cpal::default_host(); let device = host .default_input_device() .context("no input device available")?; let mut supported_configs_range = device.supported_input_configs()?; let config_range = supported_configs_range .find(|c| c.channels() == 1) .context("no audio input available")?; let sample_rate = cpal::SampleRate(SAMPLE_RATE as u32).clamp( config_range.min_sample_rate(), config_range.max_sample_rate(), ); let config: cpal::StreamConfig = config_range.with_sample_rate(sample_rate).into(); println!( "cpal device: {} {} {config:?}", device.name().unwrap_or_else(|_| "unk".to_string()), config.sample_rate.0 ); let audio_data = Arc::new(Mutex::new(AudioOutputData_::new( config.sample_rate.0 as usize, SAMPLE_RATE, )?)); let ad = audio_data.clone(); let stream = device.build_input_stream( &config, move |data: &[f32], _: &cpal::InputCallbackInfo| { let mut ad = ad.lock().unwrap(); if let Err(err) = ad.push_samples(data) { eprintln!("error processing audio input {err:?}") } }, move |err| eprintln!("cpal error: {err}"), None, // None=blocking, Some(Duration)=timeout )?; stream.play()?; Ok((stream, audio_data)) } fn conv<T>(samples: &mut Vec<f32>, data: std::borrow::Cow<symphonia::core::audio::AudioBuffer<T>>) where T: symphonia::core::sample::Sample, f32: symphonia::core::conv::FromSample<T>, { use symphonia::core::audio::Signal; use symphonia::core::conv::FromSample; samples.extend(data.chan(0).iter().map(|v| f32::from_sample(*v))) } pub(crate) fn pcm_decode<P: AsRef<std::path::Path>>(path: P) -> Result<(Vec<f32>, u32)> { use symphonia::core::audio::{AudioBufferRef, Signal}; let src = std::fs::File::open(path)?; let mss = symphonia::core::io::MediaSourceStream::new(Box::new(src), Default::default()); let hint = symphonia::core::probe::Hint::new(); let meta_opts: symphonia::core::meta::MetadataOptions = Default::default(); let fmt_opts: symphonia::core::formats::FormatOptions = Default::default(); let probed = symphonia::default::get_probe().format(&hint, mss, &fmt_opts, &meta_opts)?; let mut format = probed.format; let track = format .tracks() .iter() .find(|t| t.codec_params.codec != symphonia::core::codecs::CODEC_TYPE_NULL) .expect("no supported audio tracks"); let mut decoder = symphonia::default::get_codecs() 
.make(&track.codec_params, &Default::default()) .expect("unsupported codec"); let track_id = track.id; let sample_rate = track.codec_params.sample_rate.unwrap_or(0); let mut pcm_data = Vec::new(); while let Ok(packet) = format.next_packet() { while !format.metadata().is_latest() { format.metadata().pop(); } if packet.track_id() != track_id { continue; } match decoder.decode(&packet)? { AudioBufferRef::F32(buf) => pcm_data.extend(buf.chan(0)), AudioBufferRef::U8(data) => conv(&mut pcm_data, data), AudioBufferRef::U16(data) => conv(&mut pcm_data, data), AudioBufferRef::U24(data) => conv(&mut pcm_data, data), AudioBufferRef::U32(data) => conv(&mut pcm_data, data), AudioBufferRef::S8(data) => conv(&mut pcm_data, data), AudioBufferRef::S16(data) => conv(&mut pcm_data, data), AudioBufferRef::S24(data) => conv(&mut pcm_data, data), AudioBufferRef::S32(data) => conv(&mut pcm_data, data), AudioBufferRef::F64(data) => conv(&mut pcm_data, data), } } Ok((pcm_data, sample_rate)) } pub(crate) fn resample(pcm_in: &[f32], sr_in: usize, sr_out: usize) -> Result<Vec<f32>> { use rubato::Resampler; let mut pcm_out = Vec::with_capacity((pcm_in.len() as f64 * sr_out as f64 / sr_in as f64) as usize + 1024); let mut resampler = rubato::FftFixedInOut::<f32>::new(sr_in, sr_out, 1024, 1)?; let mut output_buffer = resampler.output_buffer_allocate(true); let mut pos_in = 0; while pos_in + resampler.input_frames_next() < pcm_in.len() { let (in_len, out_len) = resampler.process_into_buffer(&[&pcm_in[pos_in..]], &mut output_buffer, None)?; pos_in += in_len; pcm_out.extend_from_slice(&output_buffer[0][..out_len]); } if pos_in < pcm_in.len() { let (_in_len, out_len) = resampler.process_partial_into_buffer( Some(&[&pcm_in[pos_in..]]), &mut output_buffer, None, )?; pcm_out.extend_from_slice(&output_buffer[0][..out_len]); } Ok(pcm_out) }
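// Hypothetical usage sketch (not part of the original file): combine the
// `pcm_decode` and `resample` helpers above to load audio at the model sample
// rate. The function name is made up for illustration.
#[allow(dead_code)]
pub(crate) fn pcm_at_model_rate<P: AsRef<std::path::Path>>(path: P) -> Result<Vec<f32>> {
    let (pcm, sample_rate) = pcm_decode(path)?;
    if sample_rate as usize == SAMPLE_RATE {
        Ok(pcm)
    } else {
        resample(&pcm, sample_rate as usize, SAMPLE_RATE)
    }
}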
candle/candle-examples/examples/encodec/audio_io.rs/0
{ "file_path": "candle/candle-examples/examples/encodec/audio_io.rs", "repo_id": "candle", "token_count": 4805 }
24
# candle-llava

LLaVA (Large Language-and-Vision Assistant) is an end-to-end trained large multimodal model.

This example is from [candle-llava](https://github.com/chenwanqq/candle-llava)

The code is based on [https://github.com/haotian-liu/LLaVA](https://github.com/haotian-liu/LLaVA), hence the llava-hf version of the config may perform differently.

## model zoo

* [liuhaotian/LLaVA](https://huggingface.co/liuhaotian)
* [llava-hf](https://huggingface.co/llava-hf)

Right now this has been tested on `liuhaotian/llava-v1.6-vicuna-7b` and `llava-hf/llava-v1.6-vicuna-7b-hf`. Memory usage might have room for optimization.

## Tokenizer Setup

The llava-hf models contain a `tokenizer.json` file, so they can be used directly with the `-hf` command line flag.

For the original llava models, you can use the following code to generate the `tokenizer.json` file.

```bash
conda create -n llava python=3.10
pip install transformers protobuf
conda activate llava
python -c "from transformers import AutoTokenizer;tokenizer=AutoTokenizer.from_pretrained('liuhaotian/llava-v1.6-vicuna-7b');tokenizer.save_pretrained('tokenizer')"
```

Then the `tokenizer.json` file should be in `tokenizer/tokenizer.json` (which is the default path).

## eval

```bash
cargo run --example llava --features cuda -- --image-file "llava_logo.png" --prompt "is this a cat?" --hf # default args, use llava-hf/llava-v1.6-vicuna-7b-hf. image-file is required^_^
cargo run --example llava --features cuda -- --model-path liuhaotian/llava-v1.6-vicuna-7b --image-file "llava_logo.png" --prompt "is this a cat?" # use liuhaotian/llava-v1.6-vicuna-7b, tokenizer setup should be done
```

## Major Limitations

1. Currently only llama-2/vicuna LLMs are supported; Mistral is not supported yet.
2. Some ops such as `split`, `nonzero` and `where` are not supported by candle.
3. Lack of quantization and LoRA support.
candle/candle-examples/examples/llava/readme.md/0
{ "file_path": "candle/candle-examples/examples/llava/readme.md", "repo_id": "candle", "token_count": 671 }
25
# candle-mobileclip

MobileCLIP is a family of efficient CLIP-like models using FastViT-based image encoders.

See [MobileCLIP: Fast Image-Text Models through Multi-Modal Reinforced Training](https://arxiv.org/abs/2311.17049)

## Running an example on cpu

```
$ cargo run --example mobileclip --release -- --images "candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg","candle-examples/examples/yolo-v8/assets/bike.jpg" --cpu --sequences "a cycling race","a photo of two cats","a robot holding a candle"

softmax_image_vec: [2.4819004e-5, 3.81081e-6, 0.9999714, 0.9999738, 2.382714e-5, 2.3317718e-6]

Results for image: candle-examples/examples/stable-diffusion/assets/stable-diffusion-xl.jpg

Probability: 0.0025% Text: a cycling race
Probability: 0.0004% Text: a photo of two cats
Probability: 99.9971% Text: a robot holding a candle

Results for image: candle-examples/examples/yolo-v8/assets/bike.jpg

Probability: 99.9974% Text: a cycling race
Probability: 0.0024% Text: a photo of two cats
Probability: 0.0002% Text: a robot holding a candle
```
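The `softmax_image_vec` values above come from a softmax over the text dimension of the image/text similarity logits. A minimal sketch of that post-processing step with candle, assuming `logits_per_image` is an `(n_images, n_texts)` tensor produced by the model, could look like:

```rust
// Hypothetical post-processing sketch: turn similarity logits into the
// per-image probabilities printed by the example.
let probs = candle_nn::ops::softmax(&logits_per_image, candle::D::Minus1)?;
let softmax_image_vec = probs.flatten_all()?.to_vec1::<f32>()?;
```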
candle/candle-examples/examples/mobileclip/README.md/0
{ "file_path": "candle/candle-examples/examples/mobileclip/README.md", "repo_id": "candle", "token_count": 379 }
26
# candle-reinforcement-learning

Reinforcement Learning examples for candle.

This has been tested with `gymnasium` version `0.29.1`. You can install the Python package with:

```bash
pip install "gymnasium[accept-rom-license]"
```

In order to run the examples, use the following commands. Note the additional `--package` flag to ensure that there is no conflict with the `candle-pyo3` crate.

For the Policy Gradient example:

```bash
cargo run --example reinforcement-learning --features=pyo3 --package candle-examples -- pg
```

For the Deep Deterministic Policy Gradient example:

```bash
cargo run --example reinforcement-learning --features=pyo3 --package candle-examples -- ddpg
```
candle/candle-examples/examples/reinforcement-learning/README.md/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/README.md", "repo_id": "candle", "token_count": 198 }
27
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Result; use clap::{Parser, ValueEnum}; use candle_transformers::models::quantized_rwkv_v5::Model as Q5; use candle_transformers::models::quantized_rwkv_v6::Model as Q6; use candle_transformers::models::rwkv_v5::{Config, Model as M5, State, Tokenizer}; use candle_transformers::models::rwkv_v6::Model as M6; use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; const EOS_TOKEN_ID: u32 = 261; enum Model { M5(M5), Q5(Q5), M6(M6), Q6(Q6), } impl Model { fn forward(&self, xs: &Tensor, state: &mut State) -> candle::Result<Tensor> { match self { Self::M5(m) => m.forward(xs, state), Self::Q5(m) => m.forward(xs, state), Self::M6(m) => m.forward(xs, state), Self::Q6(m) => m.forward(xs, state), } } } struct TextGeneration { model: Model, config: Config, device: Device, tokenizer: Tokenizer, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new( model: Model, config: Config, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, config, tokenizer, logits_processor, repeat_penalty, repeat_last_n, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; let mut tokens = self.tokenizer.encode(prompt)?; let mut generated_tokens = 0usize; let mut state = State::new(1, &self.config, &self.device)?; let mut next_logits = None; for &t in tokens.iter() { let input = Tensor::new(&[[t]], &self.device)?; let logits = self.model.forward(&input, &mut state)?; next_logits = Some(logits); print!("{}", self.tokenizer.decode(&[t])?) } std::io::stdout().flush()?; let start_gen = std::time::Instant::now(); for _ in 0..sample_len { let logits = match next_logits.as_ref() { Some(logits) => logits, None => anyhow::bail!("cannot work on an empty prompt"), }; let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == EOS_TOKEN_ID || next_token == 0 { break; } print!("{}", self.tokenizer.decode(&[next_token])?); std::io::stdout().flush()?; let input = Tensor::new(&[[next_token]], &self.device)?; next_logits = Some(self.model.forward(&input, &mut state)?) 
} let dt = start_gen.elapsed(); println!( "\n{generated_tokens} tokens generated ({:.2} token/s)", generated_tokens as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, ValueEnum, Clone, Copy, PartialEq, Eq, Debug)] enum Which { Eagle7b, World1b5, World3b, World6_1b6, } impl std::fmt::Display for Which { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self) } } impl Which { fn model_id(&self) -> &'static str { match self { Self::Eagle7b => "RWKV/v5-Eagle-7B-HF", Self::World1b5 => "RWKV/rwkv-5-world-1b5", Self::World3b => "RWKV/rwkv-5-world-3b", Self::World6_1b6 => "paperfun/rwkv", } } fn revision(&self) -> &'static str { match self { Self::Eagle7b => "refs/pr/1", Self::World1b5 | Self::World3b => "refs/pr/2", Self::World6_1b6 => "main", } } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long)] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, short = 'n', default_value_t = 5000)] sample_len: usize, #[arg(long, default_value = "world1b5")] which: Which, #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, #[arg(long)] tokenizer: Option<String>, #[arg(long)] weight_files: Option<String>, #[arg(long)] config_file: Option<String>, #[arg(long)] quantized: bool, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. 
#[arg(long, default_value_t = 64)] repeat_last_n: usize, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = Api::new()?; let repo = api.repo(Repo::with_revision( args.model_id .unwrap_or_else(|| args.which.model_id().to_string()), RepoType::Model, args.revision .unwrap_or_else(|| args.which.revision().to_string()), )); let tokenizer = match args.tokenizer { Some(file) => std::path::PathBuf::from(file), None => api .model("lmz/candle-rwkv".to_string()) .get("rwkv_vocab_v20230424.json")?, }; let config_filename = match args.config_file { Some(file) => std::path::PathBuf::from(file), None => repo.get("config.json")?, }; let filenames = match args.weight_files { Some(files) => files .split(',') .map(std::path::PathBuf::from) .collect::<Vec<_>>(), None => { if args.quantized { vec![match args.which { Which::World1b5 => api .model("lmz/candle-rwkv".to_string()) .get("world1b5-q4k.gguf")?, Which::World3b => api .model("lmz/candle-rwkv".to_string()) .get("world3b-q4k.gguf")?, Which::Eagle7b => api .model("lmz/candle-rwkv".to_string()) .get("eagle7b-q4k.gguf")?, Which::World6_1b6 => repo.get("rwkv-6-world-1b6-q4k.gguf")?, }] } else { vec![match args.which { Which::World1b5 | Which::World3b | Which::Eagle7b => { repo.get("model.safetensors")? } Which::World6_1b6 => repo.get("rwkv-6-world-1b6.safetensors")?, }] } } }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::new(tokenizer)?; let start = std::time::Instant::now(); let config: Config = serde_json::from_slice(&std::fs::read(config_filename)?)?; let device = candle_examples::device(args.cpu)?; let model = if args.quantized { let filename = &filenames[0]; let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf(filename, &device)?; match args.which { Which::World1b5 | Which::World3b | Which::Eagle7b => Model::Q5(Q5::new(&config, vb)?), Which::World6_1b6 => Model::Q6(Q6::new(&config, vb)?), } } else { let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, DType::F32, &device)? }; match args.which { Which::World1b5 | Which::World3b | Which::Eagle7b => Model::M5(M5::new(&config, vb)?), Which::World6_1b6 => Model::M6(M6::new(&config, vb)?), } }; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, config, tokenizer, args.seed, args.temperature, args.top_p, args.repeat_penalty, args.repeat_last_n, &device, ); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
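
// Example invocation (illustrative; the flags are the clap arguments defined above):
//   cargo run --example rwkv --release -- --which world1b5 --prompt "The capital of France is" --sample-len 200
// Add --quantized to load the q4k gguf weights from lmz/candle-rwkv instead of the f32 safetensors.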
candle/candle-examples/examples/rwkv/main.rs/0
{ "file_path": "candle/candle-examples/examples/rwkv/main.rs", "repo_id": "candle", "token_count": 5059 }
28
// https://github.com/openai/whisper/blob/main/whisper/model.py/rgs // TODO: // - Batch size greater than 1. // - More token filters (SuppressBlanks, ApplyTimestampRules). #[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use anyhow::{Error as E, Result}; use candle::{Device, IndexOp, Tensor}; use candle_nn::{ops::softmax, VarBuilder}; use clap::{Parser, ValueEnum}; use hf_hub::{api::sync::Api, Repo, RepoType}; use rand::{distributions::Distribution, SeedableRng}; use tokenizers::Tokenizer; mod multilingual; mod pcm_decode; use candle_transformers::models::whisper::{self as m, audio, Config}; pub enum Model { Normal(m::model::Whisper), Quantized(m::quantized_model::Whisper), } // Maybe we should use some traits rather than doing the dispatch for all these. impl Model { pub fn config(&self) -> &Config { match self { Self::Normal(m) => &m.config, Self::Quantized(m) => &m.config, } } pub fn encoder_forward(&mut self, x: &Tensor, flush: bool) -> candle::Result<Tensor> { match self { Self::Normal(m) => m.encoder.forward(x, flush), Self::Quantized(m) => m.encoder.forward(x, flush), } } pub fn decoder_forward( &mut self, x: &Tensor, xa: &Tensor, flush: bool, ) -> candle::Result<Tensor> { match self { Self::Normal(m) => m.decoder.forward(x, xa, flush), Self::Quantized(m) => m.decoder.forward(x, xa, flush), } } pub fn decoder_final_linear(&self, x: &Tensor) -> candle::Result<Tensor> { match self { Self::Normal(m) => m.decoder.final_linear(x), Self::Quantized(m) => m.decoder.final_linear(x), } } } #[allow(dead_code)] #[derive(Debug, Clone)] struct DecodingResult { tokens: Vec<u32>, text: String, avg_logprob: f64, no_speech_prob: f64, temperature: f64, compression_ratio: f64, } #[allow(dead_code)] #[derive(Debug, Clone)] struct Segment { start: f64, duration: f64, dr: DecodingResult, } struct Decoder { model: Model, rng: rand::rngs::StdRng, task: Option<Task>, timestamps: bool, verbose: bool, tokenizer: Tokenizer, suppress_tokens: Tensor, sot_token: u32, transcribe_token: u32, translate_token: u32, eot_token: u32, no_speech_token: u32, no_timestamps_token: u32, language_token: Option<u32>, } impl Decoder { #[allow(clippy::too_many_arguments)] fn new( model: Model, tokenizer: Tokenizer, seed: u64, device: &Device, language_token: Option<u32>, task: Option<Task>, timestamps: bool, verbose: bool, ) -> Result<Self> { let no_timestamps_token = token_id(&tokenizer, m::NO_TIMESTAMPS_TOKEN)?; // Suppress the notimestamps token when in timestamps mode. 
// https://github.com/openai/whisper/blob/e8622f9afc4eba139bf796c210f5c01081000472/whisper/decoding.py#L452 let suppress_tokens: Vec<f32> = (0..model.config().vocab_size as u32) .map(|i| { if model.config().suppress_tokens.contains(&i) || timestamps && i == no_timestamps_token { f32::NEG_INFINITY } else { 0f32 } }) .collect(); let suppress_tokens = Tensor::new(suppress_tokens.as_slice(), device)?; let sot_token = token_id(&tokenizer, m::SOT_TOKEN)?; let transcribe_token = token_id(&tokenizer, m::TRANSCRIBE_TOKEN)?; let translate_token = token_id(&tokenizer, m::TRANSLATE_TOKEN)?; let eot_token = token_id(&tokenizer, m::EOT_TOKEN)?; let no_speech_token = m::NO_SPEECH_TOKENS .iter() .find_map(|token| token_id(&tokenizer, token).ok()); let no_speech_token = match no_speech_token { None => anyhow::bail!("unable to find any non-speech token"), Some(n) => n, }; Ok(Self { model, rng: rand::rngs::StdRng::seed_from_u64(seed), tokenizer, task, timestamps, verbose, suppress_tokens, sot_token, transcribe_token, translate_token, eot_token, no_speech_token, language_token, no_timestamps_token, }) } fn decode(&mut self, mel: &Tensor, t: f64) -> Result<DecodingResult> { let model = &mut self.model; let audio_features = model.encoder_forward(mel, true)?; if self.verbose { println!("audio features: {:?}", audio_features.dims()); } let sample_len = model.config().max_target_positions / 2; let mut sum_logprob = 0f64; let mut no_speech_prob = f64::NAN; let mut tokens = vec![self.sot_token]; if let Some(language_token) = self.language_token { tokens.push(language_token); } match self.task { None | Some(Task::Transcribe) => tokens.push(self.transcribe_token), Some(Task::Translate) => tokens.push(self.translate_token), } if !self.timestamps { tokens.push(self.no_timestamps_token); } for i in 0..sample_len { let tokens_t = Tensor::new(tokens.as_slice(), mel.device())?; // The model expects a batch dim but this inference loop does not handle // it so we add it at this point. let tokens_t = tokens_t.unsqueeze(0)?; let ys = model.decoder_forward(&tokens_t, &audio_features, i == 0)?; // Extract the no speech probability on the first iteration by looking at the first // token logits and the probability for the according token. if i == 0 { let logits = model.decoder_final_linear(&ys.i(..1)?)?.i(0)?.i(0)?; no_speech_prob = softmax(&logits, 0)? .i(self.no_speech_token as usize)? .to_scalar::<f32>()? as f64; } let (_, seq_len, _) = ys.dims3()?; let logits = model .decoder_final_linear(&ys.i((..1, seq_len - 1..))?)? .i(0)? .i(0)?; // TODO: Besides suppress tokens, we should apply the heuristics from // ApplyTimestampRules, i.e.: // - Timestamps come in pairs, except before EOT. // - Timestamps should be non-decreasing. // - If the sum of the probabilities of timestamps is higher than any other tokens, // only consider timestamps when sampling. // https://github.com/openai/whisper/blob/e8622f9afc4eba139bf796c210f5c01081000472/whisper/decoding.py#L439 let logits = logits.broadcast_add(&self.suppress_tokens)?; let next_token = if t > 0f64 { let prs = softmax(&(&logits / t)?, 0)?; let logits_v: Vec<f32> = prs.to_vec1()?; let distr = rand::distributions::WeightedIndex::new(&logits_v)?; distr.sample(&mut self.rng) as u32 } else { let logits_v: Vec<f32> = logits.to_vec1()?; logits_v .iter() .enumerate() .max_by(|(_, u), (_, v)| u.total_cmp(v)) .map(|(i, _)| i as u32) .unwrap() }; tokens.push(next_token); let prob = softmax(&logits, candle::D::Minus1)? .i(next_token as usize)? .to_scalar::<f32>()? 
as f64; if next_token == self.eot_token || tokens.len() > model.config().max_target_positions { break; } sum_logprob += prob.ln(); } let text = self.tokenizer.decode(&tokens, true).map_err(E::msg)?; let avg_logprob = sum_logprob / tokens.len() as f64; Ok(DecodingResult { tokens, text, avg_logprob, no_speech_prob, temperature: t, compression_ratio: f64::NAN, }) } fn decode_with_fallback(&mut self, segment: &Tensor) -> Result<DecodingResult> { for (i, &t) in m::TEMPERATURES.iter().enumerate() { let dr: Result<DecodingResult> = self.decode(segment, t); if i == m::TEMPERATURES.len() - 1 { return dr; } // On errors, we try again with a different temperature. match dr { Ok(dr) => { let needs_fallback = dr.compression_ratio > m::COMPRESSION_RATIO_THRESHOLD || dr.avg_logprob < m::LOGPROB_THRESHOLD; if !needs_fallback || dr.no_speech_prob > m::NO_SPEECH_THRESHOLD { return Ok(dr); } } Err(err) => { println!("Error running at {t}: {err}") } } } unreachable!() } fn run(&mut self, mel: &Tensor) -> Result<Vec<Segment>> { let (_, _, content_frames) = mel.dims3()?; let mut seek = 0; let mut segments = vec![]; while seek < content_frames { let start = std::time::Instant::now(); let time_offset = (seek * m::HOP_LENGTH) as f64 / m::SAMPLE_RATE as f64; let segment_size = usize::min(content_frames - seek, m::N_FRAMES); let mel_segment = mel.narrow(2, seek, segment_size)?; let segment_duration = (segment_size * m::HOP_LENGTH) as f64 / m::SAMPLE_RATE as f64; let dr = self.decode_with_fallback(&mel_segment)?; seek += segment_size; if dr.no_speech_prob > m::NO_SPEECH_THRESHOLD && dr.avg_logprob < m::LOGPROB_THRESHOLD { println!("no speech detected, skipping {seek} {dr:?}"); continue; } let segment = Segment { start: time_offset, duration: segment_duration, dr, }; if self.timestamps { println!( "{:.1}s -- {:.1}s", segment.start, segment.start + segment.duration, ); let mut tokens_to_decode = vec![]; let mut prev_timestamp_s = 0f32; for &token in segment.dr.tokens.iter() { if token == self.sot_token || token == self.eot_token { continue; } // The no_timestamp_token is the last before the timestamp ones. 
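                    // Timestamp tokens are spaced 20ms apart, hence the division by 50 below to
                    // turn a token offset into a position in seconds.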
if token > self.no_timestamps_token { let timestamp_s = (token - self.no_timestamps_token + 1) as f32 / 50.; if !tokens_to_decode.is_empty() { let text = self .tokenizer .decode(&tokens_to_decode, true) .map_err(E::msg)?; println!(" {:.1}s-{:.1}s: {}", prev_timestamp_s, timestamp_s, text); tokens_to_decode.clear() } prev_timestamp_s = timestamp_s; } else { tokens_to_decode.push(token) } } if !tokens_to_decode.is_empty() { let text = self .tokenizer .decode(&tokens_to_decode, true) .map_err(E::msg)?; if !text.is_empty() { println!(" {:.1}s-...: {}", prev_timestamp_s, text); } tokens_to_decode.clear() } } else { println!( "{:.1}s -- {:.1}s: {}", segment.start, segment.start + segment.duration, segment.dr.text, ) } if self.verbose { println!("{seek}: {segment:?}, in {:?}", start.elapsed()); } segments.push(segment) } Ok(segments) } } pub fn token_id(tokenizer: &Tokenizer, token: &str) -> candle::Result<u32> { match tokenizer.token_to_id(token) { None => candle::bail!("no token-id for {token}"), Some(id) => Ok(id), } } #[derive(Clone, Copy, Debug, ValueEnum)] enum Task { Transcribe, Translate, } #[derive(Clone, Copy, Debug, PartialEq, Eq, ValueEnum)] enum WhichModel { Tiny, #[value(name = "tiny.en")] TinyEn, Base, #[value(name = "base.en")] BaseEn, Small, #[value(name = "small.en")] SmallEn, Medium, #[value(name = "medium.en")] MediumEn, Large, LargeV2, LargeV3, #[value(name = "distil-medium.en")] DistilMediumEn, #[value(name = "distil-large-v2")] DistilLargeV2, #[value(name = "distil-large-v3")] DistilLargeV3, } impl WhichModel { fn is_multilingual(&self) -> bool { match self { Self::Tiny | Self::Base | Self::Small | Self::Medium | Self::Large | Self::LargeV2 | Self::LargeV3 | Self::DistilLargeV2 | Self::DistilLargeV3 => true, Self::TinyEn | Self::BaseEn | Self::SmallEn | Self::MediumEn | Self::DistilMediumEn => { false } } } fn model_and_revision(&self) -> (&'static str, &'static str) { match self { Self::Tiny => ("openai/whisper-tiny", "main"), Self::TinyEn => ("openai/whisper-tiny.en", "refs/pr/15"), Self::Base => ("openai/whisper-base", "refs/pr/22"), Self::BaseEn => ("openai/whisper-base.en", "refs/pr/13"), Self::Small => ("openai/whisper-small", "main"), Self::SmallEn => ("openai/whisper-small.en", "refs/pr/10"), Self::Medium => ("openai/whisper-medium", "main"), Self::MediumEn => ("openai/whisper-medium.en", "main"), Self::Large => ("openai/whisper-large", "refs/pr/36"), Self::LargeV2 => ("openai/whisper-large-v2", "refs/pr/57"), Self::LargeV3 => ("openai/whisper-large-v3", "main"), Self::DistilMediumEn => ("distil-whisper/distil-medium.en", "main"), Self::DistilLargeV2 => ("distil-whisper/distil-large-v2", "main"), Self::DistilLargeV3 => ("distil-whisper/distil-large-v3", "main"), } } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(long)] model_id: Option<String>, /// The model to use, check out available models: /// https://huggingface.co/models?search=whisper #[arg(long)] revision: Option<String>, /// The model to be used, can be tiny, small, medium. #[arg(long, default_value = "tiny.en")] model: WhichModel, /// The input to be processed, in wav format, will default to `jfk.wav`. Alternatively /// this can be set to sample:jfk, sample:gb1, ... to fetch a sample from the following /// repo: https://huggingface.co/datasets/Narsil/candle_demo/ #[arg(long)] input: Option<String>, /// The seed to use when generating random samples. 
#[arg(long, default_value_t = 299792458)] seed: u64, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long)] quantized: bool, /// Language. #[arg(long)] language: Option<String>, /// Task, when no task is specified, the input tokens contain only the sot token which can /// improve things when in no-timestamp mode. #[arg(long)] task: Option<Task>, /// Timestamps mode, this is not fully implemented yet. #[arg(long)] timestamps: bool, /// Print the full DecodingResult structure rather than just the text. #[arg(long)] verbose: bool, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let device = candle_examples::device(args.cpu)?; let (default_model, default_revision) = if args.quantized { ("lmz/candle-whisper", "main") } else { args.model.model_and_revision() }; let default_model = default_model.to_string(); let default_revision = default_revision.to_string(); let (model_id, revision) = match (args.model_id, args.revision) { (Some(model_id), Some(revision)) => (model_id, revision), (Some(model_id), None) => (model_id, "main".to_string()), (None, Some(revision)) => (default_model, revision), (None, None) => (default_model, default_revision), }; let (config_filename, tokenizer_filename, weights_filename, input) = { let api = Api::new()?; let dataset = api.dataset("Narsil/candle-examples".to_string()); let repo = api.repo(Repo::with_revision(model_id, RepoType::Model, revision)); let sample = if let Some(input) = args.input { if let Some(sample) = input.strip_prefix("sample:") { dataset.get(&format!("samples_{sample}.wav"))? } else { std::path::PathBuf::from(input) } } else { println!("No audio file submitted: Downloading https://huggingface.co/datasets/Narsil/candle_demo/blob/main/samples_jfk.wav"); dataset.get("samples_jfk.wav")? 
}; let (config, tokenizer, model) = if args.quantized { let ext = match args.model { WhichModel::TinyEn => "tiny-en", WhichModel::Tiny => "tiny", _ => unimplemented!("no quantized support for {:?}", args.model), }; ( repo.get(&format!("config-{ext}.json"))?, repo.get(&format!("tokenizer-{ext}.json"))?, repo.get(&format!("model-{ext}-q80.gguf"))?, ) } else { let config = repo.get("config.json")?; let tokenizer = repo.get("tokenizer.json")?; let model = repo.get("model.safetensors")?; (config, tokenizer, model) }; (config, tokenizer, model, sample) }; let config: Config = serde_json::from_str(&std::fs::read_to_string(config_filename)?)?; let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let mel_bytes = match config.num_mel_bins { 80 => include_bytes!("melfilters.bytes").as_slice(), 128 => include_bytes!("melfilters128.bytes").as_slice(), nmel => anyhow::bail!("unexpected num_mel_bins {nmel}"), }; let mut mel_filters = vec![0f32; mel_bytes.len() / 4]; <byteorder::LittleEndian as byteorder::ByteOrder>::read_f32_into(mel_bytes, &mut mel_filters); let (pcm_data, sample_rate) = pcm_decode::pcm_decode(input)?; if sample_rate != m::SAMPLE_RATE as u32 { anyhow::bail!("input file must have a {} sampling rate", m::SAMPLE_RATE) } println!("pcm data loaded {}", pcm_data.len()); let mel = audio::pcm_to_mel(&config, &pcm_data, &mel_filters); let mel_len = mel.len(); let mel = Tensor::from_vec( mel, (1, config.num_mel_bins, mel_len / config.num_mel_bins), &device, )?; println!("loaded mel: {:?}", mel.dims()); let mut model = if args.quantized { let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf( &weights_filename, &device, )?; Model::Quantized(m::quantized_model::Whisper::load(&vb, config)?) } else { let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[weights_filename], m::DTYPE, &device)? }; Model::Normal(m::model::Whisper::load(&vb, config)?) }; let language_token = match (args.model.is_multilingual(), args.language) { (true, None) => Some(multilingual::detect_language(&mut model, &tokenizer, &mel)?), (false, None) => None, (true, Some(language)) => match token_id(&tokenizer, &format!("<|{language}|>")) { Ok(token_id) => Some(token_id), Err(_) => anyhow::bail!("language {language} is not supported"), }, (false, Some(_)) => { anyhow::bail!("a language cannot be set for non-multilingual models") } }; let mut dc = Decoder::new( model, tokenizer, args.seed, &device, language_token, args.task, args.timestamps, args.verbose, )?; dc.run(&mel)?; Ok(()) }
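
// Example invocation (illustrative; the flags are the clap arguments defined above):
//   cargo run --example whisper --release -- --model tiny.en --input /path/to/audio.wav
// Use a multilingual model (e.g. --model tiny) together with --language or --task translate,
// or pass --input sample:jfk to fetch a sample clip instead of a local file.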
candle/candle-examples/examples/whisper/main.rs/0
{ "file_path": "candle/candle-examples/examples/whisper/main.rs", "repo_id": "candle", "token_count": 10793 }
29
/****************************************************************************** * Copyright (c) 2023, Tri Dao. ******************************************************************************/ #pragma once namespace flash { //////////////////////////////////////////////////////////////////////////////////////////////////// template<bool Varlen=true> struct BlockInfo { template<typename Params> __device__ BlockInfo(const Params &params, const int bidb) : sum_s_q(!Varlen || params.cu_seqlens_q == nullptr ? -1 : params.cu_seqlens_q[bidb]) , sum_s_k(!Varlen || params.cu_seqlens_k == nullptr || !params.is_seqlens_k_cumulative ? -1 : params.cu_seqlens_k[bidb]) , actual_seqlen_q(!Varlen || params.cu_seqlens_q == nullptr ? params.seqlen_q : params.cu_seqlens_q[bidb + 1] - sum_s_q) // If is_seqlens_k_cumulative, then seqlen_k is cu_seqlens_k[bidb + 1] - cu_seqlens_k[bidb]. // Otherwise it's cu_seqlens_k[bidb], i.e., we use cu_seqlens_k to store the sequence lengths of K. , seqlen_k_cache(!Varlen || params.cu_seqlens_k == nullptr ? params.seqlen_k : (params.is_seqlens_k_cumulative ? params.cu_seqlens_k[bidb + 1] - sum_s_k : params.cu_seqlens_k[bidb])) , actual_seqlen_k(params.seqused_k ? params.seqused_k[bidb] : seqlen_k_cache + (params.knew_ptr == nullptr ? 0 : params.seqlen_knew)) { } template <typename index_t> __forceinline__ __device__ index_t q_offset(const index_t batch_stride, const index_t row_stride, const int bidb) const { return sum_s_q == -1 ? bidb * batch_stride : uint32_t(sum_s_q) * row_stride; } template <typename index_t> __forceinline__ __device__ index_t k_offset(const index_t batch_stride, const index_t row_stride, const int bidb) const { return sum_s_k == -1 ? bidb * batch_stride : uint32_t(sum_s_k) * row_stride; } const int sum_s_q; const int sum_s_k; const int actual_seqlen_q; // We have to have seqlen_k_cache declared before actual_seqlen_k, otherwise actual_seqlen_k is set to 0. const int seqlen_k_cache; const int actual_seqlen_k; }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace flash
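
// Illustrative usage (sketch, not part of this header): a kernel typically instantiates
//   const flash::BlockInfo</*Varlen=*/true> binfo(params, bidb);
// and then locates the start of the Q block for this batch element with
//   binfo.q_offset(params.q_batch_stride, params.q_row_stride, bidb)
// which falls back to bidb * batch_stride when cu_seqlens_q is null (fixed-length batches).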
candle/candle-flash-attn/kernels/block_info.h/0
{ "file_path": "candle/candle-flash-attn/kernels/block_info.h", "repo_id": "candle", "token_count": 857 }
30
/****************************************************************************** * Copyright (c) 2023, Tri Dao. ******************************************************************************/ #pragma once #include <assert.h> #include <stdint.h> #include <stdlib.h> #include <cuda_fp16.h> #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 #include <cuda_bf16.h> #endif #include <cute/tensor.hpp> #include <cutlass/array.h> #include <cutlass/cutlass.h> #include <cutlass/numeric_conversion.h> #include <cutlass/numeric_types.h> //////////////////////////////////////////////////////////////////////////////////////////////////// namespace flash { //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename T> __forceinline__ __device__ uint32_t relu2(const uint32_t x); template<> __forceinline__ __device__ uint32_t relu2<cutlass::half_t>(const uint32_t x) { uint32_t res; const uint32_t zero = 0u; #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 asm volatile("max.f16x2 %0, %1, %2;\n" : "=r"(res) : "r"(x), "r"(zero)); #else asm volatile( \ "{\n" \ "\t .reg .f16x2 sela;\n" \ "\t set.gtu.u32.f16x2 sela, %1, %2;\n" \ "\t and.b32 %0, sela, %1;\n" "}\n" : "=r"(res) : "r"(x), "r"(zero)); #endif return res; } #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 template<> __forceinline__ __device__ uint32_t relu2<cutlass::bfloat16_t>(const uint32_t x) { uint32_t res; const uint32_t zero = 0u; asm volatile("max.bf16x2 %0, %1, %2;\n" : "=r"(res) : "r"(x), "r"(zero)); return res; } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 template<typename T> __forceinline__ __device__ uint32_t convert_relu2(const float2 x); template<> __forceinline__ __device__ uint32_t convert_relu2<cutlass::half_t>(const float2 x) { uint32_t res; const uint32_t a = reinterpret_cast<const uint32_t&>(x.x); const uint32_t b = reinterpret_cast<const uint32_t&>(x.y); asm volatile("cvt.rn.relu.f16x2.f32 %0, %1, %2;\n" : "=r"(res) : "r"(b), "r"(a)); return res; } template<> __forceinline__ __device__ uint32_t convert_relu2<cutlass::bfloat16_t>(const float2 x) { uint32_t res; const uint32_t a = reinterpret_cast<const uint32_t&>(x.x); const uint32_t b = reinterpret_cast<const uint32_t&>(x.y); asm volatile("cvt.rn.relu.bf16x2.f32 %0, %1, %2;\n" : "=r"(res) : "r"(b), "r"(a)); return res; } #endif //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename T> struct MaxOp { __device__ __forceinline__ T operator()(T const & x, T const & y) { return x > y ? 
x : y; } }; template <> struct MaxOp<float> { // This is slightly faster __device__ __forceinline__ float operator()(float const &x, float const &y) { return max(x, y); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename T> struct SumOp { __device__ __forceinline__ T operator()(T const & x, T const & y) { return x + y; } }; //////////////////////////////////////////////////////////////////////////////////////////////////// template<int THREADS> struct Allreduce { static_assert(THREADS == 32 || THREADS == 16 || THREADS == 8 || THREADS == 4); template<typename T, typename Operator> static __device__ __forceinline__ T run(T x, Operator &op) { constexpr int OFFSET = THREADS / 2; x = op(x, __shfl_xor_sync(uint32_t(-1), x, OFFSET)); return Allreduce<OFFSET>::run(x, op); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// template<> struct Allreduce<2> { template<typename T, typename Operator> static __device__ __forceinline__ T run(T x, Operator &op) { x = op(x, __shfl_xor_sync(uint32_t(-1), x, 1)); return x; } }; //////////////////////////////////////////////////////////////////////////////////////////////////// template<bool A_in_regs=false, bool B_in_regs=false, typename Tensor0, typename Tensor1, typename Tensor2, typename Tensor3, typename Tensor4, typename TiledMma, typename TiledCopyA, typename TiledCopyB, typename ThrCopyA, typename ThrCopyB> __forceinline__ __device__ void gemm(Tensor0 &acc, Tensor1 &tCrA, Tensor2 &tCrB, Tensor3 const& tCsA, Tensor4 const& tCsB, TiledMma tiled_mma, TiledCopyA smem_tiled_copy_A, TiledCopyB smem_tiled_copy_B, ThrCopyA smem_thr_copy_A, ThrCopyB smem_thr_copy_B) { CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(acc)); // MMA_M CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(acc)); // MMA_N CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA); CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // M Tensor tCrB_copy_view = smem_thr_copy_B.retile_D(tCrB); CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // N if (!A_in_regs) { cute::copy(smem_tiled_copy_A, tCsA(_, _, _0{}), tCrA_copy_view(_, _, _0{})); } if (!B_in_regs) { cute::copy(smem_tiled_copy_B, tCsB(_, _, _0{}), tCrB_copy_view(_, _, _0{})); } #pragma unroll for (int i = 0; i < size<2>(tCrA); ++i) { if (i < size<2>(tCrA) - 1) { if (!A_in_regs) { cute::copy(smem_tiled_copy_A, tCsA(_, _, i + 1), tCrA_copy_view(_, _, i + 1)); } if (!B_in_regs) { cute::copy(smem_tiled_copy_B, tCsB(_, _, i + 1), tCrB_copy_view(_, _, i + 1)); } } cute::gemm(tiled_mma, tCrA(_, _, i), tCrB(_, _, i), acc); } } //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename Tensor0, typename Tensor1, typename Tensor2, typename Tensor3, typename TiledMma, typename TiledCopy, typename ThrCopy> __forceinline__ __device__ void gemm_rs(Tensor0 &acc, Tensor1 &tCrA, Tensor2 &tCrB, Tensor3 const& tCsB, TiledMma tiled_mma, TiledCopy smem_tiled_copy_B, ThrCopy smem_thr_copy_B) { CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(acc)); // MMA_M CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(acc)); // MMA_N CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K Tensor tCrB_copy_view = smem_thr_copy_B.retile_D(tCrB); CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // N cute::copy(smem_tiled_copy_B, tCsB(_, _, _0{}), tCrB_copy_view(_, _, _0{})); #pragma unroll 
for (int i = 0; i < size<2>(tCrA); ++i) { if (i < size<2>(tCrA) - 1) { cute::copy(smem_tiled_copy_B, tCsB(_, _, i + 1), tCrB_copy_view(_, _, i + 1)); } cute::gemm(tiled_mma, tCrA(_, _, i), tCrB(_, _, i), acc); } } //////////////////////////////////////////////////////////////////////////////////////////////////// // Convert acc_layout from (MMA=4, MMA_M, MMA_N) to (nrow=(2, MMA_M), ncol=(2, MMA_N)) template<typename Layout> __forceinline__ __device__ auto convert_layout_acc_rowcol(Layout acc_layout) { static_assert(decltype(size<0>(acc_layout))::value == 4); static_assert(decltype(rank(acc_layout))::value == 3); auto l = logical_divide(acc_layout, Shape<_2>{}); // ((2, 2), MMA_M, MMA_N) return make_layout(make_layout(get<0, 1>(l), get<1>(l)), make_layout(get<0, 0>(l), get<2>(l))); }; //////////////////////////////////////////////////////////////////////////////////////////////////// // Convert acc_layout from (MMA=4, MMA_M, MMA_N) to ((4, 2), MMA_M, MMA_N / 2) // if using m16n8k16, or to (4, MMA_M, MMA_N) if using m16n8k8. template<typename MMA_traits, typename Layout> __forceinline__ __device__ auto convert_layout_acc_Aregs(Layout acc_layout) { using X = Underscore; static_assert(decltype(size<0>(acc_layout))::value == 4); static_assert(decltype(rank(acc_layout))::value == 3); constexpr int mma_shape_K = get<2>(typename MMA_traits::Shape_MNK{}); static_assert(mma_shape_K == 8 || mma_shape_K == 16); if constexpr (mma_shape_K == 8) { return acc_layout; } else { auto l = logical_divide(acc_layout, Shape<X, X, _2>{}); // (4, MMA_M, (2, MMA_N / 2))) return make_layout(make_layout(get<0>(l), get<2, 0>(l)), get<1>(l), get<2, 1>(l)); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// // Convert acc_layout from (MMA=4, MMA_M, MMA_N) to ((4, 2), MMA_M, MMA_N / 2) template<typename Layout> __forceinline__ __device__ auto convert_layout_acc_dropout(Layout acc_layout) { using X = Underscore; static_assert(decltype(size<0>(acc_layout))::value == 4); static_assert(decltype(rank(acc_layout))::value == 3); auto l = logical_divide(acc_layout, Shape<X, X, _2>{}); // (4, MMA_M, (2, MMA_N / 2))) return make_layout(make_layout(get<0>(l), get<2, 0>(l)), get<1>(l), get<2, 1>(l)); }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <typename To_type, typename Engine, typename Layout> __forceinline__ __device__ auto convert_type(Tensor<Engine, Layout> const &tensor) { using From_type = typename Engine::value_type; constexpr int numel = decltype(size(tensor))::value; cutlass::NumericArrayConverter<To_type, From_type, numel> convert_op; // HACK: this requires tensor to be "contiguous" auto frag = convert_op(*reinterpret_cast<const cutlass::Array<From_type, numel> *>(tensor.data())); return make_tensor(make_rmem_ptr<To_type>(&frag), tensor.layout()); } //////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Engine, typename Layout> __forceinline__ __device__ void relu_(Tensor<Engine, Layout> &tensor) { constexpr int numel = decltype(size(tensor))::value; static_assert(numel % 2 == 0); using value_t = typename Engine::value_type; // HACK: this requires tensor to be "contiguous" Tensor tensor_uint32 = recast<uint32_t>(tensor); #pragma unroll for (int i = 0; i < size(tensor_uint32); ++i) { tensor_uint32(i) = relu2<value_t>(tensor_uint32(i)); } } 
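
// Note on relu_ / relu2 above: the tensor is reinterpreted as packed pairs of fp16/bf16 values so
// that a single max.f16x2 / max.bf16x2 instruction (or the pre-SM80 set/and fallback) applies the
// ReLU to two elements at once; this is also why the element count is asserted to be even.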
//////////////////////////////////////////////////////////////////////////////////////////////////// // On SM80 and above, we can fuse fp32 -> fp16/bf16 conversion and relu into 1 instruction template <typename To_type, typename Engine, typename Layout> __forceinline__ __device__ auto convert_type_relu(Tensor<Engine, Layout> const &tensor) { using From_type = typename Engine::value_type; static_assert(std::is_same_v<To_type, cutlass::half_t> || std::is_same_v<To_type, cutlass::bfloat16_t>); static_assert(std::is_same_v<float, From_type>); constexpr int numel = decltype(size(tensor))::value; static_assert(numel % 2 == 0); #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 // HACK: this requires tensor to be "contiguous" Tensor tensor_float2 = recast<float2>(tensor); Tensor out_uint32 = make_tensor<uint32_t>(tensor_float2.layout()); #pragma unroll for (int i = 0; i < size(out_uint32); ++i) { out_uint32(i) = convert_relu2<To_type>(tensor_float2(i)); } Tensor out = make_tensor(make_rmem_ptr<To_type>(out_uint32.data()), tensor.layout()); #else Tensor out = flash::convert_type<To_type>(tensor); flash::relu_(out); #endif return out; } //////////////////////////////////////////////////////////////////////////////////////////////////// // Blocks until all but N previous cp.async.commit_group operations have committed. // This differs from cute::cp_async_wait in that when N = 0 we don't call cp.async.wait_all // (which is equivalent to commit_group then wait_group 0). // Instead we just call cp.async.wait_group 0, which is slightly faster. // https://github.com/NVIDIA/cutlass/blob/master/include/cute/arch/copy_sm80.hpp#L113 template <int N> CUTE_HOST_DEVICE void cp_async_wait() { #if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED) asm volatile("cp.async.wait_group %0;\n" :: "n"(N)); #endif } //////////////////////////////////////////////////////////////////////////////////////////////////// template <bool Is_even_MN=true, bool Is_even_K=true, bool Clear_OOB_MN=false, bool Clear_OOB_K=true, typename TiledCopy, typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Engine2, typename Layout2, typename Engine3, typename Layout3> __forceinline__ __device__ void copy(TiledCopy tiled_copy, Tensor<Engine0, Layout0> const &S, Tensor<Engine1, Layout1> &D, Tensor<Engine2, Layout2> const &identity_MN, Tensor<Engine3, Layout3> const &predicate_K, const int max_MN=0) { CUTE_STATIC_ASSERT_V(rank(S) == Int<3>{}); CUTE_STATIC_ASSERT_V(rank(D) == Int<3>{}); CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D)); // MMA CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(D)); // MMA_M CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(D)); // MMA_K // There's no case where !Clear_OOB_K && Clear_OOB_MN static_assert(!(Clear_OOB_MN && !Clear_OOB_K)); #pragma unroll for (int m = 0; m < size<1>(S); ++m) { if (Is_even_MN || get<0>(identity_MN(0, m, 0)) < max_MN) { #pragma unroll for (int k = 0; k < size<2>(S); ++k) { if (Is_even_K || predicate_K(k)) { cute::copy(tiled_copy, S(_, m, k), D(_, m, k)); } else if (Clear_OOB_K) { cute::clear(D(_, m, k)); } } } else if (Clear_OOB_MN) { cute::clear(D(_, m, _)); } } // TD [2023-04-13]: Strange that the code below can cause race condition. // I think it's because the copies are under an if statement. 
// if (Is_even_K) { // #pragma unroll // for (int m = 0; m < size<1>(S); ++m) { // if (Is_even_MN || get<0>(identity_MN(0, m, 0)) < max_MN) { // copy(tiled_copy, S(_, m, _), D(_, m, _)); // } else if (Clear_OOB_MN) { // clear(D(_, m, _)); // } // } // } else { // It's slightly faster in this case if iterate over K first // #pragma unroll // for (int k = 0; k < size<2>(S); ++k) { // if (predicate_K(k)) { // #pragma unroll // for (int m = 0; m < size<1>(S); ++m) { // if (Is_even_MN || get<0>(identity_MN(0, m, 0)) < max_MN) { // copy(tiled_copy, S(_, m, k), D(_, m, k)); // } else if (Clear_OOB_MN) { // clear(D(_, m, k)); // } // } // } else if (Clear_OOB_K) { // There's no case where !Clear_OOB_K && Clear_OOB_MN // if (Clear_OOB_MN || Is_even_MN) { // clear(D(_, _, k)); // } else { // #pragma unroll // for (int m = 0; m < size<1>(S); ++m) { // if (!(Is_even_MN || get<0>(identity_MN(0, m, 0)) < max_MN)) { // clear(D(_, m, k)); // } // } // } // } // } // } } //////////////////////////////////////////////////////////////////////////////////////////////////// template <bool Is_even_K=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Engine2, typename Layout2, typename Engine3, typename Layout3> __forceinline__ __device__ void copy_w_min_idx(Tensor<Engine0, Layout0> const &S, Tensor<Engine1, Layout1> &D, Tensor<Engine2, Layout2> const &identity_MN, Tensor<Engine3, Layout3> const &predicate_K, const int max_MN=0, const int min_MN=0) { CUTE_STATIC_ASSERT_V(rank(S) == Int<3>{}); CUTE_STATIC_ASSERT_V(rank(D) == Int<3>{}); CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D)); // MMA CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(D)); // MMA_M CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(D)); // MMA_K // if (threadIdx.x == 0 && blockIdx.z == 0) { printf("blockIdx.y = %d, max_MN = %d, min_MN = %d\n", blockIdx.y, max_MN, min_MN); } #pragma unroll for (int m = 0; m < size<1>(S); ++m) { // if (threadIdx.x == 0 && blockIdx.z == 0) { printf("blockIdx.y = %d, m = %d\n", blockIdx.y, get<0>(identity_MN(0, m, 0))); } if (get<0>(identity_MN(0, m, 0)) >= min_MN && get<0>(identity_MN(0, m, 0)) < max_MN) { // if (threadIdx.x == 0 && blockIdx.z == 0) { printf("Inner loop, blockIdx.y = %d, m = %d\n", blockIdx.y, get<0>(identity_MN(0, m, 0))); } #pragma unroll for (int k = 0; k < size<2>(S); ++k) { if (Is_even_K || predicate_K(k)) { cute::copy(S(_, m, k), D(_, m, k)); } } } } } //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace flash
candle/candle-flash-attn/kernels/utils.h/0
{ "file_path": "candle/candle-flash-attn/kernels/utils.h", "repo_id": "candle", "token_count": 7823 }
31
pub const AFFINE: &str = include_str!(concat!(env!("OUT_DIR"), "/affine.ptx")); pub const BINARY: &str = include_str!(concat!(env!("OUT_DIR"), "/binary.ptx")); pub const CAST: &str = include_str!(concat!(env!("OUT_DIR"), "/cast.ptx")); pub const CONV: &str = include_str!(concat!(env!("OUT_DIR"), "/conv.ptx")); pub const FILL: &str = include_str!(concat!(env!("OUT_DIR"), "/fill.ptx")); pub const INDEXING: &str = include_str!(concat!(env!("OUT_DIR"), "/indexing.ptx")); pub const QUANTIZED: &str = include_str!(concat!(env!("OUT_DIR"), "/quantized.ptx")); pub const REDUCE: &str = include_str!(concat!(env!("OUT_DIR"), "/reduce.ptx")); pub const SORT: &str = include_str!(concat!(env!("OUT_DIR"), "/sort.ptx")); pub const TERNARY: &str = include_str!(concat!(env!("OUT_DIR"), "/ternary.ptx")); pub const UNARY: &str = include_str!(concat!(env!("OUT_DIR"), "/unary.ptx"));
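
// Each constant embeds PTX generated at build time (OUT_DIR is populated by this crate's build
// script) from the corresponding .cu source. The CUDA backend then loads individual kernels out of
// these modules by name, e.g. (sketch of a typical call site):
//   dev.get_or_load_func(&kernel_name::<T>("usigmoid"), candle_kernels::UNARY)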
candle/candle-kernels/src/lib.rs/0
{ "file_path": "candle/candle-kernels/src/lib.rs", "repo_id": "candle", "token_count": 365 }
32
#include <metal_stdlib> #include <metal_integer> #include <metal_atomic> using namespace metal; // Constants // 2^32 and 1/2^32. Useful for converting between float and uint. static constexpr constant ulong UNIF01_NORM32 = 4294967296; static constexpr constant float UNIF01_INV32 = 2.328306436538696289e-10; // 2 * pi static constexpr constant float TWO_PI = 2.0 * M_PI_F; static constexpr constant int3 S1 = {13, 19, 12}; static constexpr constant int3 S2 = {2, 25, 4}; static constexpr constant int3 S3 = {3, 11, 17}; // Used to prevent bad seeds. static constexpr constant uint64_t PHI[16] = { 0x9E3779B97F4A7C15, 0xF39CC0605CEDC834, 0x1082276BF3A27251, 0xF86C6A11D0C18E95, 0x2767F0B153D27B7F, 0x0347045B5BF1827F, 0x01886F0928403002, 0xC1D64BA40F335E36, 0xF06AD7AE9717877E, 0x85839D6EFFBD7DC6, 0x64D325D1C5371682, 0xCADD0CCCFDFFBBE1, 0x626E33B8D04B4331, 0xBBF73C790D94F79D, 0x471C4AB3ED3D82A5, 0xFEC507705E4AE6E5, }; // Combined Tausworthe and LCG Random Number Generator. // https://developer.nvidia.com/gpugems/gpugems3/part-vi-gpu-computing/chapter-37-efficient-random-number-generation-and-application // https://indico.cern.ch/event/93877/contributions/2118070/attachments/1104200/1575343/acat3_revised_final.pdf struct HybridTaus { float state; HybridTaus() thread = default; HybridTaus() threadgroup = default; HybridTaus() device = default; HybridTaus() constant = default; // Generate seeds for each thread. METAL_FUNC static uint4 seed_per_thread(const ulong4 seeds) { return uint4(ulong4(seeds) * ulong4(PHI[0], PHI[1], PHI[2], PHI[3]) * ulong4(1099087573UL)); } // Tausworthe generator. METAL_FUNC static uint taus(const uint z, const int3 s, const uint M) { uint b = (((z << s.x) ^ z) >> s.y); return (((z & M) << s.z) ^ b); } // LCG generator. METAL_FUNC static uint lcg(const uint z) { return (1664525 * z + 1013904223UL); } // Initialize the RNG state. METAL_FUNC static HybridTaus init(const ulong4 seeds) { uint4 seed = seed_per_thread(seeds); // Seed #1 uint z1 = taus(seed.x, S1, 4294967294UL); uint z2 = taus(seed.y, S2, 4294967288UL); uint z3 = taus(seed.z, S3, 4294967280UL); uint z4 = lcg(seed.x); // Seed #2 uint r1 = (z1^z2^z3^z4^seed.y); z1 = taus(r1, S1, 429496729UL); z2 = taus(r1, S2, 4294967288UL); z3 = taus(r1, S3, 429496280UL); z4 = lcg(r1); // Seed #3 r1 = (z1^z2^z3^z4^seed.z); z1 = taus(r1, S1, 429496729UL); z2 = taus(r1, S2, 4294967288UL); z3 = taus(r1, S3, 429496280UL); z4 = lcg(r1); // Seed #4 r1 = (z1^z2^z3^z4^seed.w); z1 = taus(r1, S1, 429496729UL); z2 = taus(r1, S2, 4294967288UL); z3 = taus(r1, S3, 429496280UL); z4 = lcg(r1); HybridTaus rng; rng.state = (z1^z2^z3^z4) * UNIF01_INV32; return rng; } METAL_FUNC float rand() { uint seed = this->state * UNIF01_NORM32; uint z1 = taus(seed, S1, 429496729UL); uint z2 = taus(seed, S2, 4294967288UL); uint z3 = taus(seed, S3, 429496280UL); uint z4 = lcg(seed); thread float result = this->state; this->state = (z1^z2^z3^z4) * UNIF01_INV32; return result; } }; template<typename T> METAL_FUNC void rand_uniform( constant size_t &size, constant float &min, constant float &max, device atomic_uint *seed, device T *out, uint tid [[thread_position_in_grid]] ) { if (tid >= size) { return; } // Evenly sized vectors need an offset when writing the mirror element. 
uint off = 1 - size % 2; float diff = abs(min - max); uint s = atomic_load_explicit(seed, memory_order_relaxed); HybridTaus rng = HybridTaus::init({ulong(s), tid, 1, 1}); out[tid] = static_cast<T>(rng.rand() * diff + min); if (tid == 0) { atomic_store_explicit(seed, uint(rng.rand() * UNIF01_NORM32), memory_order_relaxed); // Return early if tid == 0 && off == 0, otherwise we will write to out[size]. if (off == 0) return; } // Use symmetry to fill the other half of the array. out[size - off - tid] = static_cast<T>(rng.rand() * diff + min); } // Create Gaussian normal distribution using Box-Muller transform: // https://en.wikipedia.org/wiki/Box–Muller_transform template<typename T> METAL_FUNC void normal( constant size_t &size, constant float &mean, constant float &stddev, device atomic_uint *seed, device T *out, uint tid [[thread_position_in_grid]] ) { if (tid >= size) { return; } // Evenly sized vectors need an offset when writing the mirror element. uint off = 1 - size % 2; uint s = atomic_load_explicit(seed, memory_order_relaxed); HybridTaus rng = HybridTaus::init({ulong(s), tid, 1, 1}); float u1 = rng.rand(); float u2 = rng.rand(); float cosval; float sinval = sincos(TWO_PI * u2, cosval); float mag = stddev * sqrt(-2.0 * log(u1)); float z0 = mag * cosval + mean; float z1 = mag * sinval + mean; out[tid] = static_cast<T>(z0); if (tid == 0) { atomic_store_explicit(seed, uint(rng.rand() * UNIF01_NORM32), memory_order_relaxed); // Return early if tid == 0 && off == 0, otherwise we will write to out[size]. if (off == 0) return; } // Use symmetry to fill the other half of the array. out[size - off - tid] = static_cast<T>(z1); } #define UNIFORM_OP(NAME, T) \ kernel void rand_uniform_##NAME( \ constant size_t &size, \ constant float &min, \ constant float &max, \ device atomic_uint *seed, \ device T *out, \ uint tid [[thread_position_in_grid]] \ ) { \ rand_uniform<T>(size, min, max, seed, out, tid); \ } \ #define NORMAL_OP(NAME, T) \ kernel void rand_normal_##NAME( \ constant size_t &size, \ constant float &mean, \ constant float &stddev, \ device atomic_uint *seed, \ device T *out, \ uint tid [[thread_position_in_grid]] \ ) { \ normal<T>(size, mean, stddev, seed, out, tid); \ } \ #define RANDOM_OPS(NAME, T) \ UNIFORM_OP(NAME, T) \ NORMAL_OP(NAME, T) \ RANDOM_OPS(f32, float) RANDOM_OPS(f16, half) #if __METAL_VERSION__ >= 310 RANDOM_OPS(bf16, bfloat) #endif
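
// Note on `normal` above: Box-Muller maps two uniform draws (u1, u2) to two independent standard
// normals, z0 = sqrt(-2 ln u1) * cos(2*pi*u2) and z1 = sqrt(-2 ln u1) * sin(2*pi*u2); each thread
// scales them by stddev, shifts by mean, and writes one to out[tid] and the mirrored one to
// out[size - off - tid], so a single dispatch fills the whole buffer.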
candle/candle-metal-kernels/src/random.metal/0
{ "file_path": "candle/candle-metal-kernels/src/random.metal", "repo_id": "candle", "token_count": 3671 }
33
use candle::{CpuStorage, DType, Layout, Module, Result, Shape, Tensor, D}; use rayon::prelude::*; /// Applies the softmax function to the input tensor, rescaling the element so that elements on /// a slice of fixed index on dimension `dim` are between 0 and 1 and sum to 1. /// /// ```rust /// use candle::{Tensor, Device, test_utils::to_vec2_round}; /// let a = Tensor::new(&[[0f32, 1., 0., 1.], [-2., 2., 3., -3.]], &Device::Cpu)?; /// let a = candle_nn::ops::softmax(&a, 1)?; /// assert_eq!( /// to_vec2_round(&a, 4)?, /// &[ /// [0.1345, 0.3655, 0.1345, 0.3655], /// [0.0049, 0.2671, 0.7262, 0.0018] /// ]); /// # Ok::<(), candle::Error>(()) /// ``` pub fn softmax<D: candle::shape::Dim>(xs: &Tensor, dim: D) -> Result<Tensor> { let dim = dim.to_index(xs.shape(), "softmax")?; let max = xs.max_keepdim(dim)?; let diff = xs.broadcast_sub(&max)?; let num = diff.exp()?; let den = num.sum_keepdim(dim)?; num.broadcast_div(&den) } pub fn log_softmax<D: candle::shape::Dim>(xs: &Tensor, d: D) -> Result<Tensor> { let d = d.to_index(xs.shape(), "log-softmax")?; let max = xs.max_keepdim(d)?; let diff = xs.broadcast_sub(&max)?; let sum_exp = diff.exp()?.sum_keepdim(d)?; let log_sm = diff.broadcast_sub(&sum_exp.log()?)?; Ok(log_sm) } pub fn silu(xs: &Tensor) -> Result<Tensor> { xs.silu() } pub fn swiglu(xs: &Tensor) -> Result<Tensor> { let xs = xs.chunk(2, D::Minus1)?; &xs[0].silu()? * &xs[1] } struct Sigmoid; impl candle::CustomOp1 for Sigmoid { fn name(&self) -> &'static str { "sigmoid" } fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> { use candle::backend::BackendStorage; fn fwd<T: num_traits::Float>(v: T) -> T { (v.neg().exp() + T::one()).recip() } // FIXME: using `candle::map_dtype` causes compilation errors. let storage = match storage { CpuStorage::BF16(slice) => { CpuStorage::BF16(candle::cpu_backend::unary_map(slice, layout, fwd)) } CpuStorage::F16(slice) => { CpuStorage::F16(candle::cpu_backend::unary_map(slice, layout, fwd)) } CpuStorage::F32(slice) => { CpuStorage::F32(candle::cpu_backend::unary_map(slice, layout, fwd)) } CpuStorage::F64(slice) => { CpuStorage::F64(candle::cpu_backend::unary_map(slice, layout, fwd)) } _ => Err(candle::Error::UnsupportedDTypeForOp( storage.dtype(), self.name(), ))?, }; Ok((storage, layout.shape().clone())) } #[cfg(feature = "cuda")] fn cuda_fwd( &self, storage: &candle::CudaStorage, layout: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::backend::BackendStorage; use candle::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig, ValidAsZeroBits, }; use candle::cuda_backend::SlicePtrOrNull; use candle::cuda_backend::{kernel_name, kernels, Map1, WrapErr}; use candle::{CudaDevice, WithDType}; struct S; impl Map1 for S { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>> { let shape = layout.shape(); let dims = shape.dims(); let el_count = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(el_count as u32); let ds = SlicePtrOrNull::params_from_layout(dev, layout)?; let src = &src.slice(layout.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>("usigmoid"), kernels::UNARY)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(el_count) }.w()?; let params = (el_count, dims.len(), &ds, src, &out); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } let dev = storage.device(); let slice = S.map(&storage.slice, dev, layout)?; let dst = candle::CudaStorage { slice, device: dev.clone(), }; Ok((dst, layout.shape().clone())) } #[cfg(feature = "metal")] fn metal_fwd( &self, storage: &candle::MetalStorage, layout: &Layout, ) -> Result<(candle::MetalStorage, Shape)> { use candle::backend::BackendStorage; use candle::MetalError; let device = storage.device(); let dtype = storage.dtype(); let shape = layout.shape(); let el_count = shape.elem_count(); let buffer = device.new_buffer(el_count, dtype, "sigmoid")?; let command_buffer = device.command_buffer()?; command_buffer.set_label("sigmoid"); let src = candle_metal_kernels::BufferOffset { buffer: storage.buffer(), offset_in_bytes: layout.start_offset() * storage.dtype().size_in_bytes(), }; match (el_count % 2, dtype, layout.is_contiguous()) { (0, DType::BF16 | DType::F16, true) => { use candle_metal_kernels::unary::contiguous_tiled; let kernel_name = match dtype { DType::F16 => contiguous_tiled::sigmoid::HALF, DType::F32 => contiguous_tiled::sigmoid::FLOAT, DType::BF16 => contiguous_tiled::sigmoid::BFLOAT, dtype => { candle::bail!( "Metal contiguous_tiled unary sigmoid {dtype:?} not implemented" ) } }; candle_metal_kernels::call_unary_contiguous_tiled( device.metal_device(), &command_buffer, device.kernels(), kernel_name, el_count, src, &buffer, ) .map_err(MetalError::from)?; } (_, _, true) => { use candle_metal_kernels::unary::contiguous; let kernel_name = match dtype { DType::F16 => contiguous::sigmoid::HALF, DType::F32 => contiguous::sigmoid::FLOAT, DType::BF16 => contiguous::sigmoid::BFLOAT, dtype => { candle::bail!("Metal contiguous unary sigmoid {dtype:?} not implemented") } }; candle_metal_kernels::call_unary_contiguous( device.metal_device(), &command_buffer, device.kernels(), kernel_name, el_count, src, &buffer, ) .map_err(MetalError::from)?; } (_, _, false) => { use candle_metal_kernels::unary::strided; let kernel_name = match dtype { DType::F16 => strided::sigmoid::HALF, DType::F32 => strided::sigmoid::FLOAT, DType::BF16 => strided::sigmoid::BFLOAT, dtype => { candle::bail!("Metal strided unary sigmoid {dtype:?} not implemented") } }; let dst = candle_metal_kernels::BufferOffset::zero_offset(&buffer); candle_metal_kernels::call_unary_strided( device.metal_device(), &command_buffer, device.kernels(), kernel_name, layout.dims(), src, layout.stride(), dst, ) .map_err(MetalError::from)?; } } let new_storage = candle::MetalStorage::new(buffer, device.clone(), el_count, dtype); Ok((new_storage, layout.shape().clone())) } fn bwd(&self, _arg: &Tensor, res: &Tensor, grad_res: &Tensor) -> Result<Option<Tensor>> { // d/dx sigmoid(x) = (1 - sigmoid(x)) * sigmoid(x) let d_dx_sigmoid = res.ones_like()?.sub(res)?.mul(res)?; Ok(Some(grad_res.mul(&d_dx_sigmoid)?)) } } pub fn sigmoid(xs: &Tensor) -> Result<Tensor> { xs.apply_op1(Sigmoid) } pub fn hard_sigmoid(xs: &Tensor) -> Result<Tensor> { // TODO: Should we have a specialized op for this? ((xs + 3.0)? / 6.0)?.clamp(0f32, 1f32) } pub fn leaky_relu(xs: &Tensor, negative_slope: f64) -> Result<Tensor> { let zeros = xs.zeros_like()?; xs.maximum(&zeros)? + xs.minimum(&zeros)? * negative_slope } pub fn dropout(xs: &Tensor, drop_p: f32) -> Result<Tensor> { // This implementation is inefficient as it stores the full mask for the backward pass. // Instead we could just store the seed and have a specialized kernel that would both // generate the random mask and apply it. 
// Another easier optimization would be to be able to generate boolean mask using just a bit of // entropy per element rather than generating a full float per element. if !(0. ..1.).contains(&drop_p) { candle::bail!("dropout probability has to be in [0, 1), got {drop_p}") } let rand = Tensor::rand(0f32, 1f32, xs.shape(), xs.device())?; let scale = 1.0 / (1.0 - drop_p as f64); let drop_p = Tensor::new(drop_p, xs.device())?.broadcast_as(xs.shape())?; let mask = (rand.ge(&drop_p)?.to_dtype(xs.dtype())? * scale)?; xs * mask } #[derive(Clone, Debug)] pub struct Dropout { drop_p: f32, } impl Dropout { pub fn new(drop_p: f32) -> Dropout { Self { drop_p } } pub fn forward(&self, xs: &Tensor, train: bool) -> Result<Tensor> { if train { dropout(xs, self.drop_p) } else { Ok(xs.clone()) } } } impl candle::ModuleT for Dropout { fn forward_t(&self, xs: &Tensor, train: bool) -> Result<Tensor> { self.forward(xs, train) } } struct SoftmaxLastDim; impl candle::CustomOp1 for SoftmaxLastDim { fn name(&self) -> &'static str { "softmax-last-dim" } fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> { fn softmax<T: candle::WithDType + num_traits::Float>( src: &[T], layout: &Layout, ) -> Result<(CpuStorage, Shape)> { let src = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => &src[o1..o2], }; let el_count = layout.shape().elem_count(); let dims = layout.shape().dims(); let dim_m1 = dims[dims.len() - 1]; let mut dst = vec![T::zero(); el_count]; src.par_chunks(dim_m1) .zip(dst.par_chunks_mut(dim_m1)) .for_each(|(src, dst)| { let mut max = T::neg_infinity(); unsafe { T::vec_reduce_max(src.as_ptr(), &mut max, dim_m1) }; for (s, d) in src.iter().zip(dst.iter_mut()) { *d = (*s - max).exp(); } let mut sum_exp = T::zero(); unsafe { T::vec_reduce_sum(dst.as_ptr(), &mut sum_exp, dim_m1) }; for d in dst.iter_mut() { *d /= sum_exp } }); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, Shape::from_dims(dims))) } match storage { CpuStorage::BF16(slice) => softmax::<half::bf16>(slice, layout), CpuStorage::F16(slice) => softmax::<half::f16>(slice, layout), CpuStorage::F32(slice) => softmax::<f32>(slice, layout), CpuStorage::F64(slice) => softmax::<f64>(slice, layout), _ => candle::bail!("unsupported dtype for softmax {:?}", storage), } } #[cfg(feature = "cuda")] fn cuda_fwd( &self, storage: &candle::CudaStorage, layout: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig, }; use candle::cuda_backend::{kernel_name, kernels, Map1, WrapErr}; use candle::{CudaDevice, WithDType}; struct S; impl Map1 for S { fn f<T: DeviceRepr + WithDType>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>> { let src = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => src.slice(o1..o2), }; let el = layout.shape().elem_count(); let dims = layout.shape().dims(); let dim_m1 = dims[dims.len() - 1]; let (n_rows, n_cols) = (el / dim_m1, dim_m1); let cfg = LaunchConfig { grid_dim: (n_rows as u32, 1, 1), block_dim: (1, 32, 1), shared_mem_bytes: 0, }; let func = dev.get_or_load_func(&kernel_name::<T>("softmax"), kernels::REDUCE)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(el) }.w()?; let params = (&src, &dst, n_cols as i32); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(dst) } } use candle::backend::BackendStorage; let dev = storage.device(); let slice = S.map(&storage.slice, dev, layout)?; let dst = candle::cuda_backend::CudaStorage { slice, device: dev.clone(), }; Ok((dst, layout.shape().clone())) } #[cfg(feature = "metal")] fn metal_fwd( &self, storage: &candle::MetalStorage, layout: &Layout, ) -> Result<(candle::MetalStorage, Shape)> { use candle::backend::BackendStorage; let device = storage.device(); let command_buffer = device.command_buffer()?; let kernels = device.kernels(); let name = match storage.dtype() { DType::F32 => "softmax_f32", DType::F16 => "softmax_f16", DType::BF16 => "softmax_bf16", dtype => candle::bail!("softmax-last-dim is not implemented for {dtype:?}"), }; let n = layout.stride().len(); if !(layout.is_contiguous() && layout.stride()[n - 1] == 1) { candle::bail!("Non contiguous softmax-last-dim is not implemented"); } let last_dim = layout.dims()[layout.shape().rank() - 1]; let elem_count = layout.shape().elem_count(); let output = device.new_buffer(elem_count, storage.dtype(), "softmax")?; candle_metal_kernels::call_last_softmax( device.metal_device(), &command_buffer, kernels, name, elem_count, last_dim, storage.buffer(), layout.start_offset() * storage.dtype().size_in_bytes(), &output, ) .map_err(candle::Error::wrap)?; let newstorage = candle::MetalStorage::new(output, device.clone(), elem_count, storage.dtype()); Ok((newstorage, layout.shape().clone())) } } pub fn softmax_last_dim(xs: &Tensor) -> Result<Tensor> { xs.apply_op1_no_bwd(&SoftmaxLastDim) } #[derive(Debug, Clone)] struct RmsNorm { eps: f32, } impl candle::CustomOp2 for RmsNorm { fn name(&self) -> &'static str { "rms-norm" } fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, ) -> Result<(CpuStorage, Shape)> { use candle::backend::BackendStorage; let eps = self.eps; fn inner< T: candle::WithDType + num_traits::Float + num_traits::AsPrimitive<f32> + num_traits::FromPrimitive, >( src: &[T], layout: &Layout, alpha: &[T], alpha_layout: &Layout, eps: f32, ) -> Result<(CpuStorage, Shape)> { let src = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => &src[o1..o2], }; let alpha = match alpha_layout.contiguous_offsets() { None => candle::bail!("alpha has to be contiguous"), Some((o1, o2)) => &alpha[o1..o2], }; let el_count = layout.shape().elem_count(); let dims = layout.shape().dims(); let dim_m1 = dims[dims.len() - 1]; let mut dst = vec![T::zero(); el_count]; src.par_chunks(dim_m1) .zip(dst.par_chunks_mut(dim_m1)) .for_each(|(src, dst)| { let sum2 = src .iter() .map(|&v| { let v = v.as_(); v * v }) .sum::<f32>(); let m = (sum2 / dim_m1 as f32 + eps).sqrt(); let m = T::from_f32(m).unwrap_or_else(T::nan); for ((d, s), alpha) in dst.iter_mut().zip(src.iter()).zip(alpha) { *d = *s / m * *alpha } }); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, Shape::from_dims(dims))) } use CpuStorage as C; match (s1, s2) { (C::BF16(s1), C::BF16(s2)) => inner::<half::bf16>(s1, l1, s2, l2, eps), (C::F16(s1), C::F16(s2)) => inner::<half::f16>(s1, l1, s2, l2, eps), (C::F32(s1), C::F32(s2)) => inner::<f32>(s1, l1, s2, l2, eps), _ => candle::bail!("unsupported dtype for rmsnorm {:?}", s1.dtype()), } } #[cfg(feature = "cuda")] fn cuda_fwd( &self, s1: &candle::CudaStorage, l1: &Layout, s2: &candle::CudaStorage, l2: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchAsync, 
LaunchConfig, }; use candle::cuda_backend::{kernel_name, kernels, Map2, WrapErr}; use candle::{CudaDevice, WithDType}; struct S { eps: f32, } impl Map2 for S { fn f<T: DeviceRepr + WithDType>( &self, src: &CudaSlice<T>, layout: &Layout, alpha: &CudaSlice<T>, alpha_layout: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { let src = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => src.slice(o1..o2), }; let alpha = match alpha_layout.contiguous_offsets() { None => candle::bail!("alpha has to be contiguous"), Some((o1, o2)) => alpha.slice(o1..o2), }; let el = layout.shape().elem_count(); let dims = layout.shape().dims(); let dim_m1 = dims[dims.len() - 1]; let (n_rows, n_cols) = (el / dim_m1, dim_m1); let cfg = LaunchConfig { grid_dim: (n_rows as u32, 1, 1), block_dim: (1024, 1, 1), shared_mem_bytes: 0, }; let func = dev.get_or_load_func(&kernel_name::<T>("rmsnorm"), kernels::REDUCE)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(el) }.w()?; let params = (&src, &dst, &alpha, n_cols as i32, self.eps); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(dst) } } use candle::backend::BackendStorage; let dev = s1.device(); let slice = S { eps: self.eps }.map(&s1.slice, l1, &s2.slice, l2, dev)?; let dst = candle::cuda_backend::CudaStorage { slice, device: dev.clone(), }; Ok((dst, l1.shape().clone())) } #[cfg(feature = "metal")] fn metal_fwd( &self, s1: &candle::MetalStorage, l1: &Layout, s2: &candle::MetalStorage, l2: &Layout, ) -> Result<(candle::MetalStorage, Shape)> { use candle::backend::BackendStorage; let device = s1.device(); let command_buffer = device.command_buffer()?; let kernels = device.kernels(); let name = match (s1.dtype(), s2.dtype()) { (DType::F32, DType::F32) => "rmsnorm_f32", (DType::F16, DType::F16) => "rmsnorm_f16", (DType::BF16, DType::BF16) => "rmsnorm_bf16", (dt1, dt2) => candle::bail!("rmsnorm is not implemented for {dt1:?} {dt2:?}"), }; if !(l1.is_contiguous() && l2.is_contiguous()) { candle::bail!("Non contiguous rmsnorm is not implemented"); } let last_dim = l1.dims()[l1.shape().rank() - 1]; let elem_count = l1.shape().elem_count(); let output = device.new_buffer(elem_count, s1.dtype(), "rmsnorm")?; candle_metal_kernels::call_rms_norm( device.metal_device(), &command_buffer, kernels, name, elem_count, last_dim, self.eps, s1.buffer(), l1.start_offset() * s1.dtype().size_in_bytes(), s2.buffer(), l2.start_offset() * s2.dtype().size_in_bytes(), &output, ) .map_err(candle::Error::wrap)?; let newstorage = candle::MetalStorage::new(output, device.clone(), elem_count, s1.dtype()); Ok((newstorage, l1.shape().clone())) } } pub fn rms_norm_slow(x: &Tensor, alpha: &Tensor, eps: f32) -> Result<Tensor> { let x_dtype = x.dtype(); let internal_dtype = match x_dtype { DType::F16 | DType::BF16 => DType::F32, d => d, }; let hidden_size = x.dim(D::Minus1)?; let x = x.to_dtype(internal_dtype)?; let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? 
/ hidden_size as f64)?; let x_normed = x.broadcast_div(&(norm_x + eps as f64)?.sqrt()?)?; x_normed.to_dtype(x_dtype)?.broadcast_mul(alpha) } pub fn rms_norm(xs: &Tensor, alpha: &Tensor, eps: f32) -> Result<Tensor> { let hidden_size_xs = xs.dim(D::Minus1)?; let hidden_size_alpha = alpha.dims1()?; if hidden_size_xs != hidden_size_alpha { candle::bail!( "shape mismatch in rms-norm {:?} {:?}", xs.shape(), alpha.shape() ) } xs.apply_op2_no_bwd(alpha, &RmsNorm { eps }) } #[derive(Debug, Clone)] struct LayerNorm { eps: f32, } impl candle::CustomOp3 for LayerNorm { fn name(&self) -> &'static str { "layer-norm" } fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, s3: &CpuStorage, l3: &Layout, ) -> Result<(CpuStorage, Shape)> { use candle::backend::BackendStorage; let eps = self.eps; fn inner< T: candle::WithDType + num_traits::Float + num_traits::AsPrimitive<f32> + num_traits::FromPrimitive, >( src: &[T], layout: &Layout, alpha: &[T], alpha_layout: &Layout, beta: &[T], beta_layout: &Layout, eps: f32, ) -> Result<(CpuStorage, Shape)> { let src = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => &src[o1..o2], }; let alpha = match alpha_layout.contiguous_offsets() { None => candle::bail!("alpha has to be contiguous"), Some((o1, o2)) => &alpha[o1..o2], }; let beta = match beta_layout.contiguous_offsets() { None => candle::bail!("beta has to be contiguous"), Some((o1, o2)) => &beta[o1..o2], }; let el_count = layout.shape().elem_count(); let dims = layout.shape().dims(); let dim_m1 = dims[dims.len() - 1]; let mut dst = vec![T::zero(); el_count]; src.par_chunks(dim_m1) .zip(dst.par_chunks_mut(dim_m1)) .for_each(|(src, dst)| { let mut sum = 0f32; let mut sum2 = 0f32; for v in src { let v = v.as_(); sum += v; sum2 += v * v; } let mean = sum / dim_m1 as f32; let var = sum2 / dim_m1 as f32 - mean * mean; let inv_std = (var + eps).sqrt().recip(); for ((d, s), (alpha, beta)) in dst.iter_mut().zip(src.iter()).zip(alpha.iter().zip(beta)) { let alpha = alpha.as_(); let beta = beta.as_(); let d_ = (s.as_() - mean) * inv_std * alpha + beta; *d = T::from_f32(d_).unwrap_or_else(T::nan); } }); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, Shape::from_dims(dims))) } use CpuStorage as C; match (s1, s2, s3) { (C::BF16(s1), C::BF16(s2), C::BF16(s3)) => { inner::<half::bf16>(s1, l1, s2, l2, s3, l3, eps) } (C::F16(s1), C::F16(s2), C::F16(s3)) => inner::<half::f16>(s1, l1, s2, l2, s3, l3, eps), (C::F32(s1), C::F32(s2), C::F32(s3)) => inner::<f32>(s1, l1, s2, l2, s3, l3, eps), _ => candle::bail!("unsupported dtype for rmsnorm {:?}", s1.dtype()), } } #[cfg(feature = "cuda")] fn cuda_fwd( &self, s1: &candle::CudaStorage, l1: &Layout, s2: &candle::CudaStorage, l2: &Layout, s3: &candle::CudaStorage, l3: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig, }; use candle::cuda_backend::{kernel_name, kernels, Map3, WrapErr}; use candle::{CudaDevice, WithDType}; struct S { eps: f32, } impl Map3 for S { fn f<T: DeviceRepr + WithDType>( &self, src: &CudaSlice<T>, layout: &Layout, alpha: &CudaSlice<T>, alpha_layout: &Layout, beta: &CudaSlice<T>, beta_layout: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { let src = match layout.contiguous_offsets() { None => candle::bail!("input has to be contiguous"), Some((o1, o2)) => src.slice(o1..o2), }; let alpha = match alpha_layout.contiguous_offsets() { None => candle::bail!("alpha has 
to be contiguous"), Some((o1, o2)) => alpha.slice(o1..o2), }; let beta = match beta_layout.contiguous_offsets() { None => candle::bail!("beta has to be contiguous"), Some((o1, o2)) => beta.slice(o1..o2), }; let el = layout.shape().elem_count(); let dims = layout.shape().dims(); let dim_m1 = dims[dims.len() - 1]; let (n_rows, n_cols) = (el / dim_m1, dim_m1); let cfg = LaunchConfig { grid_dim: (n_rows as u32, 1, 1), block_dim: (1024, 1, 1), shared_mem_bytes: 0, }; let func = dev.get_or_load_func(&kernel_name::<T>("layernorm"), kernels::REDUCE)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(el) }.w()?; let params = (&src, &dst, &alpha, &beta, n_cols as i32, self.eps); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(dst) } } use candle::backend::BackendStorage; let dev = s1.device(); let slice = S { eps: self.eps }.map(&s1.slice, l1, &s2.slice, l2, &s3.slice, l3, dev)?; let dst = candle::cuda_backend::CudaStorage { slice, device: dev.clone(), }; Ok((dst, l1.shape().clone())) } #[cfg(feature = "metal")] fn metal_fwd( &self, s1: &candle::MetalStorage, l1: &Layout, s2: &candle::MetalStorage, l2: &Layout, s3: &candle::MetalStorage, l3: &Layout, ) -> Result<(candle::MetalStorage, Shape)> { use candle::backend::BackendStorage; let device = s1.device(); let command_buffer = device.command_buffer()?; let kernels = device.kernels(); let name = match (s1.dtype(), s2.dtype(), s3.dtype()) { (DType::F32, DType::F32, DType::F32) => "layernorm_f32", (DType::F16, DType::F16, DType::F16) => "layernorm_f16", (DType::BF16, DType::BF16, DType::BF16) => "layernorm_bf16", (dt1, dt2, dt3) => { candle::bail!("layernorm is not implemented for {dt1:?} {dt2:?} {dt3:?}") } }; if !(l1.is_contiguous() && l2.is_contiguous() && l3.is_contiguous()) { candle::bail!("Non contiguous layernorm is not implemented"); } let last_dim = l1.dims()[l1.shape().rank() - 1]; let elem_count = l1.shape().elem_count(); let output = device.new_buffer(elem_count, s1.dtype(), "layernorm")?; candle_metal_kernels::call_layer_norm( device.metal_device(), &command_buffer, kernels, name, elem_count, last_dim, self.eps, s1.buffer(), l1.start_offset() * s1.dtype().size_in_bytes(), s2.buffer(), l2.start_offset() * s2.dtype().size_in_bytes(), s3.buffer(), l3.start_offset() * s3.dtype().size_in_bytes(), &output, ) .map_err(candle::Error::wrap)?; let newstorage = candle::MetalStorage::new(output, device.clone(), elem_count, s1.dtype()); Ok((newstorage, l1.shape().clone())) } } pub fn layer_norm_slow(x: &Tensor, alpha: &Tensor, beta: &Tensor, eps: f32) -> Result<Tensor> { let x_dtype = x.dtype(); let internal_dtype = match x_dtype { DType::F16 | DType::BF16 => DType::F32, d => d, }; let hidden_size = x.dim(D::Minus1)?; let x = x.to_dtype(internal_dtype)?; let x = { let mean_x = (x.sum_keepdim(D::Minus1)? / hidden_size as f64)?; x.broadcast_sub(&mean_x)? }; let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?; let x_normed = x.broadcast_div(&(norm_x + eps as f64)?.sqrt()?)?; x_normed .to_dtype(x_dtype)? .broadcast_mul(alpha)? 
.broadcast_add(beta) } pub fn layer_norm(xs: &Tensor, alpha: &Tensor, beta: &Tensor, eps: f32) -> Result<Tensor> { let hidden_size_xs = xs.dim(D::Minus1)?; let hidden_size_alpha = alpha.dims1()?; let hidden_size_beta = beta.dims1()?; if hidden_size_xs != hidden_size_alpha || hidden_size_xs != hidden_size_beta { candle::bail!( "shape mismatch in layer-norm src: {:?} alpha: {:?} beta: {:?}", xs.shape(), alpha.shape(), beta.shape() ) } xs.apply_op3_no_bwd(alpha, beta, &LayerNorm { eps }) } // https://pytorch.org/docs/stable/generated/torch.nn.PixelShuffle.html pub fn pixel_shuffle(xs: &Tensor, upscale_factor: usize) -> Result<Tensor> { let (b_size, c, h, w) = xs.dims4()?; let out_c = c / upscale_factor / upscale_factor; xs.reshape((b_size, out_c, upscale_factor, upscale_factor, h, w))? .permute((0, 1, 4, 2, 5, 3))? .reshape((b_size, out_c, h * upscale_factor, w * upscale_factor)) } pub fn pixel_unshuffle(xs: &Tensor, downscale_factor: usize) -> Result<Tensor> { let (b_size, c, h, w) = xs.dims4()?; let out_c = c * downscale_factor * downscale_factor; xs.reshape(( b_size, c, h / downscale_factor, downscale_factor, w / downscale_factor, downscale_factor, ))? .permute((0, 1, 3, 5, 2, 4))? .reshape((b_size, out_c, h / downscale_factor, w / downscale_factor)) } // https://pytorch.org/docs/stable/generated/torch.nn.ReplicationPad2d.html pub fn replication_pad2d(xs: &Tensor, pad: usize) -> Result<Tensor> { match pad { 0 => Ok(xs.clone()), 1 => { let (_b_size, _c, h, w) = xs.dims4()?; let (first, last) = (xs.narrow(3, 0, 1)?, xs.narrow(3, w - 1, 1)?); let xs = Tensor::cat(&[&first, xs, &last], 3)?; let (first, last) = (xs.narrow(2, 0, 1)?, xs.narrow(2, h - 1, 1)?); Tensor::cat(&[&first, &xs, &last], 2) } n => candle::bail!("replication-pad with a size of {n} is not supported"), } } #[derive(Clone, Debug)] pub struct Identity; impl Identity { pub fn new() -> Identity { Self } } impl Default for Identity { fn default() -> Self { Self } } impl Module for Identity { fn forward(&self, xs: &Tensor) -> Result<Tensor> { Ok(xs.clone()) } }
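// --- Editor's usage sketch (not part of the upstream file) ---
// A minimal, hedged illustration of the public helpers defined above
// (`softmax_last_dim`, `rms_norm`, `layer_norm`). The tensor values, epsilons and
// the module/test names are illustrative assumptions, not upstream code.
#[cfg(test)]
mod usage_sketch {
    use super::{layer_norm, rms_norm, softmax_last_dim};
    use candle::{DType, Device, Result, Tensor};

    #[test]
    fn softmax_and_norms() -> Result<()> {
        let dev = Device::Cpu;
        let xs = Tensor::new(&[[1f32, 2., 3.], [4., 5., 6.]], &dev)?;
        // Softmax over the last dimension: each row sums to one.
        let probs = softmax_last_dim(&xs)?;
        assert_eq!(probs.dims(), &[2, 3]);
        // Both norms expect a per-channel scale (and a bias for layer-norm) whose
        // length equals the last dimension of the input.
        let alpha = Tensor::ones(3, DType::F32, &dev)?;
        let beta = Tensor::zeros(3, DType::F32, &dev)?;
        let _rms = rms_norm(&xs, &alpha, 1e-6)?;
        let _ln = layer_norm(&xs, &alpha, &beta, 1e-6)?;
        Ok(())
    }
}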
candle/candle-nn/src/ops.rs/0
{ "file_path": "candle/candle-nn/src/ops.rs", "repo_id": "candle", "token_count": 18821 }
34
# candle-onnx

This crate adds ONNX support to candle.

## FAQ

#### Missing protoc installation when compiling candle-onnx

The candle-onnx dependency `prost-build` no longer comes bundled with prost
binaries. This can cause the following error when attempting to compile
candle-onnx:

```
error: failed to run custom build command for `candle-onnx`

Caused by:
    // (...)
    Could not find `protoc` installation and this build crate cannot proceed without this knowledge.
```

To fix this issue, install protoc on your system and make it available in your
system `PATH`. See the [protoc documentation](https://grpc.io/docs/protoc-installation/)
for more information.
candle/candle-onnx/README.md/0
{ "file_path": "candle/candle-onnx/README.md", "repo_id": "candle", "token_count": 180 }
35
# Generated content DO NOT EDIT
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence
from os import PathLike
from candle.typing import _ArrayLike, Device, Scalar, Index, Shape
from candle import Tensor, DType, QTensor

@staticmethod
def avg_pool2d(tensor: Tensor, ksize: int, stride: int = 1) -> Tensor:
    """
    Applies the 2d avg-pool function to a given tensor.
    """
    pass

@staticmethod
def gelu(tensor: Tensor) -> Tensor:
    """
    Applies the Gaussian Error Linear Unit (GELU) function to a given tensor.
    """
    pass

@staticmethod
def max_pool2d(tensor: Tensor, ksize: int, stride: int = 1) -> Tensor:
    """
    Applies the 2d max-pool function to a given tensor.
    """
    pass

@staticmethod
def relu(tensor: Tensor) -> Tensor:
    """
    Applies the Rectified Linear Unit (ReLU) function to a given tensor.
    """
    pass

@staticmethod
def silu(tensor: Tensor) -> Tensor:
    """
    Applies the Sigmoid Linear Unit (SiLU) function to a given tensor.
    """
    pass

@staticmethod
def softmax(tensor: Tensor, dim: int) -> Tensor:
    """
    Applies the Softmax function to a given tensor.
    """
    pass

@staticmethod
def tanh(tensor: Tensor) -> Tensor:
    """
    Applies the tanh function to a given tensor.
    """
    pass
candle/candle-pyo3/py_src/candle/functional/__init__.pyi/0
{ "file_path": "candle/candle-pyo3/py_src/candle/functional/__init__.pyi", "repo_id": "candle", "token_count": 484 }
36
[project]
name = 'candle-nn'
requires-python = '>=3.7'
authors = [
    {name = 'The Candle Team'},
]
dynamic = [
    'description',
    'license',
    'readme',
]

[project.urls]
Homepage = 'https://github.com/huggingface/candle'
Source = 'https://github.com/huggingface/candle'

[build-system]
requires = ["maturin>=1.0,<2.0"]
build-backend = "maturin"

[tool.maturin]
python-source = "py_src"
module-name = "candle.candle"
bindings = 'pyo3'
features = ["pyo3/extension-module"]

[tool.black]
line-length = 119
target-version = ['py35']

[project.optional-dependencies]
testing = ["pytest", "black==22.3"]
huggingface = ["transformers>=4.33.3", "huggingface-hub>=0.17.3"]
candle/candle-pyo3/pyproject.toml/0
{ "file_path": "candle/candle-pyo3/pyproject.toml", "repo_id": "candle", "token_count": 285 }
37
[package]
name = "candle-transformers"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true
readme = "README.md"

[dependencies]
accelerate-src = { workspace = true, optional = true }
byteorder = { workspace = true }
candle = { workspace = true }
candle-flash-attn = { workspace = true, optional = true }
candle-nn = { workspace = true }
fancy-regex = { workspace = true }
intel-mkl-src = { workspace = true, optional = true }
num-traits = { workspace = true }
rand = { workspace = true }
rayon = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_plain = { workspace = true }
tracing = { workspace = true }

[features]
default = []
accelerate = ["dep:accelerate-src", "candle/accelerate", "candle-nn/accelerate"]
cuda = ["candle/cuda", "candle-nn/cuda"]
flash-attn = ["cuda", "dep:candle-flash-attn"]
mkl = ["dep:intel-mkl-src", "candle/mkl", "candle-nn/mkl"]
metal = ["candle/metal", "candle-nn/metal"]
candle/candle-transformers/Cargo.toml/0
{ "file_path": "candle/candle-transformers/Cargo.toml", "repo_id": "candle", "token_count": 372 }
38
//! ConvNeXt implementation. //! //! See "A ConvNet for the 2020s" Liu et al. 2022 //! <https://arxiv.org/abs/2201.03545> //! and //! "ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders" Woo et al. 2023 //! <https://arxiv.org/abs/2301.00808> //! Original code: //! https://github.com/facebookresearch/ConvNeXt/ //! https://github.com/facebookresearch/ConvNeXt-V2/ //! timm: https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/convnext.py use candle::shape::ShapeWithOneHole; use candle::{Result, D}; use candle_nn::{conv2d, layer_norm, linear, Conv2dConfig, Func, VarBuilder}; #[derive(Clone)] pub struct Config { blocks: [usize; 4], channels: [usize; 4], use_conv_mlp: bool, } impl Config { pub fn atto() -> Self { Self { blocks: [2, 2, 6, 2], channels: [40, 80, 160, 320], use_conv_mlp: true, } } pub fn femto() -> Self { Self { blocks: [2, 2, 6, 2], channels: [48, 96, 192, 384], use_conv_mlp: true, } } pub fn pico() -> Self { Self { blocks: [2, 2, 6, 2], channels: [64, 128, 256, 512], use_conv_mlp: true, } } pub fn nano() -> Self { Self { blocks: [2, 2, 8, 2], channels: [80, 160, 320, 640], use_conv_mlp: true, } } pub fn tiny() -> Self { Self { blocks: [3, 3, 9, 3], channels: [96, 192, 384, 768], use_conv_mlp: false, } } pub fn small() -> Self { Self { blocks: [3, 3, 27, 3], channels: [96, 192, 384, 768], use_conv_mlp: false, } } pub fn base() -> Self { Self { blocks: [3, 3, 27, 3], channels: [128, 256, 512, 1024], use_conv_mlp: false, } } pub fn large() -> Self { Self { blocks: [3, 3, 27, 3], channels: [192, 384, 768, 1536], use_conv_mlp: false, } } pub fn xlarge() -> Self { Self { blocks: [3, 3, 27, 3], channels: [256, 512, 1024, 2048], use_conv_mlp: false, } } pub fn huge() -> Self { Self { blocks: [3, 3, 27, 3], channels: [352, 704, 1408, 2816], use_conv_mlp: false, } } } // Layer norm for data in channels-last format. fn layer_norm_cl(dim: usize, vb: VarBuilder) -> Result<Func<'static>> { let norm = layer_norm(dim, 1e-6, vb)?; Ok(Func::new(move |xs| xs.apply(&norm))) } // Layer norm for data in channels-first format. fn layer_norm_cf(dim: usize, vb: VarBuilder) -> Result<Func<'static>> { let norm = layer_norm(dim, 1e-6, vb)?; Ok(Func::new(move |xs| { let xs = xs .permute((0, 2, 3, 1))? .apply(&norm)? .permute((0, 3, 1, 2))?; Ok(xs) })) } // Global response normalization layer // Based on https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/grn.py fn convnext2_grn(dim: usize, channels_last: bool, vb: VarBuilder) -> Result<Func<'static>> { let (shape, spatial_dim, channel_dim) = if channels_last { ((1, 1, 1, ()).into_shape(dim)?, [1, 2], 3) } else { ((1, (), 1, 1).into_shape(dim)?, [2, 3], 1) }; let gamma = vb.get(dim, "weight")?.reshape(&shape)?; let beta = vb.get(dim, "bias")?.reshape(&shape)?; Ok(Func::new(move |xs| { let residual = xs; let gx = xs .sqr()? .sum_keepdim(spatial_dim)? .mean_keepdim(spatial_dim)? .sqrt()?; let gxmean = gx.mean_keepdim(channel_dim)?; let nx = gx.broadcast_div(&(gxmean + 1e-6)?)?; let xs = xs .broadcast_mul(&nx)? .broadcast_mul(&gamma)? .broadcast_add(&beta)?; xs + residual })) } // Initial downsampling via a patchify layer. fn convnext_stem(out_channels: usize, vb: VarBuilder) -> Result<Func<'static>> { let conv2d_cfg = Conv2dConfig { stride: 4, ..Default::default() }; let patchify = conv2d(3, out_channels, 4, conv2d_cfg, vb.pp(0))?; let norm = layer_norm_cf(out_channels, vb.pp(1))?; Ok(Func::new(move |xs| xs.apply(&patchify)?.apply(&norm))) } // Downsampling applied after the stages. 
fn convnext_downsample(dim: usize, vb: VarBuilder) -> Result<Func<'static>> { let conv2d_cfg = Conv2dConfig { stride: 2, ..Default::default() }; let norm = layer_norm_cf(dim / 2, vb.pp(0))?; let conv = conv2d(dim / 2, dim, 2, conv2d_cfg, vb.pp(1))?; Ok(Func::new(move |xs| xs.apply(&norm)?.apply(&conv))) } // MLP block from the original paper with optional GRN layer (v2 models). fn convnext_mlp(dim: usize, vb: VarBuilder) -> Result<Func<'static>> { let fc1 = linear(dim, 4 * dim, vb.pp("fc1"))?; let fc2 = linear(4 * dim, dim, vb.pp("fc2"))?; let grn = convnext2_grn(4 * dim, true, vb.pp("grn")); Ok(Func::new(move |xs| { let mut xs = xs.apply(&fc1)?.gelu_erf()?; if let Ok(g) = &grn { xs = xs.apply(g)?; } xs = xs.apply(&fc2)?; Ok(xs) })) } // MLP block using pointwise convolutions, with optional GRN layer (v2 models). fn convnext_conv_mlp(dim: usize, vb: VarBuilder) -> Result<Func<'static>> { let conv2d_cfg = Conv2dConfig { ..Default::default() }; let fc1 = conv2d(dim, 4 * dim, 1, conv2d_cfg, vb.pp("fc1"))?; let fc2 = conv2d(4 * dim, dim, 1, conv2d_cfg, vb.pp("fc2"))?; let grn = convnext2_grn(4 * dim, false, vb.pp("grn")); Ok(Func::new(move |xs| { let mut xs = xs.apply(&fc1)?.gelu_erf()?; if let Ok(g) = &grn { xs = xs.apply(g)?; } xs = xs.apply(&fc2)?; Ok(xs) })) } // A block consisting of a depthwise convolution, a MLP and layer scaling (v1 models only). fn convnext_block(dim: usize, use_conv_mlp: bool, vb: VarBuilder) -> Result<Func<'static>> { let conv2d_cfg = Conv2dConfig { groups: dim, padding: 3, ..Default::default() }; let conv_dw = conv2d(dim, dim, 7, conv2d_cfg, vb.pp("conv_dw"))?; let gamma = vb.get(dim, "gamma"); let (mlp, norm) = if use_conv_mlp { ( convnext_conv_mlp(dim, vb.pp("mlp"))?, layer_norm_cf(dim, vb.pp("norm"))?, ) } else { ( convnext_mlp(dim, vb.pp("mlp"))?, layer_norm_cl(dim, vb.pp("norm"))?, ) }; Ok(Func::new(move |xs| { let residual = xs; let mut xs = xs.apply(&conv_dw)?; xs = if use_conv_mlp { xs.apply(&norm)?.apply(&mlp)? } else { xs.permute((0, 2, 3, 1))? .apply(&norm)? .apply(&mlp)? .permute((0, 3, 1, 2))? }; if let Ok(g) = &gamma { xs = xs.broadcast_mul(&g.reshape((1, (), 1, 1))?)?; }; xs + residual })) } // Each stage contains blocks and a downsampling layer for the previous stage. fn convnext_stage(cfg: &Config, stage_idx: usize, vb: VarBuilder) -> Result<Func<'static>> { let nblocks = cfg.blocks[stage_idx]; let mut blocks = Vec::with_capacity(nblocks); let dim = cfg.channels[stage_idx]; if stage_idx > 0 { blocks.push(convnext_downsample(dim, vb.pp("downsample"))?); } for block_idx in 0..nblocks { blocks.push(convnext_block( dim, cfg.use_conv_mlp, vb.pp(format!("blocks.{block_idx}")), )?); } Ok(Func::new(move |xs| { let mut xs = xs.clone(); for block in blocks.iter() { xs = xs.apply(block)? } Ok(xs) })) } // Classification head. fn convnext_head(outputs: usize, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> { let norm = layer_norm_cl(outputs, vb.pp("norm"))?; let linear = linear(outputs, nclasses, vb.pp("fc"))?; Ok(Func::new(move |xs| xs.apply(&norm)?.apply(&linear))) } // Build a convnext model for a given configuration. 
fn convnext_model( config: &Config, nclasses: Option<usize>, vb: VarBuilder, ) -> Result<Func<'static>> { let head = match nclasses { None => None, Some(nclasses) => { let head = convnext_head(config.channels[3], nclasses, vb.pp("head"))?; Some(head) } }; let stem = convnext_stem(config.channels[0], vb.pp("stem"))?; let vb = vb.pp("stages"); let stage1 = convnext_stage(config, 0, vb.pp(0))?; let stage2 = convnext_stage(config, 1, vb.pp(1))?; let stage3 = convnext_stage(config, 2, vb.pp(2))?; let stage4 = convnext_stage(config, 3, vb.pp(3))?; Ok(Func::new(move |xs| { let xs = xs .apply(&stem)? .apply(&stage1)? .apply(&stage2)? .apply(&stage3)? .apply(&stage4)? .mean(D::Minus2)? .mean(D::Minus1)?; match &head { None => Ok(xs), Some(head) => xs.apply(head), } })) } pub fn convnext(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> { convnext_model(cfg, Some(nclasses), vb) } pub fn convnext_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> { convnext_model(cfg, None, vb) }
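// --- Editor's usage sketch (not part of the upstream file) ---
// A hedged illustration of building the smallest preset with zero-initialized
// weights (via `VarBuilder::zeros`) purely to exercise the expected input/output
// shapes; the 10-class head, 32x32 input and test name are arbitrary assumptions.
#[cfg(test)]
mod usage_sketch {
    use super::{convnext, Config};
    use candle::{DType, Device, Result, Tensor};
    use candle_nn::VarBuilder;

    #[test]
    fn atto_forward_shape() -> Result<()> {
        let dev = Device::Cpu;
        // Zero weights are enough to check the shape handling of the network.
        let vb = VarBuilder::zeros(DType::F32, &dev);
        let model = convnext(&Config::atto(), 10, vb)?;
        // NCHW input; the stem downsamples by 4 and each later stage by 2.
        let xs = Tensor::zeros((1, 3, 32, 32), DType::F32, &dev)?;
        let logits = xs.apply(&model)?;
        assert_eq!(logits.dims(), &[1, 10]);
        Ok(())
    }
}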
candle/candle-transformers/src/models/convnext.rs/0
{ "file_path": "candle/candle-transformers/src/models/convnext.rs", "repo_id": "candle", "token_count": 4881 }
39
use std::sync::Arc; use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::{linear_b as linear, Activation, Linear, VarBuilder}; fn default_max_position_embeddings() -> usize { 4096 } #[derive(serde::Deserialize, Debug, Clone)] pub struct Config { pub attention_bias: bool, pub head_dim: usize, // The code gemma configs include both hidden_act and hidden_activation. pub hidden_act: Option<Activation>, pub hidden_activation: Option<Activation>, pub hidden_size: usize, pub intermediate_size: usize, pub num_attention_heads: usize, pub num_hidden_layers: usize, pub num_key_value_heads: usize, pub rms_norm_eps: f64, pub rope_theta: f64, pub vocab_size: usize, #[serde(default = "default_max_position_embeddings")] pub max_position_embeddings: usize, } impl Config { fn hidden_act(&self) -> Result<Activation> { match (self.hidden_act, self.hidden_activation) { (None, Some(act)) | (Some(act), None) => Ok(act), (Some(_), Some(_)) => candle::bail!("both hidden_act and hidden_activation are set"), (None, None) => candle::bail!("none of hidden_act and hidden_activation are set"), } } } #[derive(Debug, Clone)] struct RmsNorm { weight: Tensor, eps: f64, } impl RmsNorm { fn new(dim: usize, eps: f64, vb: VarBuilder) -> Result<Self> { let weight = vb.get(dim, "weight")?; Ok(Self { weight, eps }) } } impl Module for RmsNorm { fn forward(&self, x: &Tensor) -> Result<Tensor> { let x_dtype = x.dtype(); let internal_dtype = match x_dtype { DType::F16 | DType::BF16 => DType::F32, d => d, }; let hidden_size = x.dim(D::Minus1)?; let x = x.to_dtype(internal_dtype)?; let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?; let x_normed = x.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?; x_normed .to_dtype(x_dtype)? .broadcast_mul(&(&self.weight + 1.0)?) } } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg.head_dim; let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? 
.reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?; let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: candle_nn::Activation, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let gate_proj = linear(hidden_sz, intermediate_sz, false, vb.pp("gate_proj"))?; let up_proj = linear(hidden_sz, intermediate_sz, false, vb.pp("up_proj"))?; let down_proj = linear(intermediate_sz, hidden_sz, false, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_act()?, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.up_proj)?; (lhs * rhs)?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, use_flash_attn: bool, } impl Attention { fn new( rotary_emb: Arc<RotaryEmbedding>, use_flash_attn: bool, cfg: &Config, vb: VarBuilder, ) -> Result<Self> { let hidden_sz = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let head_dim = cfg.head_dim; let bias = cfg.attention_bias; let q_proj = linear(hidden_sz, num_heads * head_dim, bias, vb.pp("q_proj"))?; let k_proj = linear(hidden_sz, num_kv_heads * head_dim, bias, vb.pp("k_proj"))?; let v_proj = linear(hidden_sz, num_kv_heads * head_dim, bias, vb.pp("v_proj"))?; let o_proj = linear(num_heads * head_dim, hidden_sz, bias, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups, head_dim, rotary_emb, kv_cache: None, use_flash_attn, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? 
.transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?; let attn_output = if self.use_flash_attn { // flash-attn expects (b_sz, seq_len, nheads, head_dim) let q = query_states.transpose(1, 2)?; let k = key_states.transpose(1, 2)?; let v = value_states.transpose(1, 2)?; let scale = 1f32 / (self.head_dim as f32).sqrt(); flash_attn(&q, &k, &v, scale, attention_mask.is_some())?.transpose(1, 2)? } else { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, ()))? .apply(&self.o_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[cfg(feature = "flash-attn")] fn flash_attn( q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal) } #[cfg(not(feature = "flash-attn"))] fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> { unimplemented!("compile with '--features flash-attn'") } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, input_layernorm: RmsNorm, post_attention_layernorm: RmsNorm, } impl DecoderLayer { fn new( rotary_emb: Arc<RotaryEmbedding>, use_flash_attn: bool, cfg: &Config, vb: VarBuilder, ) -> Result<Self> { let self_attn = Attention::new(rotary_emb, use_flash_attn, cfg, vb.pp("self_attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let input_layernorm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?; residual + xs } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, lm_head: Linear, device: Device, dtype: DType, hidden_size: usize, } impl Model { pub fn new(use_flash_attn: bool, cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?); let mut layers = 
Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), use_flash_attn, cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; let lm_head = Linear::new(embed_tokens.embeddings().clone(), None); Ok(Self { embed_tokens, layers, norm, lm_head, device: vb.device().clone(), dtype: vb.dtype(), hidden_size: cfg.hidden_size, }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { let mask: Vec<_> = (0..tgt_len) .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. })) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(self.dtype) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?; Some(mask) }; let xs = self.embed_tokens.forward(input_ids)?; let mut xs = (xs * (self.hidden_size as f64).sqrt())?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } }
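// --- Editor's usage sketch (not part of the upstream file) ---
// A hedged illustration of the `hidden_act`/`hidden_activation` resolution above;
// every field value in `dummy_cfg` is an arbitrary assumption, not a real Gemma
// checkpoint configuration.
#[cfg(test)]
mod config_sketch {
    use super::Config;
    use candle_nn::Activation;

    fn dummy_cfg(act: Option<Activation>, activation: Option<Activation>) -> Config {
        Config {
            attention_bias: false,
            head_dim: 8,
            hidden_act: act,
            hidden_activation: activation,
            hidden_size: 32,
            intermediate_size: 64,
            num_attention_heads: 4,
            num_hidden_layers: 1,
            num_key_value_heads: 1,
            rms_norm_eps: 1e-6,
            rope_theta: 10_000.0,
            vocab_size: 128,
            max_position_embeddings: 64,
        }
    }

    #[test]
    fn hidden_act_resolution() {
        // Exactly one of the two fields must be set.
        assert!(dummy_cfg(Some(Activation::Gelu), None).hidden_act().is_ok());
        assert!(dummy_cfg(None, Some(Activation::Gelu)).hidden_act().is_ok());
        assert!(dummy_cfg(Some(Activation::Gelu), Some(Activation::Gelu))
            .hidden_act()
            .is_err());
        assert!(dummy_cfg(None, None).hidden_act().is_err());
    }
}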
candle/candle-transformers/src/models/gemma.rs/0
{ "file_path": "candle/candle-transformers/src/models/gemma.rs", "repo_id": "candle", "token_count": 6869 }
40
use crate::models::with_tracing::{linear_no_bias, Linear, RmsNorm}; /// Mixtral Model /// https://github.com/huggingface/transformers/blob/main/src/transformers/models/mixtral/modeling_mixtral.py /// https://mistral.ai/news/mixtral-of-experts/ use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::{Activation, VarBuilder}; use serde::Deserialize; use std::sync::Arc; /// https://github.com/huggingface/transformers/blob/1a585c1222a56bcaecc070966d558d4a9d862e83/src/transformers/models/mixtral/configuration_mixtral.py#L113 #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { pub(crate) vocab_size: usize, pub(crate) hidden_size: usize, pub(crate) intermediate_size: usize, pub(crate) num_hidden_layers: usize, pub(crate) num_attention_heads: usize, pub(crate) num_key_value_heads: usize, pub(crate) hidden_act: Activation, pub(crate) max_position_embeddings: usize, pub(crate) rms_norm_eps: f64, pub(crate) rope_theta: f64, pub(crate) sliding_window: usize, pub(crate) num_experts_per_tok: usize, pub(crate) num_local_experts: usize, pub(crate) use_flash_attn: bool, } impl Config { /// https://huggingface.co/mistralai/Mixtral-8x7B-v0.1/blob/main/config.json pub fn v0_1_8x7b(use_flash_attn: bool) -> Self { Self { vocab_size: 32000, hidden_size: 4096, intermediate_size: 14336, num_hidden_layers: 32, num_attention_heads: 32, num_key_value_heads: 8, hidden_act: Activation::Silu, max_position_embeddings: 32768, rms_norm_eps: 1e-5, rope_theta: 1e6, sliding_window: 4096, num_experts_per_tok: 2, num_local_experts: 8, use_flash_attn, } } } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } fn rotate_half(xs: &Tensor) -> Result<Tensor> { let last_dim = xs.dim(D::Minus1)?; let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?; let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?; Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1) } impl RotaryEmbedding { fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg.hidden_size / cfg.num_attention_heads; let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / (cfg.rope_theta as f32).powf(i as f32 / dim as f32)) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim) let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim) let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?; let k_embed = (k.broadcast_mul(&cos)? 
+ rotate_half(k)?.broadcast_mul(&sin))?; Ok((q_embed, k_embed)) } } #[cfg(feature = "flash-attn")] fn flash_attn( q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal) } #[cfg(not(feature = "flash-attn"))] fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> { unimplemented!("compile with '--features flash-attn'") } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, use_flash_attn: bool, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let head_dim = hidden_sz / num_heads; let q_proj = linear_no_bias(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?; let k_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?; let v_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?; let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups, head_dim, hidden_size: hidden_sz, rotary_emb, kv_cache: None, use_flash_attn: cfg.use_flash_attn, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?; let attn_output = if self.use_flash_attn { // flash-attn expects (b_sz, seq_len, nheads, head_dim) let q = query_states.transpose(1, 2)?; let k = key_states.transpose(1, 2)?; let v = value_states.transpose(1, 2)?; let softmax_scale = 1f32 / (self.head_dim as f32).sqrt(); flash_attn(&q, &k, &v, softmax_scale, q_len > 1)?.transpose(1, 2)? } else { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? 
.reshape((b_sz, q_len, self.hidden_size))? .apply(&self.o_proj) } } #[derive(Debug, Clone)] struct BlockSparseTop2MLP { w1: Linear, w2: Linear, w3: Linear, act_fn: Activation, } impl BlockSparseTop2MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let w1 = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("w1"))?; let w2 = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("w2"))?; let w3 = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("w3"))?; Ok(Self { w1, w2, w3, act_fn: cfg.hidden_act, }) } } impl Module for BlockSparseTop2MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let lhs = xs.apply(&self.w1)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.w3)?; (lhs * rhs)?.apply(&self.w2) } } #[derive(Debug, Clone)] struct SparseMoeBlock { gate: Linear, experts: Vec<BlockSparseTop2MLP>, num_experts_per_tok: usize, } impl SparseMoeBlock { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let gate = linear_no_bias(cfg.hidden_size, cfg.num_local_experts, vb.pp("gate"))?; let mut experts = Vec::with_capacity(cfg.num_local_experts); let vb = vb.pp("experts"); for idx in 0..cfg.num_local_experts { let expert = BlockSparseTop2MLP::new(cfg, vb.pp(idx))?; experts.push(expert) } Ok(SparseMoeBlock { gate, experts, num_experts_per_tok: cfg.num_experts_per_tok, }) } } impl Module for SparseMoeBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (b_size, seq_len, hidden_dim) = xs.dims3()?; let xs = xs.reshape(((), hidden_dim))?; let router_logits = xs.apply(&self.gate)?; let routing_weights = candle_nn::ops::softmax_last_dim(&router_logits)?; // In order to extract topk, we extract the data from the tensor and manipulate it // directly. Maybe we will want to use some custom ops instead at some point. let routing_weights = routing_weights.to_dtype(DType::F32)?.to_vec2::<f32>()?; // routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1) // top_x contains the row indexes to evaluate for each expert. let mut top_x = vec![vec![]; self.experts.len()]; let mut selected_rws = vec![vec![]; self.experts.len()]; for (row_idx, rw) in routing_weights.iter().enumerate() { let mut dst = (0..rw.len() as u32).collect::<Vec<u32>>(); dst.sort_by(|&i, &j| rw[j as usize].total_cmp(&rw[i as usize])); let mut sum_routing_weights = 0f32; for &expert_idx in dst.iter().take(self.num_experts_per_tok) { let expert_idx = expert_idx as usize; let routing_weight = rw[expert_idx]; sum_routing_weights += routing_weight; top_x[expert_idx].push(row_idx as u32); } for &expert_idx in dst.iter().take(self.num_experts_per_tok) { let expert_idx = expert_idx as usize; let routing_weight = rw[expert_idx]; selected_rws[expert_idx].push(routing_weight / sum_routing_weights) } } // routing_weights /= routing_weights.sum(dim=-1, keepdim=True) // expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0) let mut ys = xs.zeros_like()?; for (expert_idx, expert_layer) in self.experts.iter().enumerate() { let top_x = &top_x[expert_idx]; if top_x.is_empty() { continue; } let top_x = Tensor::new(top_x.as_slice(), xs.device())?; let selected_rws = Tensor::new(selected_rws[expert_idx].as_slice(), xs.device())?.reshape(((), 1))?; // Index the correct hidden states and compute the expert hidden state for // the current expert. 
We need to make sure to multiply the output hidden // states by `routing_weights` on the corresponding tokens (top-1 and top-2) let current_state = xs.index_select(&top_x, 0)?.reshape(((), hidden_dim))?; // current_hidden_states = expert_layer(current_state, routing_weights[top_x_list, idx_list, None]) let current_hidden_states = expert_layer.forward(&current_state)?; let current_hidden_states = current_hidden_states.broadcast_mul(&selected_rws)?; ys = ys.index_add(&top_x, &current_hidden_states, 0)?; } let ys = ys.reshape((b_size, seq_len, hidden_dim))?; Ok(ys) } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, block_sparse_moe: SparseMoeBlock, input_layernorm: RmsNorm, post_attention_layernorm: RmsNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let block_sparse_moe = SparseMoeBlock::new(cfg, vb.pp("block_sparse_moe"))?; let input_layernorm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, block_sparse_moe, input_layernorm, post_attention_layernorm, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs .apply(&self.post_attention_layernorm)? .apply(&self.block_sparse_moe)?; residual + xs } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, lm_head: Linear, sliding_window: usize, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; Ok(Self { embed_tokens, layers, norm, lm_head, sliding_window: cfg.sliding_window, device: vb.device().clone(), dtype: vb.dtype(), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { // Sliding window mask? let mask: Vec<_> = (0..tgt_len) .flat_map(|i| { (0..tgt_len).map(move |j| { if i < j || j + self.sliding_window < i { f32::NEG_INFINITY } else { 0. } }) }) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? 
.to_dtype(self.dtype) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } }
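// --- Editor's usage sketch (not part of the upstream file) ---
// A hedged illustration of the preset configuration and the mixture-of-experts
// routing parameters consumed by `SparseMoeBlock`; the derived head_dim value is
// computed here for illustration only.
#[cfg(test)]
mod config_sketch {
    use super::Config;

    #[test]
    fn v0_1_8x7b_routing_params() {
        // `false` disables the flash-attn code path.
        let cfg = Config::v0_1_8x7b(false);
        // Each token is routed to its top-2 experts out of 8 local experts.
        assert_eq!(cfg.num_experts_per_tok, 2);
        assert_eq!(cfg.num_local_experts, 8);
        // head_dim = hidden_size / num_attention_heads.
        assert_eq!(cfg.hidden_size / cfg.num_attention_heads, 128);
    }
}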
candle/candle-transformers/src/models/mixtral.rs/0
{ "file_path": "candle/candle-transformers/src/models/mixtral.rs", "repo_id": "candle", "token_count": 8872 }
41
use candle::DType;
use serde::Deserialize;

pub const DTYPE: DType = DType::F32;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum PositionEmbeddingType {
    Absolute,
    Alibi,
}

// https://github.com/huggingface/transformers/blob/main/src/transformers/models/persimmon/configuration_persimmon.py
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
    pub vocab_size: usize,
    pub hidden_size: usize,
    pub intermediate_size: usize,
    pub num_hidden_layers: usize,
    pub num_attention_heads: usize,
    pub num_key_value_heads: usize,
    pub hidden_act: candle_nn::Activation,
    pub max_position_embeddings: usize,
    pub initializer_range: f64,
    pub layer_norm_eps: f64,
    pub rms_norm_eps: f64,
    pub use_cache: bool,
    pub tie_word_embeddings: bool,
    pub rope_theta: f64,
    pub qk_layernorm: bool,
    pub partial_rotary_factor: f64,
}

impl Config {
    pub fn base_8b() -> Self {
        // https://huggingface.co/adept/persimmon-8b-base/blob/main/config.json
        Self {
            hidden_act: candle_nn::Activation::Relu,
            hidden_size: 4096,
            initializer_range: 0.02,
            intermediate_size: 16384,
            layer_norm_eps: 1e-05,
            max_position_embeddings: 16384,
            num_attention_heads: 64,
            num_hidden_layers: 36,
            num_key_value_heads: 64,
            qk_layernorm: true,
            rms_norm_eps: 1e-06,
            rope_theta: 25000.0,
            tie_word_embeddings: false,
            use_cache: true,
            vocab_size: 262144,
            partial_rotary_factor: 0.5,
        }
    }
}
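// --- Editor's usage sketch (not part of the upstream file) ---
// A hedged illustration of the preset config; `head_dim` and `rotary_dim` are
// derived here for illustration and are not fields of `Config` itself.
#[cfg(test)]
mod config_sketch {
    use super::Config;

    #[test]
    fn base_8b_derived_dims() {
        let cfg = Config::base_8b();
        let head_dim = cfg.hidden_size / cfg.num_attention_heads; // 4096 / 64
        let rotary_dim = (head_dim as f64 * cfg.partial_rotary_factor) as usize;
        assert_eq!(head_dim, 64);
        assert_eq!(rotary_dim, 32);
    }
}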
candle/candle-transformers/src/models/persimmon.rs/0
{ "file_path": "candle/candle-transformers/src/models/persimmon.rs", "repo_id": "candle", "token_count": 814 }
42